file_name large_stringlengths 4 140 | prefix large_stringlengths 0 39k | suffix large_stringlengths 0 36.1k | middle large_stringlengths 0 29.4k | fim_type large_stringclasses 4
values |
|---|---|---|---|---|
spannerautoscaler_controller.go | /*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controllers
import (
"context"
"errors"
"sync"
"time"
"github.com/go-logr/logr"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
utilclock "k8s.io/apimachinery/pkg/util/clock"
"k8s.io/client-go/tools/record"
ctrlbuilder "sigs.k8s.io/controller-runtime/pkg/builder"
ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
ctrlcontroller "sigs.k8s.io/controller-runtime/pkg/controller"
ctrlmanager "sigs.k8s.io/controller-runtime/pkg/manager"
ctrlreconcile "sigs.k8s.io/controller-runtime/pkg/reconcile"
spannerv1beta1 "github.com/mercari/spanner-autoscaler/api/v1beta1"
"github.com/mercari/spanner-autoscaler/pkg/logging"
"github.com/mercari/spanner-autoscaler/pkg/spanner"
"github.com/mercari/spanner-autoscaler/pkg/syncer"
)
// Sentinel errors returned by fetchCredentials when the authentication
// configuration on a SpannerAutoscaler resource is missing or inconsistent.
var (
	errFetchServiceAccountJSONNoNameSpecified   = errors.New("no name specified")
	errFetchServiceAccountJSONNoKeySpecified    = errors.New("no key specified")
	errFetchServiceAccountJSONNoSecretFound     = errors.New("no secret found by specified name")
	errFetchServiceAccountJSONNoSecretDataFound = errors.New("no secret found by specified key")
	// iamKeySecret and impersonateConfig must not be set at the same time.
	errInvalidExclusiveCredentials = errors.New("impersonateConfig and iamKeySecret are mutually exclusive")
)
// defaultScaledownStepSize is the fallback scale-down step (in nodes; it is
// multiplied by 1000 to convert to processing units in
// calcDesiredProcessingUnits). Applied in Reconcile when
// Spec.ScaleConfig.ScaledownStepSize is zero.
// TODO: move this to 'defaulting' webhook
const defaultScaledownStepSize = 2
// SpannerAutoscalerReconciler reconciles a SpannerAutoscaler object.
type SpannerAutoscalerReconciler struct {
	ctrlClient ctrlclient.Client // cached client for reading/updating SpannerAutoscaler resources
	apiReader  ctrlclient.Reader // non-cached reader, used to fetch credential Secrets directly
	scheme     *runtime.Scheme
	recorder   record.EventRecorder

	// syncers maps each SpannerAutoscaler resource to its running syncer.
	// Guarded by mu.
	syncers map[types.NamespacedName]syncer.Syncer

	// scaleDownInterval is the minimum time between scale-down operations
	// (defaults to 55 minutes in NewSpannerAutoscalerReconciler).
	scaleDownInterval time.Duration

	clock utilclock.Clock // injectable clock, used for LastScaleTime decisions
	log   logr.Logger

	mu sync.RWMutex // protects syncers
}
// Compile-time check that SpannerAutoscalerReconciler satisfies the
// controller-runtime Reconciler interface.
var _ ctrlreconcile.Reconciler = (*SpannerAutoscalerReconciler)(nil)

// Option customizes a SpannerAutoscalerReconciler during construction.
type Option func(*SpannerAutoscalerReconciler)

// WithSyncers replaces the reconciler's syncer registry.
func WithSyncers(syncers map[types.NamespacedName]syncer.Syncer) Option {
	return func(sar *SpannerAutoscalerReconciler) { sar.syncers = syncers }
}

// WithScaleDownInterval overrides the minimum interval between scale-downs.
func WithScaleDownInterval(interval time.Duration) Option {
	return func(sar *SpannerAutoscalerReconciler) { sar.scaleDownInterval = interval }
}

// WithClock injects a clock (useful for tests).
func WithClock(c utilclock.Clock) Option {
	return func(sar *SpannerAutoscalerReconciler) { sar.clock = c }
}

// WithLog sets the logger, scoped under the "spannerautoscaler" name.
func WithLog(logger logr.Logger) Option {
	return func(sar *SpannerAutoscalerReconciler) { sar.log = logger.WithName("spannerautoscaler") }
}
// NewSpannerAutoscalerReconciler returns a new SpannerAutoscalerReconciler.
// Defaults: an empty syncer registry, a 55 minute scale-down interval, and
// the real clock. Each supplied Option may override these defaults.
func NewSpannerAutoscalerReconciler(
	ctrlClient ctrlclient.Client,
	apiReader ctrlclient.Reader,
	scheme *runtime.Scheme,
	recorder record.EventRecorder,
	logger logr.Logger,
	opts ...Option,
) *SpannerAutoscalerReconciler {
	reconciler := &SpannerAutoscalerReconciler{
		ctrlClient:        ctrlClient,
		apiReader:         apiReader,
		scheme:            scheme,
		recorder:          recorder,
		syncers:           map[types.NamespacedName]syncer.Syncer{},
		scaleDownInterval: 55 * time.Minute,
		clock:             utilclock.RealClock{},
		log:               logger,
	}
	for _, apply := range opts {
		apply(reconciler)
	}
	return reconciler
}
// +kubebuilder:rbac:groups=spanner.mercari.com,resources=spannerautoscalers,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=spanner.mercari.com,resources=spannerautoscalers/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=spanner.mercari.com,resources=spannerautoscalers/finalizers,verbs=update
// +kubebuilder:rbac:groups="",resources=events,verbs=create;patch
// +kubebuilder:rbac:groups="",resources=secrets,verbs=get,resourceNames=spanner-autoscaler-gcp-sa

// Reconcile implements ctrlreconcile.Reconciler.
//
// Per reconciliation it: (1) stops and deregisters the syncer if the resource
// was deleted; (2) starts or replaces the per-resource syncer as needed;
// (3) computes the desired processing units from observed CPU utilization and
// updates the Spanner instance and the resource status when a scale is due.
func (r *SpannerAutoscalerReconciler) Reconcile(ctx context.Context, req ctrlreconcile.Request) (ctrlreconcile.Result, error) {
	nn := req.NamespacedName
	log := r.log.WithValues("namespaced name", nn)

	// Look up any syncer already registered for this resource.
	r.mu.RLock()
	s, syncerExists := r.syncers[nn]
	r.mu.RUnlock()

	var sa spannerv1beta1.SpannerAutoscaler
	if err := r.ctrlClient.Get(ctx, nn, &sa); err != nil {
		err = ctrlclient.IgnoreNotFound(err)
		if err != nil {
			log.Error(err, "failed to get spanner-autoscaler")
			return ctrlreconcile.Result{}, err
		}
		// NotFound: the resource was deleted, so stop and deregister its
		// syncer (if one exists) and end reconciliation.
		log.V(2).Info("checking if a syncer exists")
		if syncerExists {
			s.Stop()
			r.mu.Lock()
			delete(r.syncers, nn)
			r.mu.Unlock()
			log.Info("stopped syncer")
		}
		return ctrlreconcile.Result{}, nil
	}

	// TODO: move this to the defaulting webhook
	if sa.Spec.ScaleConfig.ScaledownStepSize == 0 {
		sa.Spec.ScaleConfig.ScaledownStepSize = defaultScaledownStepSize
	}

	log.V(1).Info("resource status", "spannerautoscaler", sa)

	credentials, err := r.fetchCredentials(ctx, &sa)
	if err != nil {
		r.recorder.Event(&sa, corev1.EventTypeWarning, "ServiceAccountRequired", err.Error())
		log.Error(err, "failed to fetch service account")
		return ctrlreconcile.Result{}, err
	}

	// If the syncer does not exist, start a syncer.
	if !syncerExists {
		log.V(2).Info("syncer does not exist, starting a new syncer")
		ctx = logging.WithContext(ctx, log)
		if err := r.startSyncer(ctx, nn, sa.Spec.TargetInstance.ProjectID, sa.Spec.TargetInstance.InstanceID, credentials); err != nil {
			r.recorder.Event(&sa, corev1.EventTypeWarning, "FailedStartSyncer", err.Error())
			log.Error(err, "failed to start syncer")
			return ctrlreconcile.Result{}, err
		}
		return ctrlreconcile.Result{}, nil
	}

	// If target spanner instance or service account have been changed, then just replace syncer.
	if s.UpdateTarget(sa.Spec.TargetInstance.ProjectID, sa.Spec.TargetInstance.InstanceID, credentials) {
		s.Stop()
		r.mu.Lock()
		delete(r.syncers, nn)
		r.mu.Unlock()
		if err := r.startSyncer(ctx, nn, sa.Spec.TargetInstance.ProjectID, sa.Spec.TargetInstance.InstanceID, credentials); err != nil {
			r.recorder.Event(&sa, corev1.EventTypeWarning, "FailedStartSyncer", err.Error())
			log.Error(err, "failed to start syncer")
			return ctrlreconcile.Result{}, err
		}
		log.Info("replaced syncer", "namespaced name", sa)
		return ctrlreconcile.Result{}, nil
	}

	// Skip scaling until metrics have been fetched and the instance is ready.
	log.V(1).Info("checking to see if we need to calculate processing units", "sa", sa)
	if !r.needCalcProcessingUnits(&sa) {
		return ctrlreconcile.Result{}, nil
	}

	// TODO: change this to pass the object instead of so many parameters
	desiredProcessingUnits := calcDesiredProcessingUnits(
		sa.Status.CurrentHighPriorityCPUUtilization,
		normalizeProcessingUnitsOrNodes(sa.Status.CurrentProcessingUnits, sa.Status.CurrentNodes, sa.Spec.ScaleConfig.ComputeType),
		sa.Spec.ScaleConfig.TargetCPUUtilization.HighPriority,
		normalizeProcessingUnitsOrNodes(sa.Spec.ScaleConfig.ProcessingUnits.Min, sa.Spec.ScaleConfig.Nodes.Min, sa.Spec.ScaleConfig.ComputeType),
		normalizeProcessingUnitsOrNodes(sa.Spec.ScaleConfig.ProcessingUnits.Max, sa.Spec.ScaleConfig.Nodes.Max, sa.Spec.ScaleConfig.ComputeType),
		sa.Spec.ScaleConfig.ScaledownStepSize,
	)

	now := r.clock.Now()
	// Rate-limit: skip if nothing changed or a cooldown window still applies.
	log.V(1).Info("processing units need to be changed", "desiredProcessingUnits", desiredProcessingUnits, "sa.Status", sa.Status)
	if !r.needUpdateProcessingUnits(&sa, desiredProcessingUnits, now) {
		return ctrlreconcile.Result{}, nil
	}

	// Apply the new capacity to the Spanner instance via the syncer.
	if err := s.UpdateInstance(ctx, desiredProcessingUnits); err != nil {
		r.recorder.Event(&sa, corev1.EventTypeWarning, "FailedUpdateInstance", err.Error())
		log.Error(err, "failed to update spanner instance status")
		return ctrlreconcile.Result{}, err
	}

	r.recorder.Eventf(&sa, corev1.EventTypeNormal, "Updated", "Updated processing units of %s/%s from %d to %d", sa.Spec.TargetInstance.ProjectID, sa.Spec.TargetInstance.InstanceID,
		normalizeProcessingUnitsOrNodes(sa.Status.CurrentProcessingUnits, sa.Status.CurrentNodes, sa.Spec.ScaleConfig.ComputeType), desiredProcessingUnits)
	log.Info("updated nodes via google cloud api", "before", normalizeProcessingUnitsOrNodes(sa.Status.CurrentProcessingUnits, sa.Status.CurrentNodes, sa.Spec.ScaleConfig.ComputeType), "after", desiredProcessingUnits)

	// Record the desired capacity and scale time on a copy of the status.
	saCopy := sa.DeepCopy()
	saCopy.Status.DesiredProcessingUnits = desiredProcessingUnits
	saCopy.Status.DesiredNodes = desiredProcessingUnits / 1000
	saCopy.Status.LastScaleTime = metav1.Time{Time: now}
	if err = r.ctrlClient.Status().Update(ctx, saCopy); err != nil {
		r.recorder.Event(&sa, corev1.EventTypeWarning, "FailedUpdateStatus", err.Error())
		log.Error(err, "failed to update spanner autoscaler status")
		return ctrlreconcile.Result{}, err
	}
	return ctrlreconcile.Result{}, nil
}
// normalizeProcessingUnitsOrNodes converts a (processing units, nodes) pair
// into processing units according to the configured compute type:
// ComputeTypePU returns pu as-is, ComputeTypeNode converts nodes at
// 1000 PU per node, and anything else yields -1.
// TODO: convert all internal computations to processing units only
func normalizeProcessingUnitsOrNodes(pu, nodes int, computeType spannerv1beta1.ComputeType) int {
	if computeType == spannerv1beta1.ComputeTypePU {
		return pu
	}
	if computeType == spannerv1beta1.ComputeTypeNode {
		return nodes * 1000
	}
	return -1
}
// SetupWithManager sets up the controller with ctrlmanager.Manager,
// registering this reconciler to watch SpannerAutoscaler resources.
func (r *SpannerAutoscalerReconciler) SetupWithManager(mgr ctrlmanager.Manager) error {
	opts := ctrlcontroller.Options{
		Reconciler: r,
	}
	return ctrlbuilder.ControllerManagedBy(mgr).
		For(&spannerv1beta1.SpannerAutoscaler{}).
		WithOptions(opts).
		Complete(r)
}
func (r *SpannerAutoscalerReconciler) | (ctx context.Context, nn types.NamespacedName, projectID, instanceID string, credentials *syncer.Credentials) error {
log := logging.FromContext(ctx)
s, err := syncer.New(ctx, r.ctrlClient, nn, projectID, instanceID, credentials, r.recorder, syncer.WithLog(log))
if err != nil {
return err
}
go s.Start()
r.mu.Lock()
r.syncers[nn] = s
r.mu.Unlock()
log.V(1).Info("added syncer")
return nil
}
// needCalcProcessingUnits reports whether the desired capacity should be
// computed: metrics must have been fetched at least once and the instance
// must be in the ready state.
func (r *SpannerAutoscalerReconciler) needCalcProcessingUnits(sa *spannerv1beta1.SpannerAutoscaler) bool {
	log := r.log

	// TODO: Fix this to use only processing units
	if sa.Status.CurrentProcessingUnits == 0 && sa.Status.CurrentNodes == 0 {
		log.Info("current processing units have not fetched yet")
		return false
	}
	if sa.Status.InstanceState != spanner.StateReady {
		log.Info("instance state is not ready")
		return false
	}
	return true
}
// needUpdateProcessingUnits reports whether the instance should actually be
// scaled to desiredProcessingUnits at time now. It returns false when the
// desired capacity equals the current one, or when a cooldown window since
// the last scale has not yet elapsed (10 seconds for scale-up,
// r.scaleDownInterval for scale-down).
//
// Fix: the comparisons previously re-read r.clock.Now() instead of using the
// now parameter, so the timestamp logged as "now" could differ from the one
// actually compared. All decisions now use the single now value the caller
// captured.
func (r *SpannerAutoscalerReconciler) needUpdateProcessingUnits(sa *spannerv1beta1.SpannerAutoscaler, desiredProcessingUnits int, now time.Time) bool {
	log := r.log

	currentProcessingUnits := normalizeProcessingUnitsOrNodes(sa.Status.CurrentProcessingUnits, sa.Status.CurrentNodes, sa.Spec.ScaleConfig.ComputeType)

	switch {
	case desiredProcessingUnits == currentProcessingUnits:
		log.V(0).Info("the desired number of processing units is equal to that of the current; no need to scale")
		return false

	case desiredProcessingUnits > currentProcessingUnits && now.Before(sa.Status.LastScaleTime.Time.Add(10*time.Second)):
		log.Info("too short to scale up since instance scaled last",
			"now", now.String(),
			"last scale time", sa.Status.LastScaleTime,
		)
		return false

	case desiredProcessingUnits < currentProcessingUnits && now.Before(sa.Status.LastScaleTime.Time.Add(r.scaleDownInterval)):
		log.Info("too short to scale down since instance scaled nodes last",
			"now", now.String(),
			"last scale time", sa.Status.LastScaleTime,
		)
		return false

	default:
		return true
	}
}
// calcDesiredNodes is a node-granularity wrapper around
// calcDesiredProcessingUnits (1 node == 1000 processing units).
// For testing purpose only.
func calcDesiredNodes(currentCPU, currentNodes, targetCPU, minNodes, maxNodes, scaledownStepSize int) int {
	const puPerNode = 1000
	pu := calcDesiredProcessingUnits(
		currentCPU,
		currentNodes*puPerNode,
		targetCPU,
		minNodes*puPerNode,
		maxNodes*puPerNode,
		scaledownStepSize,
	)
	return pu / puPerNode
}
// nextValidProcessingUnits returns the smallest valid processing-unit value
// strictly greater than processingUnits.
// https://cloud.google.com/spanner/docs/compute-capacity?hl=en
// Valid values are multiples of 100 below 1000, and multiples of 1000 from
// 1000 upward.
func nextValidProcessingUnits(processingUnits int) int {
	step := 1000
	if processingUnits < 1000 {
		step = 100
	}
	return (processingUnits/step + 1) * step
}
// maxInt returns the largest of the given integers.
func maxInt(first int, rest ...int) int {
	best := first
	for _, candidate := range rest {
		if candidate > best {
			best = candidate
		}
	}
	return best
}
// calcDesiredProcessingUnits calculates the values needed to keep CPU utilization below TargetCPU.
func calcDesiredProcessingUnits(currentCPU, currentProcessingUnits, targetCPU, minProcessingUnits, maxProcessingUnits, scaledownStepSize int) int {
totalCPUProduct1000 := currentCPU * currentProcessingUnits
desiredProcessingUnits := maxInt(nextValidProcessingUnits(totalCPUProduct1000/targetCPU), currentProcessingUnits-scaledownStepSize*1000)
switch {
case desiredProcessingUnits < minProcessingUnits:
return minProcessingUnits
case desiredProcessingUnits > maxProcessingUnits:
return maxProcessingUnits
default:
return desiredProcessingUnits
}
}
// fetchCredentials builds syncer credentials from the resource's
// Authentication spec: a service-account JSON key read from a Secret
// (AuthTypeSA), service-account impersonation (AuthTypeImpersonation), or
// Application Default Credentials otherwise.
//
// Fix: guard against nil IAMKeySecret/ImpersonateConfig before dereferencing.
// Previously, an AuthTypeSA resource without iamKeySecret (or
// AuthTypeImpersonation without impersonateConfig) caused a nil-pointer
// panic, because the mutual-exclusion check only fires when BOTH are set.
func (r *SpannerAutoscalerReconciler) fetchCredentials(ctx context.Context, sa *spannerv1beta1.SpannerAutoscaler) (*syncer.Credentials, error) {
	iamKeySecret := sa.Spec.Authentication.IAMKeySecret
	impersonateConfig := sa.Spec.Authentication.ImpersonateConfig

	// TODO: move this to 'validating' webhook
	if iamKeySecret != nil && impersonateConfig != nil {
		return nil, errInvalidExclusiveCredentials
	}

	switch sa.Spec.Authentication.Type {
	case spannerv1beta1.AuthTypeSA:
		// A missing iamKeySecret means no Secret name was specified at all.
		if iamKeySecret == nil {
			return nil, errFetchServiceAccountJSONNoNameSpecified
		}
		if iamKeySecret.Name == "" {
			return nil, errFetchServiceAccountJSONNoNameSpecified
		}
		if iamKeySecret.Key == "" {
			return nil, errFetchServiceAccountJSONNoKeySpecified
		}

		// Default the Secret namespace to the resource's own namespace.
		// TODO: move this to 'defaulting' webhook
		var namespace string
		if iamKeySecret.Namespace == "" {
			namespace = sa.Namespace
		} else {
			namespace = iamKeySecret.Namespace
		}

		// Read the Secret through the non-cached reader so no cluster-wide
		// Secret watch/cache is required.
		var secret corev1.Secret
		key := ctrlclient.ObjectKey{
			Name:      iamKeySecret.Name,
			Namespace: namespace,
		}
		if err := r.apiReader.Get(ctx, key, &secret); err != nil {
			if apierrors.IsNotFound(err) {
				return nil, errFetchServiceAccountJSONNoSecretFound
			}
			return nil, err
		}

		serviceAccountJSON, ok := secret.Data[iamKeySecret.Key]
		if !ok {
			return nil, errFetchServiceAccountJSONNoSecretDataFound
		}
		return syncer.NewServiceAccountJSONCredentials(serviceAccountJSON), nil

	case spannerv1beta1.AuthTypeImpersonation:
		if impersonateConfig == nil {
			return nil, errors.New("impersonateConfig is required for impersonation authentication")
		}
		return syncer.NewServiceAccountImpersonate(impersonateConfig.TargetServiceAccount, impersonateConfig.Delegates), nil

	default:
		// Fall back to Application Default Credentials.
		return syncer.NewADCCredentials(), nil
	}
}
| startSyncer | identifier_name |
spannerautoscaler_controller.go | /*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controllers
import (
"context"
"errors"
"sync"
"time"
"github.com/go-logr/logr"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
utilclock "k8s.io/apimachinery/pkg/util/clock"
"k8s.io/client-go/tools/record"
ctrlbuilder "sigs.k8s.io/controller-runtime/pkg/builder"
ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
ctrlcontroller "sigs.k8s.io/controller-runtime/pkg/controller"
ctrlmanager "sigs.k8s.io/controller-runtime/pkg/manager"
ctrlreconcile "sigs.k8s.io/controller-runtime/pkg/reconcile"
spannerv1beta1 "github.com/mercari/spanner-autoscaler/api/v1beta1"
"github.com/mercari/spanner-autoscaler/pkg/logging"
"github.com/mercari/spanner-autoscaler/pkg/spanner"
"github.com/mercari/spanner-autoscaler/pkg/syncer"
)
var (
errFetchServiceAccountJSONNoNameSpecified = errors.New("no name specified")
errFetchServiceAccountJSONNoKeySpecified = errors.New("no key specified")
errFetchServiceAccountJSONNoSecretFound = errors.New("no secret found by specified name")
errFetchServiceAccountJSONNoSecretDataFound = errors.New("no secret found by specified key")
errInvalidExclusiveCredentials = errors.New("impersonateConfig and iamKeySecret are mutually exclusive")
)
// TODO: move this to 'defaulting' webhook
const defaultScaledownStepSize = 2
// SpannerAutoscalerReconciler reconciles a SpannerAutoscaler object.
type SpannerAutoscalerReconciler struct {
ctrlClient ctrlclient.Client
apiReader ctrlclient.Reader
scheme *runtime.Scheme
recorder record.EventRecorder
syncers map[types.NamespacedName]syncer.Syncer
scaleDownInterval time.Duration
clock utilclock.Clock
log logr.Logger
mu sync.RWMutex
}
var _ ctrlreconcile.Reconciler = (*SpannerAutoscalerReconciler)(nil)
type Option func(*SpannerAutoscalerReconciler)
func WithSyncers(syncers map[types.NamespacedName]syncer.Syncer) Option {
return func(r *SpannerAutoscalerReconciler) {
r.syncers = syncers
}
}
func WithScaleDownInterval(scaleDownInterval time.Duration) Option {
return func(r *SpannerAutoscalerReconciler) {
r.scaleDownInterval = scaleDownInterval
}
}
func WithClock(clock utilclock.Clock) Option {
return func(r *SpannerAutoscalerReconciler) {
r.clock = clock
}
}
func WithLog(log logr.Logger) Option {
return func(r *SpannerAutoscalerReconciler) {
r.log = log.WithName("spannerautoscaler")
}
}
// NewSpannerAutoscalerReconciler returns a new SpannerAutoscalerReconciler.
func NewSpannerAutoscalerReconciler(
ctrlClient ctrlclient.Client,
apiReader ctrlclient.Reader,
scheme *runtime.Scheme,
recorder record.EventRecorder,
logger logr.Logger,
opts ...Option,
) *SpannerAutoscalerReconciler {
r := &SpannerAutoscalerReconciler{
ctrlClient: ctrlClient,
apiReader: apiReader,
scheme: scheme,
recorder: recorder,
syncers: make(map[types.NamespacedName]syncer.Syncer),
scaleDownInterval: 55 * time.Minute,
clock: utilclock.RealClock{},
log: logger,
}
for _, opt := range opts {
opt(r)
}
return r
}
// +kubebuilder:rbac:groups=spanner.mercari.com,resources=spannerautoscalers,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=spanner.mercari.com,resources=spannerautoscalers/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=spanner.mercari.com,resources=spannerautoscalers/finalizers,verbs=update
// +kubebuilder:rbac:groups="",resources=events,verbs=create;patch
// +kubebuilder:rbac:groups="",resources=secrets,verbs=get,resourceNames=spanner-autoscaler-gcp-sa
// Reconcile implements ctrlreconcile.Reconciler.
func (r *SpannerAutoscalerReconciler) Reconcile(ctx context.Context, req ctrlreconcile.Request) (ctrlreconcile.Result, error) {
nn := req.NamespacedName
log := r.log.WithValues("namespaced name", nn)
r.mu.RLock()
s, syncerExists := r.syncers[nn]
r.mu.RUnlock()
var sa spannerv1beta1.SpannerAutoscaler
if err := r.ctrlClient.Get(ctx, nn, &sa); err != nil {
err = ctrlclient.IgnoreNotFound(err)
if err != nil {
log.Error(err, "failed to get spanner-autoscaler")
return ctrlreconcile.Result{}, err
}
log.V(2).Info("checking if a syncer exists")
if syncerExists {
s.Stop()
r.mu.Lock()
delete(r.syncers, nn)
r.mu.Unlock()
log.Info("stopped syncer")
}
return ctrlreconcile.Result{}, nil
}
// TODO: move this to the defaulting webhook
if sa.Spec.ScaleConfig.ScaledownStepSize == 0 {
sa.Spec.ScaleConfig.ScaledownStepSize = defaultScaledownStepSize
}
log.V(1).Info("resource status", "spannerautoscaler", sa)
credentials, err := r.fetchCredentials(ctx, &sa)
if err != nil {
r.recorder.Event(&sa, corev1.EventTypeWarning, "ServiceAccountRequired", err.Error())
log.Error(err, "failed to fetch service account")
return ctrlreconcile.Result{}, err
}
// If the syncer does not exist, start a syncer.
if !syncerExists {
log.V(2).Info("syncer does not exist, starting a new syncer")
ctx = logging.WithContext(ctx, log)
if err := r.startSyncer(ctx, nn, sa.Spec.TargetInstance.ProjectID, sa.Spec.TargetInstance.InstanceID, credentials); err != nil {
r.recorder.Event(&sa, corev1.EventTypeWarning, "FailedStartSyncer", err.Error())
log.Error(err, "failed to start syncer")
return ctrlreconcile.Result{}, err
}
return ctrlreconcile.Result{}, nil
}
// If target spanner instance or service account have been changed, then just replace syncer.
if s.UpdateTarget(sa.Spec.TargetInstance.ProjectID, sa.Spec.TargetInstance.InstanceID, credentials) {
s.Stop()
r.mu.Lock()
delete(r.syncers, nn)
r.mu.Unlock()
if err := r.startSyncer(ctx, nn, sa.Spec.TargetInstance.ProjectID, sa.Spec.TargetInstance.InstanceID, credentials); err != nil {
r.recorder.Event(&sa, corev1.EventTypeWarning, "FailedStartSyncer", err.Error())
log.Error(err, "failed to start syncer")
return ctrlreconcile.Result{}, err
}
log.Info("replaced syncer", "namespaced name", sa)
return ctrlreconcile.Result{}, nil
}
log.V(1).Info("checking to see if we need to calculate processing units", "sa", sa)
if !r.needCalcProcessingUnits(&sa) {
return ctrlreconcile.Result{}, nil
}
// TODO: change this to pass the object instead of so many parameters
desiredProcessingUnits := calcDesiredProcessingUnits(
sa.Status.CurrentHighPriorityCPUUtilization,
normalizeProcessingUnitsOrNodes(sa.Status.CurrentProcessingUnits, sa.Status.CurrentNodes, sa.Spec.ScaleConfig.ComputeType),
sa.Spec.ScaleConfig.TargetCPUUtilization.HighPriority,
normalizeProcessingUnitsOrNodes(sa.Spec.ScaleConfig.ProcessingUnits.Min, sa.Spec.ScaleConfig.Nodes.Min, sa.Spec.ScaleConfig.ComputeType),
normalizeProcessingUnitsOrNodes(sa.Spec.ScaleConfig.ProcessingUnits.Max, sa.Spec.ScaleConfig.Nodes.Max, sa.Spec.ScaleConfig.ComputeType),
sa.Spec.ScaleConfig.ScaledownStepSize,
)
now := r.clock.Now()
log.V(1).Info("processing units need to be changed", "desiredProcessingUnits", desiredProcessingUnits, "sa.Status", sa.Status)
if !r.needUpdateProcessingUnits(&sa, desiredProcessingUnits, now) {
return ctrlreconcile.Result{}, nil
}
if err := s.UpdateInstance(ctx, desiredProcessingUnits); err != nil {
r.recorder.Event(&sa, corev1.EventTypeWarning, "FailedUpdateInstance", err.Error())
log.Error(err, "failed to update spanner instance status")
return ctrlreconcile.Result{}, err
}
r.recorder.Eventf(&sa, corev1.EventTypeNormal, "Updated", "Updated processing units of %s/%s from %d to %d", sa.Spec.TargetInstance.ProjectID, sa.Spec.TargetInstance.InstanceID,
normalizeProcessingUnitsOrNodes(sa.Status.CurrentProcessingUnits, sa.Status.CurrentNodes, sa.Spec.ScaleConfig.ComputeType), desiredProcessingUnits)
log.Info("updated nodes via google cloud api", "before", normalizeProcessingUnitsOrNodes(sa.Status.CurrentProcessingUnits, sa.Status.CurrentNodes, sa.Spec.ScaleConfig.ComputeType), "after", desiredProcessingUnits)
saCopy := sa.DeepCopy()
saCopy.Status.DesiredProcessingUnits = desiredProcessingUnits
saCopy.Status.DesiredNodes = desiredProcessingUnits / 1000
saCopy.Status.LastScaleTime = metav1.Time{Time: now}
if err = r.ctrlClient.Status().Update(ctx, saCopy); err != nil {
r.recorder.Event(&sa, corev1.EventTypeWarning, "FailedUpdateStatus", err.Error())
log.Error(err, "failed to update spanner autoscaler status")
return ctrlreconcile.Result{}, err
}
return ctrlreconcile.Result{}, nil
}
// TODO: convert all internal computations to processing units only
func normalizeProcessingUnitsOrNodes(pu, nodes int, computeType spannerv1beta1.ComputeType) int {
switch computeType {
case spannerv1beta1.ComputeTypePU:
return pu
case spannerv1beta1.ComputeTypeNode:
return nodes * 1000
default:
return -1
}
}
// SetupWithManager sets up the controller with ctrlmanager.Manager.
func (r *SpannerAutoscalerReconciler) SetupWithManager(mgr ctrlmanager.Manager) error {
opts := ctrlcontroller.Options{
Reconciler: r,
}
return ctrlbuilder.ControllerManagedBy(mgr).
For(&spannerv1beta1.SpannerAutoscaler{}).
WithOptions(opts).
Complete(r)
}
func (r *SpannerAutoscalerReconciler) startSyncer(ctx context.Context, nn types.NamespacedName, projectID, instanceID string, credentials *syncer.Credentials) error {
log := logging.FromContext(ctx)
s, err := syncer.New(ctx, r.ctrlClient, nn, projectID, instanceID, credentials, r.recorder, syncer.WithLog(log))
if err != nil {
return err
}
go s.Start()
r.mu.Lock()
r.syncers[nn] = s
r.mu.Unlock()
log.V(1).Info("added syncer")
return nil
}
func (r *SpannerAutoscalerReconciler) needCalcProcessingUnits(sa *spannerv1beta1.SpannerAutoscaler) bool {
log := r.log
switch {
// TODO: Fix this to use only processing units
case sa.Status.CurrentProcessingUnits == 0 && sa.Status.CurrentNodes == 0:
log.Info("current processing units have not fetched yet")
return false
case sa.Status.InstanceState != spanner.StateReady:
log.Info("instance state is not ready")
return false
default:
return true
}
}
func (r *SpannerAutoscalerReconciler) needUpdateProcessingUnits(sa *spannerv1beta1.SpannerAutoscaler, desiredProcessingUnits int, now time.Time) bool {
log := r.log
currentProcessingUnits := normalizeProcessingUnitsOrNodes(sa.Status.CurrentProcessingUnits, sa.Status.CurrentNodes, sa.Spec.ScaleConfig.ComputeType)
switch {
case desiredProcessingUnits == currentProcessingUnits:
log.V(0).Info("the desired number of processing units is equal to that of the current; no need to scale")
return false
case desiredProcessingUnits > currentProcessingUnits && r.clock.Now().Before(sa.Status.LastScaleTime.Time.Add(10*time.Second)):
log.Info("too short to scale up since instance scaled last",
"now", now.String(),
"last scale time", sa.Status.LastScaleTime,
)
return false
case desiredProcessingUnits < currentProcessingUnits && r.clock.Now().Before(sa.Status.LastScaleTime.Time.Add(r.scaleDownInterval)):
log.Info("too short to scale down since instance scaled nodes last",
"now", now.String(),
"last scale time", sa.Status.LastScaleTime,
)
return false
default:
return true
}
}
// For testing purpose only
func calcDesiredNodes(currentCPU, currentNodes, targetCPU, minNodes, maxNodes, scaledownStepSize int) int {
return calcDesiredProcessingUnits(currentCPU, currentNodes*1000, targetCPU, minNodes*1000, maxNodes*1000, scaledownStepSize) / 1000
}
// nextValidProcessingUnits finds next valid value in processing units.
// https://cloud.google.com/spanner/docs/compute-capacity?hl=en
// Valid values are
// If processingUnits < 1000, processing units must be multiples of 100.
// If processingUnits >= 1000, processing units must be multiples of 1000.
func nextValidProcessingUnits(processingUnits int) int {
if processingUnits < 1000 {
return ((processingUnits / 100) + 1) * 100
}
return ((processingUnits / 1000) + 1) * 1000
}
func maxInt(first int, rest ...int) int {
result := first
for _, v := range rest {
if result < v {
result = v
}
}
return result
}
// calcDesiredProcessingUnits calculates the values needed to keep CPU utilization below TargetCPU.
func calcDesiredProcessingUnits(currentCPU, currentProcessingUnits, targetCPU, minProcessingUnits, maxProcessingUnits, scaledownStepSize int) int {
totalCPUProduct1000 := currentCPU * currentProcessingUnits
desiredProcessingUnits := maxInt(nextValidProcessingUnits(totalCPUProduct1000/targetCPU), currentProcessingUnits-scaledownStepSize*1000)
switch {
case desiredProcessingUnits < minProcessingUnits:
return minProcessingUnits
case desiredProcessingUnits > maxProcessingUnits:
return maxProcessingUnits
default:
return desiredProcessingUnits
}
}
func (r *SpannerAutoscalerReconciler) fetchCredentials(ctx context.Context, sa *spannerv1beta1.SpannerAutoscaler) (*syncer.Credentials, error) {
iamKeySecret := sa.Spec.Authentication.IAMKeySecret
impersonateConfig := sa.Spec.Authentication.ImpersonateConfig
// TODO: move this to 'validating' webhook
if iamKeySecret != nil && impersonateConfig != nil {
return nil, errInvalidExclusiveCredentials
}
switch sa.Spec.Authentication.Type {
case spannerv1beta1.AuthTypeSA:
if iamKeySecret.Name == "" {
return nil, errFetchServiceAccountJSONNoNameSpecified
}
if iamKeySecret.Key == "" {
return nil, errFetchServiceAccountJSONNoKeySpecified
}
var namespace string
// TODO: move this to 'defaulting' webhook
if iamKeySecret.Namespace == "" {
namespace = sa.Namespace
} else {
namespace = iamKeySecret.Namespace
}
var secret corev1.Secret
key := ctrlclient.ObjectKey{
Name: iamKeySecret.Name,
Namespace: namespace,
}
if err := r.apiReader.Get(ctx, key, &secret); err != nil {
if apierrors.IsNotFound(err) {
return nil, errFetchServiceAccountJSONNoSecretFound
}
return nil, err | serviceAccountJSON, ok := secret.Data[iamKeySecret.Key]
if !ok {
return nil, errFetchServiceAccountJSONNoSecretDataFound
}
return syncer.NewServiceAccountJSONCredentials(serviceAccountJSON), nil
case spannerv1beta1.AuthTypeImpersonation:
return syncer.NewServiceAccountImpersonate(impersonateConfig.TargetServiceAccount, impersonateConfig.Delegates), nil
default:
return syncer.NewADCCredentials(), nil
}
} | }
| random_line_split |
spannerautoscaler_controller.go | /*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controllers
import (
"context"
"errors"
"sync"
"time"
"github.com/go-logr/logr"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
utilclock "k8s.io/apimachinery/pkg/util/clock"
"k8s.io/client-go/tools/record"
ctrlbuilder "sigs.k8s.io/controller-runtime/pkg/builder"
ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
ctrlcontroller "sigs.k8s.io/controller-runtime/pkg/controller"
ctrlmanager "sigs.k8s.io/controller-runtime/pkg/manager"
ctrlreconcile "sigs.k8s.io/controller-runtime/pkg/reconcile"
spannerv1beta1 "github.com/mercari/spanner-autoscaler/api/v1beta1"
"github.com/mercari/spanner-autoscaler/pkg/logging"
"github.com/mercari/spanner-autoscaler/pkg/spanner"
"github.com/mercari/spanner-autoscaler/pkg/syncer"
)
var (
errFetchServiceAccountJSONNoNameSpecified = errors.New("no name specified")
errFetchServiceAccountJSONNoKeySpecified = errors.New("no key specified")
errFetchServiceAccountJSONNoSecretFound = errors.New("no secret found by specified name")
errFetchServiceAccountJSONNoSecretDataFound = errors.New("no secret found by specified key")
errInvalidExclusiveCredentials = errors.New("impersonateConfig and iamKeySecret are mutually exclusive")
)
// TODO: move this to 'defaulting' webhook
const defaultScaledownStepSize = 2
// SpannerAutoscalerReconciler reconciles a SpannerAutoscaler object.
type SpannerAutoscalerReconciler struct {
ctrlClient ctrlclient.Client
apiReader ctrlclient.Reader
scheme *runtime.Scheme
recorder record.EventRecorder
syncers map[types.NamespacedName]syncer.Syncer
scaleDownInterval time.Duration
clock utilclock.Clock
log logr.Logger
mu sync.RWMutex
}
var _ ctrlreconcile.Reconciler = (*SpannerAutoscalerReconciler)(nil)
type Option func(*SpannerAutoscalerReconciler)
func WithSyncers(syncers map[types.NamespacedName]syncer.Syncer) Option {
return func(r *SpannerAutoscalerReconciler) {
r.syncers = syncers
}
}
func WithScaleDownInterval(scaleDownInterval time.Duration) Option {
return func(r *SpannerAutoscalerReconciler) {
r.scaleDownInterval = scaleDownInterval
}
}
func WithClock(clock utilclock.Clock) Option {
return func(r *SpannerAutoscalerReconciler) {
r.clock = clock
}
}
func WithLog(log logr.Logger) Option {
return func(r *SpannerAutoscalerReconciler) {
r.log = log.WithName("spannerautoscaler")
}
}
// NewSpannerAutoscalerReconciler returns a new SpannerAutoscalerReconciler.
//
// Defaults (empty syncer map, 55-minute scale-down interval, the real clock
// and the given logger) are installed first and may be overridden by opts.
func NewSpannerAutoscalerReconciler(
	ctrlClient ctrlclient.Client,
	apiReader ctrlclient.Reader,
	scheme *runtime.Scheme,
	recorder record.EventRecorder,
	logger logr.Logger,
	opts ...Option,
) *SpannerAutoscalerReconciler {
	reconciler := &SpannerAutoscalerReconciler{
		ctrlClient:        ctrlClient,
		apiReader:         apiReader,
		scheme:            scheme,
		recorder:          recorder,
		syncers:           make(map[types.NamespacedName]syncer.Syncer),
		scaleDownInterval: 55 * time.Minute,
		clock:             utilclock.RealClock{},
		log:               logger,
	}
	for _, apply := range opts {
		apply(reconciler)
	}
	return reconciler
}
// +kubebuilder:rbac:groups=spanner.mercari.com,resources=spannerautoscalers,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=spanner.mercari.com,resources=spannerautoscalers/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=spanner.mercari.com,resources=spannerautoscalers/finalizers,verbs=update
// +kubebuilder:rbac:groups="",resources=events,verbs=create;patch
// +kubebuilder:rbac:groups="",resources=secrets,verbs=get,resourceNames=spanner-autoscaler-gcp-sa
// Reconcile implements ctrlreconcile.Reconciler.
//
// It manages the lifecycle of one background syncer per SpannerAutoscaler
// resource and, once metrics are available, computes the desired number of
// processing units and applies it to the target Spanner instance.
func (r *SpannerAutoscalerReconciler) Reconcile(ctx context.Context, req ctrlreconcile.Request) (ctrlreconcile.Result, error) {
	nn := req.NamespacedName
	log := r.log.WithValues("namespaced name", nn)

	// Look up any existing syncer under the read lock; the map is shared
	// across concurrent reconciliations.
	r.mu.RLock()
	s, syncerExists := r.syncers[nn]
	r.mu.RUnlock()

	var sa spannerv1beta1.SpannerAutoscaler
	if err := r.ctrlClient.Get(ctx, nn, &sa); err != nil {
		err = ctrlclient.IgnoreNotFound(err)
		if err != nil {
			log.Error(err, "failed to get spanner-autoscaler")
			return ctrlreconcile.Result{}, err
		}
		// The resource was deleted: stop and deregister its syncer, if any.
		log.V(2).Info("checking if a syncer exists")
		if syncerExists {
			s.Stop()
			r.mu.Lock()
			delete(r.syncers, nn)
			r.mu.Unlock()
			log.Info("stopped syncer")
		}
		return ctrlreconcile.Result{}, nil
	}

	// TODO: move this to the defaulting webhook
	if sa.Spec.ScaleConfig.ScaledownStepSize == 0 {
		sa.Spec.ScaleConfig.ScaledownStepSize = defaultScaledownStepSize
	}
	log.V(1).Info("resource status", "spannerautoscaler", sa)

	credentials, err := r.fetchCredentials(ctx, &sa)
	if err != nil {
		r.recorder.Event(&sa, corev1.EventTypeWarning, "ServiceAccountRequired", err.Error())
		log.Error(err, "failed to fetch service account")
		return ctrlreconcile.Result{}, err
	}

	// If the syncer does not exist, start a syncer.
	if !syncerExists {
		log.V(2).Info("syncer does not exist, starting a new syncer")
		ctx = logging.WithContext(ctx, log)
		if err := r.startSyncer(ctx, nn, sa.Spec.TargetInstance.ProjectID, sa.Spec.TargetInstance.InstanceID, credentials); err != nil {
			r.recorder.Event(&sa, corev1.EventTypeWarning, "FailedStartSyncer", err.Error())
			log.Error(err, "failed to start syncer")
			return ctrlreconcile.Result{}, err
		}
		return ctrlreconcile.Result{}, nil
	}

	// If target spanner instance or service account have been changed, then just replace syncer.
	if s.UpdateTarget(sa.Spec.TargetInstance.ProjectID, sa.Spec.TargetInstance.InstanceID, credentials) {
		s.Stop()
		r.mu.Lock()
		delete(r.syncers, nn)
		r.mu.Unlock()
		if err := r.startSyncer(ctx, nn, sa.Spec.TargetInstance.ProjectID, sa.Spec.TargetInstance.InstanceID, credentials); err != nil {
			r.recorder.Event(&sa, corev1.EventTypeWarning, "FailedStartSyncer", err.Error())
			log.Error(err, "failed to start syncer")
			return ctrlreconcile.Result{}, err
		}
		log.Info("replaced syncer", "namespaced name", sa)
		return ctrlreconcile.Result{}, nil
	}

	log.V(1).Info("checking to see if we need to calculate processing units", "sa", sa)
	if !r.needCalcProcessingUnits(&sa) {
		return ctrlreconcile.Result{}, nil
	}

	// All min/max/current values are normalized to processing units
	// (1 node == 1000 PUs) before the calculation.
	// TODO: change this to pass the object instead of so many parameters
	desiredProcessingUnits := calcDesiredProcessingUnits(
		sa.Status.CurrentHighPriorityCPUUtilization,
		normalizeProcessingUnitsOrNodes(sa.Status.CurrentProcessingUnits, sa.Status.CurrentNodes, sa.Spec.ScaleConfig.ComputeType),
		sa.Spec.ScaleConfig.TargetCPUUtilization.HighPriority,
		normalizeProcessingUnitsOrNodes(sa.Spec.ScaleConfig.ProcessingUnits.Min, sa.Spec.ScaleConfig.Nodes.Min, sa.Spec.ScaleConfig.ComputeType),
		normalizeProcessingUnitsOrNodes(sa.Spec.ScaleConfig.ProcessingUnits.Max, sa.Spec.ScaleConfig.Nodes.Max, sa.Spec.ScaleConfig.ComputeType),
		sa.Spec.ScaleConfig.ScaledownStepSize,
	)

	now := r.clock.Now()
	log.V(1).Info("processing units need to be changed", "desiredProcessingUnits", desiredProcessingUnits, "sa.Status", sa.Status)
	if !r.needUpdateProcessingUnits(&sa, desiredProcessingUnits, now) {
		return ctrlreconcile.Result{}, nil
	}

	if err := s.UpdateInstance(ctx, desiredProcessingUnits); err != nil {
		r.recorder.Event(&sa, corev1.EventTypeWarning, "FailedUpdateInstance", err.Error())
		log.Error(err, "failed to update spanner instance status")
		return ctrlreconcile.Result{}, err
	}

	r.recorder.Eventf(&sa, corev1.EventTypeNormal, "Updated", "Updated processing units of %s/%s from %d to %d", sa.Spec.TargetInstance.ProjectID, sa.Spec.TargetInstance.InstanceID,
		normalizeProcessingUnitsOrNodes(sa.Status.CurrentProcessingUnits, sa.Status.CurrentNodes, sa.Spec.ScaleConfig.ComputeType), desiredProcessingUnits)
	log.Info("updated nodes via google cloud api", "before", normalizeProcessingUnitsOrNodes(sa.Status.CurrentProcessingUnits, sa.Status.CurrentNodes, sa.Spec.ScaleConfig.ComputeType), "after", desiredProcessingUnits)

	// Record the new desired capacity and the scale time on a copy of the
	// resource status.
	saCopy := sa.DeepCopy()
	saCopy.Status.DesiredProcessingUnits = desiredProcessingUnits
	saCopy.Status.DesiredNodes = desiredProcessingUnits / 1000
	saCopy.Status.LastScaleTime = metav1.Time{Time: now}

	if err = r.ctrlClient.Status().Update(ctx, saCopy); err != nil {
		r.recorder.Event(&sa, corev1.EventTypeWarning, "FailedUpdateStatus", err.Error())
		log.Error(err, "failed to update spanner autoscaler status")
		return ctrlreconcile.Result{}, err
	}

	return ctrlreconcile.Result{}, nil
}
// TODO: convert all internal computations to processing units only
// normalizeProcessingUnitsOrNodes collapses the (pu, nodes) pair into a single
// processing-units value according to the configured compute type
// (1 node == 1000 processing units). Unknown compute types yield -1.
func normalizeProcessingUnitsOrNodes(pu, nodes int, computeType spannerv1beta1.ComputeType) int {
	if computeType == spannerv1beta1.ComputeTypePU {
		return pu
	}
	if computeType == spannerv1beta1.ComputeTypeNode {
		return nodes * 1000
	}
	return -1
}
// SetupWithManager sets up the controller with ctrlmanager.Manager.
func (r *SpannerAutoscalerReconciler) SetupWithManager(mgr ctrlmanager.Manager) error {
	// Watch SpannerAutoscaler resources and dispatch them to this reconciler.
	return ctrlbuilder.ControllerManagedBy(mgr).
		For(&spannerv1beta1.SpannerAutoscaler{}).
		WithOptions(ctrlcontroller.Options{Reconciler: r}).
		Complete(r)
}
// startSyncer creates a syncer for the given target instance, launches it in
// a background goroutine, and registers it in the syncer map under nn.
// The syncer runs until its Stop method is called (see Reconcile).
func (r *SpannerAutoscalerReconciler) startSyncer(ctx context.Context, nn types.NamespacedName, projectID, instanceID string, credentials *syncer.Credentials) error {
	log := logging.FromContext(ctx)
	s, err := syncer.New(ctx, r.ctrlClient, nn, projectID, instanceID, credentials, r.recorder, syncer.WithLog(log))
	if err != nil {
		return err
	}
	go s.Start()
	// Register under the write lock; reconciliations run concurrently.
	r.mu.Lock()
	r.syncers[nn] = s
	r.mu.Unlock()
	log.V(1).Info("added syncer")
	return nil
}
// needCalcProcessingUnits reports whether the autoscaler has enough state to
// compute a desired capacity: metrics must have been fetched at least once
// and the instance must be in the ready state.
func (r *SpannerAutoscalerReconciler) needCalcProcessingUnits(sa *spannerv1beta1.SpannerAutoscaler) bool {
	// TODO: Fix this to use only processing units
	if sa.Status.CurrentProcessingUnits == 0 && sa.Status.CurrentNodes == 0 {
		r.log.Info("current processing units have not fetched yet")
		return false
	}
	if sa.Status.InstanceState != spanner.StateReady {
		r.log.Info("instance state is not ready")
		return false
	}
	return true
}
// needUpdateProcessingUnits reports whether the instance should actually be
// resized to desiredProcessingUnits at time now.
//
// It returns false when the desired capacity equals the current one, when a
// scale-up would happen within 10s of the last scaling event, or when a
// scale-down would happen within r.scaleDownInterval of the last scaling
// event.
//
// FIX: the cooldown comparisons previously re-read r.clock.Now() even though
// the caller passes now (read from the same clock) — the logged "now" could
// differ from the instant actually compared. Use the now parameter
// consistently.
func (r *SpannerAutoscalerReconciler) needUpdateProcessingUnits(sa *spannerv1beta1.SpannerAutoscaler, desiredProcessingUnits int, now time.Time) bool {
	log := r.log
	currentProcessingUnits := normalizeProcessingUnitsOrNodes(sa.Status.CurrentProcessingUnits, sa.Status.CurrentNodes, sa.Spec.ScaleConfig.ComputeType)
	switch {
	case desiredProcessingUnits == currentProcessingUnits:
		log.V(0).Info("the desired number of processing units is equal to that of the current; no need to scale")
		return false
	case desiredProcessingUnits > currentProcessingUnits && now.Before(sa.Status.LastScaleTime.Time.Add(10*time.Second)):
		log.Info("too short to scale up since instance scaled last",
			"now", now.String(),
			"last scale time", sa.Status.LastScaleTime,
		)
		return false
	case desiredProcessingUnits < currentProcessingUnits && now.Before(sa.Status.LastScaleTime.Time.Add(r.scaleDownInterval)):
		log.Info("too short to scale down since instance scaled nodes last",
			"now", now.String(),
			"last scale time", sa.Status.LastScaleTime,
		)
		return false
	default:
		return true
	}
}
// For testing purpose only.
// calcDesiredNodes is a node-denominated wrapper around
// calcDesiredProcessingUnits (1 node == 1000 processing units).
func calcDesiredNodes(currentCPU, currentNodes, targetCPU, minNodes, maxNodes, scaledownStepSize int) int {
	return calcDesiredProcessingUnits(currentCPU, currentNodes*1000, targetCPU, minNodes*1000, maxNodes*1000, scaledownStepSize) / 1000
}
// nextValidProcessingUnits finds the next valid value in processing units,
// i.e. the smallest valid value strictly greater than processingUnits.
// https://cloud.google.com/spanner/docs/compute-capacity?hl=en
// Valid values are:
//   - below 1000: multiples of 100
//   - 1000 and above: multiples of 1000
func nextValidProcessingUnits(processingUnits int) int {
	step := 100
	if processingUnits >= 1000 {
		step = 1000
	}
	return (processingUnits/step + 1) * step
}
// maxInt returns the largest of first and any additional values.
func maxInt(first int, rest ...int) int {
	largest := first
	for _, candidate := range rest {
		if candidate > largest {
			largest = candidate
		}
	}
	return largest
}
// calcDesiredProcessingUnits calculates the values needed to keep CPU utilization below TargetCPU.
func calcDesiredProcessingUnits(currentCPU, currentProcessingUnits, targetCPU, minProcessingUnits, maxProcessingUnits, scaledownStepSize int) int |
// fetchCredentials resolves the GCP credentials to use for the target
// instance, based on the resource's authentication settings:
//   - AuthTypeSA: read a service-account JSON key from the referenced Secret.
//   - AuthTypeImpersonation: impersonate the configured service account.
//   - otherwise: fall back to Application Default Credentials.
//
// FIX: guard against nil IAMKeySecret/ImpersonateConfig — previously a
// resource with the auth type set but the matching config block omitted
// caused a nil-pointer dereference.
func (r *SpannerAutoscalerReconciler) fetchCredentials(ctx context.Context, sa *spannerv1beta1.SpannerAutoscaler) (*syncer.Credentials, error) {
	iamKeySecret := sa.Spec.Authentication.IAMKeySecret
	impersonateConfig := sa.Spec.Authentication.ImpersonateConfig
	// TODO: move this to 'validating' webhook
	if iamKeySecret != nil && impersonateConfig != nil {
		return nil, errInvalidExclusiveCredentials
	}
	switch sa.Spec.Authentication.Type {
	case spannerv1beta1.AuthTypeSA:
		if iamKeySecret == nil {
			return nil, errors.New("authentication type is service account but iamKeySecret is not specified")
		}
		if iamKeySecret.Name == "" {
			return nil, errFetchServiceAccountJSONNoNameSpecified
		}
		if iamKeySecret.Key == "" {
			return nil, errFetchServiceAccountJSONNoKeySpecified
		}
		// Default to the autoscaler's own namespace when none is given.
		// TODO: move this to 'defaulting' webhook
		namespace := iamKeySecret.Namespace
		if namespace == "" {
			namespace = sa.Namespace
		}
		var secret corev1.Secret
		key := ctrlclient.ObjectKey{
			Name:      iamKeySecret.Name,
			Namespace: namespace,
		}
		// apiReader reads straight from the API server, matching the
		// narrowly-scoped secrets RBAC marker above.
		if err := r.apiReader.Get(ctx, key, &secret); err != nil {
			if apierrors.IsNotFound(err) {
				return nil, errFetchServiceAccountJSONNoSecretFound
			}
			return nil, err
		}
		serviceAccountJSON, ok := secret.Data[iamKeySecret.Key]
		if !ok {
			return nil, errFetchServiceAccountJSONNoSecretDataFound
		}
		return syncer.NewServiceAccountJSONCredentials(serviceAccountJSON), nil
	case spannerv1beta1.AuthTypeImpersonation:
		if impersonateConfig == nil {
			return nil, errors.New("authentication type is impersonation but impersonateConfig is not specified")
		}
		return syncer.NewServiceAccountImpersonate(impersonateConfig.TargetServiceAccount, impersonateConfig.Delegates), nil
	default:
		return syncer.NewADCCredentials(), nil
	}
}
| {
totalCPUProduct1000 := currentCPU * currentProcessingUnits
desiredProcessingUnits := maxInt(nextValidProcessingUnits(totalCPUProduct1000/targetCPU), currentProcessingUnits-scaledownStepSize*1000)
switch {
case desiredProcessingUnits < minProcessingUnits:
return minProcessingUnits
case desiredProcessingUnits > maxProcessingUnits:
return maxProcessingUnits
default:
return desiredProcessingUnits
}
} | identifier_body |
spannerautoscaler_controller.go | /*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controllers
import (
"context"
"errors"
"sync"
"time"
"github.com/go-logr/logr"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
utilclock "k8s.io/apimachinery/pkg/util/clock"
"k8s.io/client-go/tools/record"
ctrlbuilder "sigs.k8s.io/controller-runtime/pkg/builder"
ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
ctrlcontroller "sigs.k8s.io/controller-runtime/pkg/controller"
ctrlmanager "sigs.k8s.io/controller-runtime/pkg/manager"
ctrlreconcile "sigs.k8s.io/controller-runtime/pkg/reconcile"
spannerv1beta1 "github.com/mercari/spanner-autoscaler/api/v1beta1"
"github.com/mercari/spanner-autoscaler/pkg/logging"
"github.com/mercari/spanner-autoscaler/pkg/spanner"
"github.com/mercari/spanner-autoscaler/pkg/syncer"
)
var (
errFetchServiceAccountJSONNoNameSpecified = errors.New("no name specified")
errFetchServiceAccountJSONNoKeySpecified = errors.New("no key specified")
errFetchServiceAccountJSONNoSecretFound = errors.New("no secret found by specified name")
errFetchServiceAccountJSONNoSecretDataFound = errors.New("no secret found by specified key")
errInvalidExclusiveCredentials = errors.New("impersonateConfig and iamKeySecret are mutually exclusive")
)
// TODO: move this to 'defaulting' webhook
const defaultScaledownStepSize = 2
// SpannerAutoscalerReconciler reconciles a SpannerAutoscaler object.
type SpannerAutoscalerReconciler struct {
ctrlClient ctrlclient.Client
apiReader ctrlclient.Reader
scheme *runtime.Scheme
recorder record.EventRecorder
syncers map[types.NamespacedName]syncer.Syncer
scaleDownInterval time.Duration
clock utilclock.Clock
log logr.Logger
mu sync.RWMutex
}
var _ ctrlreconcile.Reconciler = (*SpannerAutoscalerReconciler)(nil)
type Option func(*SpannerAutoscalerReconciler)
func WithSyncers(syncers map[types.NamespacedName]syncer.Syncer) Option {
return func(r *SpannerAutoscalerReconciler) {
r.syncers = syncers
}
}
func WithScaleDownInterval(scaleDownInterval time.Duration) Option {
return func(r *SpannerAutoscalerReconciler) {
r.scaleDownInterval = scaleDownInterval
}
}
func WithClock(clock utilclock.Clock) Option {
return func(r *SpannerAutoscalerReconciler) {
r.clock = clock
}
}
func WithLog(log logr.Logger) Option {
return func(r *SpannerAutoscalerReconciler) {
r.log = log.WithName("spannerautoscaler")
}
}
// NewSpannerAutoscalerReconciler returns a new SpannerAutoscalerReconciler.
func NewSpannerAutoscalerReconciler(
ctrlClient ctrlclient.Client,
apiReader ctrlclient.Reader,
scheme *runtime.Scheme,
recorder record.EventRecorder,
logger logr.Logger,
opts ...Option,
) *SpannerAutoscalerReconciler {
r := &SpannerAutoscalerReconciler{
ctrlClient: ctrlClient,
apiReader: apiReader,
scheme: scheme,
recorder: recorder,
syncers: make(map[types.NamespacedName]syncer.Syncer),
scaleDownInterval: 55 * time.Minute,
clock: utilclock.RealClock{},
log: logger,
}
for _, opt := range opts {
opt(r)
}
return r
}
// +kubebuilder:rbac:groups=spanner.mercari.com,resources=spannerautoscalers,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=spanner.mercari.com,resources=spannerautoscalers/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=spanner.mercari.com,resources=spannerautoscalers/finalizers,verbs=update
// +kubebuilder:rbac:groups="",resources=events,verbs=create;patch
// +kubebuilder:rbac:groups="",resources=secrets,verbs=get,resourceNames=spanner-autoscaler-gcp-sa
// Reconcile implements ctrlreconcile.Reconciler.
func (r *SpannerAutoscalerReconciler) Reconcile(ctx context.Context, req ctrlreconcile.Request) (ctrlreconcile.Result, error) {
nn := req.NamespacedName
log := r.log.WithValues("namespaced name", nn)
r.mu.RLock()
s, syncerExists := r.syncers[nn]
r.mu.RUnlock()
var sa spannerv1beta1.SpannerAutoscaler
if err := r.ctrlClient.Get(ctx, nn, &sa); err != nil {
err = ctrlclient.IgnoreNotFound(err)
if err != nil {
log.Error(err, "failed to get spanner-autoscaler")
return ctrlreconcile.Result{}, err
}
log.V(2).Info("checking if a syncer exists")
if syncerExists |
return ctrlreconcile.Result{}, nil
}
// TODO: move this to the defaulting webhook
if sa.Spec.ScaleConfig.ScaledownStepSize == 0 {
sa.Spec.ScaleConfig.ScaledownStepSize = defaultScaledownStepSize
}
log.V(1).Info("resource status", "spannerautoscaler", sa)
credentials, err := r.fetchCredentials(ctx, &sa)
if err != nil {
r.recorder.Event(&sa, corev1.EventTypeWarning, "ServiceAccountRequired", err.Error())
log.Error(err, "failed to fetch service account")
return ctrlreconcile.Result{}, err
}
// If the syncer does not exist, start a syncer.
if !syncerExists {
log.V(2).Info("syncer does not exist, starting a new syncer")
ctx = logging.WithContext(ctx, log)
if err := r.startSyncer(ctx, nn, sa.Spec.TargetInstance.ProjectID, sa.Spec.TargetInstance.InstanceID, credentials); err != nil {
r.recorder.Event(&sa, corev1.EventTypeWarning, "FailedStartSyncer", err.Error())
log.Error(err, "failed to start syncer")
return ctrlreconcile.Result{}, err
}
return ctrlreconcile.Result{}, nil
}
// If target spanner instance or service account have been changed, then just replace syncer.
if s.UpdateTarget(sa.Spec.TargetInstance.ProjectID, sa.Spec.TargetInstance.InstanceID, credentials) {
s.Stop()
r.mu.Lock()
delete(r.syncers, nn)
r.mu.Unlock()
if err := r.startSyncer(ctx, nn, sa.Spec.TargetInstance.ProjectID, sa.Spec.TargetInstance.InstanceID, credentials); err != nil {
r.recorder.Event(&sa, corev1.EventTypeWarning, "FailedStartSyncer", err.Error())
log.Error(err, "failed to start syncer")
return ctrlreconcile.Result{}, err
}
log.Info("replaced syncer", "namespaced name", sa)
return ctrlreconcile.Result{}, nil
}
log.V(1).Info("checking to see if we need to calculate processing units", "sa", sa)
if !r.needCalcProcessingUnits(&sa) {
return ctrlreconcile.Result{}, nil
}
// TODO: change this to pass the object instead of so many parameters
desiredProcessingUnits := calcDesiredProcessingUnits(
sa.Status.CurrentHighPriorityCPUUtilization,
normalizeProcessingUnitsOrNodes(sa.Status.CurrentProcessingUnits, sa.Status.CurrentNodes, sa.Spec.ScaleConfig.ComputeType),
sa.Spec.ScaleConfig.TargetCPUUtilization.HighPriority,
normalizeProcessingUnitsOrNodes(sa.Spec.ScaleConfig.ProcessingUnits.Min, sa.Spec.ScaleConfig.Nodes.Min, sa.Spec.ScaleConfig.ComputeType),
normalizeProcessingUnitsOrNodes(sa.Spec.ScaleConfig.ProcessingUnits.Max, sa.Spec.ScaleConfig.Nodes.Max, sa.Spec.ScaleConfig.ComputeType),
sa.Spec.ScaleConfig.ScaledownStepSize,
)
now := r.clock.Now()
log.V(1).Info("processing units need to be changed", "desiredProcessingUnits", desiredProcessingUnits, "sa.Status", sa.Status)
if !r.needUpdateProcessingUnits(&sa, desiredProcessingUnits, now) {
return ctrlreconcile.Result{}, nil
}
if err := s.UpdateInstance(ctx, desiredProcessingUnits); err != nil {
r.recorder.Event(&sa, corev1.EventTypeWarning, "FailedUpdateInstance", err.Error())
log.Error(err, "failed to update spanner instance status")
return ctrlreconcile.Result{}, err
}
r.recorder.Eventf(&sa, corev1.EventTypeNormal, "Updated", "Updated processing units of %s/%s from %d to %d", sa.Spec.TargetInstance.ProjectID, sa.Spec.TargetInstance.InstanceID,
normalizeProcessingUnitsOrNodes(sa.Status.CurrentProcessingUnits, sa.Status.CurrentNodes, sa.Spec.ScaleConfig.ComputeType), desiredProcessingUnits)
log.Info("updated nodes via google cloud api", "before", normalizeProcessingUnitsOrNodes(sa.Status.CurrentProcessingUnits, sa.Status.CurrentNodes, sa.Spec.ScaleConfig.ComputeType), "after", desiredProcessingUnits)
saCopy := sa.DeepCopy()
saCopy.Status.DesiredProcessingUnits = desiredProcessingUnits
saCopy.Status.DesiredNodes = desiredProcessingUnits / 1000
saCopy.Status.LastScaleTime = metav1.Time{Time: now}
if err = r.ctrlClient.Status().Update(ctx, saCopy); err != nil {
r.recorder.Event(&sa, corev1.EventTypeWarning, "FailedUpdateStatus", err.Error())
log.Error(err, "failed to update spanner autoscaler status")
return ctrlreconcile.Result{}, err
}
return ctrlreconcile.Result{}, nil
}
// TODO: convert all internal computations to processing units only
func normalizeProcessingUnitsOrNodes(pu, nodes int, computeType spannerv1beta1.ComputeType) int {
switch computeType {
case spannerv1beta1.ComputeTypePU:
return pu
case spannerv1beta1.ComputeTypeNode:
return nodes * 1000
default:
return -1
}
}
// SetupWithManager sets up the controller with ctrlmanager.Manager.
func (r *SpannerAutoscalerReconciler) SetupWithManager(mgr ctrlmanager.Manager) error {
opts := ctrlcontroller.Options{
Reconciler: r,
}
return ctrlbuilder.ControllerManagedBy(mgr).
For(&spannerv1beta1.SpannerAutoscaler{}).
WithOptions(opts).
Complete(r)
}
func (r *SpannerAutoscalerReconciler) startSyncer(ctx context.Context, nn types.NamespacedName, projectID, instanceID string, credentials *syncer.Credentials) error {
log := logging.FromContext(ctx)
s, err := syncer.New(ctx, r.ctrlClient, nn, projectID, instanceID, credentials, r.recorder, syncer.WithLog(log))
if err != nil {
return err
}
go s.Start()
r.mu.Lock()
r.syncers[nn] = s
r.mu.Unlock()
log.V(1).Info("added syncer")
return nil
}
func (r *SpannerAutoscalerReconciler) needCalcProcessingUnits(sa *spannerv1beta1.SpannerAutoscaler) bool {
log := r.log
switch {
// TODO: Fix this to use only processing units
case sa.Status.CurrentProcessingUnits == 0 && sa.Status.CurrentNodes == 0:
log.Info("current processing units have not fetched yet")
return false
case sa.Status.InstanceState != spanner.StateReady:
log.Info("instance state is not ready")
return false
default:
return true
}
}
func (r *SpannerAutoscalerReconciler) needUpdateProcessingUnits(sa *spannerv1beta1.SpannerAutoscaler, desiredProcessingUnits int, now time.Time) bool {
log := r.log
currentProcessingUnits := normalizeProcessingUnitsOrNodes(sa.Status.CurrentProcessingUnits, sa.Status.CurrentNodes, sa.Spec.ScaleConfig.ComputeType)
switch {
case desiredProcessingUnits == currentProcessingUnits:
log.V(0).Info("the desired number of processing units is equal to that of the current; no need to scale")
return false
case desiredProcessingUnits > currentProcessingUnits && r.clock.Now().Before(sa.Status.LastScaleTime.Time.Add(10*time.Second)):
log.Info("too short to scale up since instance scaled last",
"now", now.String(),
"last scale time", sa.Status.LastScaleTime,
)
return false
case desiredProcessingUnits < currentProcessingUnits && r.clock.Now().Before(sa.Status.LastScaleTime.Time.Add(r.scaleDownInterval)):
log.Info("too short to scale down since instance scaled nodes last",
"now", now.String(),
"last scale time", sa.Status.LastScaleTime,
)
return false
default:
return true
}
}
// For testing purpose only
func calcDesiredNodes(currentCPU, currentNodes, targetCPU, minNodes, maxNodes, scaledownStepSize int) int {
return calcDesiredProcessingUnits(currentCPU, currentNodes*1000, targetCPU, minNodes*1000, maxNodes*1000, scaledownStepSize) / 1000
}
// nextValidProcessingUnits finds next valid value in processing units.
// https://cloud.google.com/spanner/docs/compute-capacity?hl=en
// Valid values are
// If processingUnits < 1000, processing units must be multiples of 100.
// If processingUnits >= 1000, processing units must be multiples of 1000.
func nextValidProcessingUnits(processingUnits int) int {
if processingUnits < 1000 {
return ((processingUnits / 100) + 1) * 100
}
return ((processingUnits / 1000) + 1) * 1000
}
func maxInt(first int, rest ...int) int {
result := first
for _, v := range rest {
if result < v {
result = v
}
}
return result
}
// calcDesiredProcessingUnits calculates the values needed to keep CPU
// utilization below TargetCPU. The raw projection
// currentCPU*currentProcessingUnits/targetCPU is rounded up to the next valid
// processing-unit step, a scale-down is limited to scaledownStepSize*1000 PUs
// per step, and the result is clamped to [minProcessingUnits, maxProcessingUnits].
func calcDesiredProcessingUnits(currentCPU, currentProcessingUnits, targetCPU, minProcessingUnits, maxProcessingUnits, scaledownStepSize int) int {
	// Round the projection up to the next valid capacity: multiples of 100
	// below 1000 PUs, multiples of 1000 at and above 1000 PUs.
	projected := currentCPU * currentProcessingUnits / targetCPU
	step := 100
	if projected >= 1000 {
		step = 1000
	}
	desired := (projected/step + 1) * step

	// Never scale down by more than the configured step size.
	if floor := currentProcessingUnits - scaledownStepSize*1000; floor > desired {
		desired = floor
	}

	// Clamp to the configured bounds.
	if desired < minProcessingUnits {
		return minProcessingUnits
	}
	if desired > maxProcessingUnits {
		return maxProcessingUnits
	}
	return desired
}
func (r *SpannerAutoscalerReconciler) fetchCredentials(ctx context.Context, sa *spannerv1beta1.SpannerAutoscaler) (*syncer.Credentials, error) {
iamKeySecret := sa.Spec.Authentication.IAMKeySecret
impersonateConfig := sa.Spec.Authentication.ImpersonateConfig
// TODO: move this to 'validating' webhook
if iamKeySecret != nil && impersonateConfig != nil {
return nil, errInvalidExclusiveCredentials
}
switch sa.Spec.Authentication.Type {
case spannerv1beta1.AuthTypeSA:
if iamKeySecret.Name == "" {
return nil, errFetchServiceAccountJSONNoNameSpecified
}
if iamKeySecret.Key == "" {
return nil, errFetchServiceAccountJSONNoKeySpecified
}
var namespace string
// TODO: move this to 'defaulting' webhook
if iamKeySecret.Namespace == "" {
namespace = sa.Namespace
} else {
namespace = iamKeySecret.Namespace
}
var secret corev1.Secret
key := ctrlclient.ObjectKey{
Name: iamKeySecret.Name,
Namespace: namespace,
}
if err := r.apiReader.Get(ctx, key, &secret); err != nil {
if apierrors.IsNotFound(err) {
return nil, errFetchServiceAccountJSONNoSecretFound
}
return nil, err
}
serviceAccountJSON, ok := secret.Data[iamKeySecret.Key]
if !ok {
return nil, errFetchServiceAccountJSONNoSecretDataFound
}
return syncer.NewServiceAccountJSONCredentials(serviceAccountJSON), nil
case spannerv1beta1.AuthTypeImpersonation:
return syncer.NewServiceAccountImpersonate(impersonateConfig.TargetServiceAccount, impersonateConfig.Delegates), nil
default:
return syncer.NewADCCredentials(), nil
}
}
| {
s.Stop()
r.mu.Lock()
delete(r.syncers, nn)
r.mu.Unlock()
log.Info("stopped syncer")
} | conditional_block |
query.go | package frontend
import (
"fmt"
"math"
"net/http"
"strings"
"sync/atomic"
"time"
"github.com/alpacahq/marketstore/v4/catalog"
"github.com/alpacahq/marketstore/v4/executor"
"github.com/alpacahq/marketstore/v4/planner"
"github.com/alpacahq/marketstore/v4/sqlparser"
"github.com/alpacahq/marketstore/v4/utils"
"github.com/alpacahq/marketstore/v4/utils/io"
"github.com/alpacahq/marketstore/v4/utils/log"
)
// This is the parameter interface for DataService.Query method.
type QueryRequest struct {
// Note: SQL is not fully supported
IsSQLStatement bool `msgpack:"is_sqlstatement"` // If this is a SQL request, Only SQLStatement is relevant
SQLStatement string `msgpack:"sql_statement"`
// Destination is <symbol>/<timeframe>/<attributegroup>
Destination string `msgpack:"destination"`
// This is not usually set, defaults to Symbol/Timeframe/AttributeGroup
KeyCategory string `msgpack:"key_category,omitempty"`
// Lower time predicate (i.e. index >= start) in unix epoch second
EpochStart *int64 `msgpack:"epoch_start,omitempty"`
// Nanosecond of the lower time predicate
EpochStartNanos *int64 `msgpack:"epoch_start_nanos,omitempty"`
// Upper time predicate (i.e. index <= end) in unix epoch second
EpochEnd *int64 `msgpack:"epoch_end,omitempty"`
// Nanosecond of the upper time predicate
EpochEndNanos *int64 `msgpack:"epoch_end_nanos,omitempty"`
// Number of max returned rows from lower/upper bound
LimitRecordCount *int `msgpack:"limit_record_count,omitempty"`
// Set to true if LimitRecordCount should be from the lower
LimitFromStart *bool `msgpack:"limit_from_start,omitempty"`
// Array of column names to be returned
Columns []string `msgpack:"columns,omitempty"`
// Support for functions is experimental and subject to change
Functions []string `msgpack:"functions,omitempty"`
}
// MultiQueryRequest bundles several QueryRequests into one RPC call.
type MultiQueryRequest struct {
	/*
		A multi-request allows for different Timeframes and record formats for each request
	*/
	Requests []QueryRequest `msgpack:"requests"`
}

// QueryResponse holds the result of a single QueryRequest.
type QueryResponse struct {
	Result *io.NumpyMultiDataset `msgpack:"result"`
}

// MultiQueryResponse is the aggregate reply for a MultiQueryRequest,
// carrying one QueryResponse per request plus server metadata.
type MultiQueryResponse struct {
	Responses []QueryResponse `msgpack:"responses"`
	Version   string          `msgpack:"version"`  // Server Version
	Timezone  string          `msgpack:"timezone"` // Server Timezone
}
// ToColumnSeriesMap converts a MultiQueryResponse to a
// ColumnSeriesMap, returning an error if there is any
// issue encountered while converting.
//
// A nil receiver yields (nil, nil).
func (resp *MultiQueryResponse) ToColumnSeriesMap() (*io.ColumnSeriesMap, error) {
	if resp == nil {
		return nil, nil
	}
	csm := io.NewColumnSeriesMap()
	for _, ds := range resp.Responses { // Datasets are packed in a slice, each has a NumpyMultiDataset inside
		nmds := ds.Result
		// Each TimeBucketKey slice inside the multi-dataset becomes one
		// ColumnSeries entry in the map.
		for tbkStr, startIndex := range nmds.StartIndex {
			cs, err := nmds.ToColumnSeries(startIndex, nmds.Lengths[tbkStr])
			if err != nil {
				return nil, err
			}
			tbk := io.NewTimeBucketKeyFromString(tbkStr)
			csm[*tbk] = cs
		}
	}
	return &csm, nil
}
func (s *DataService) Query(r *http.Request, reqs *MultiQueryRequest, response *MultiQueryResponse) (err error) {
response.Version = utils.GitHash
response.Timezone = utils.InstanceConfig.Timezone.String()
for i := range reqs.Requests {
var (
resp *QueryResponse
err error
)
// SQL
if reqs.Requests[i].IsSQLStatement {
resp, err = s.executeSQL(reqs.Requests[i].SQLStatement)
if err != nil { | } else {
// Query
resp, err = s.executeQuery(&reqs.Requests[i])
if err != nil {
return err
}
}
response.Responses = append(response.Responses, *resp)
}
return nil
}
// executeSQL parses, plans and materializes a SQL statement, wrapping the
// resulting column series in a NumpyMultiDataset keyed by the statement text.
func (s *DataService) executeSQL(sqlStatement string) (*QueryResponse, error) {
	tree, err := sqlparser.BuildQueryTree(sqlStatement)
	if err != nil {
		return nil, err
	}
	stmt, err := sqlparser.NewExecutableStatement(tree)
	if err != nil {
		return nil, err
	}
	resultSeries, err := stmt.Materialize(s.aggRunner, s.catalogDir)
	if err != nil {
		return nil, err
	}
	dataset, err := io.NewNumpyDataset(resultSeries)
	if err != nil {
		return nil, err
	}
	// Use the statement itself (tagged ":SQL") as a synthetic bucket key.
	bucketKey := io.NewTimeBucketKeyFromString(sqlStatement + ":SQL")
	multiDataset, err := io.NewNumpyMultiDataset(dataset, *bucketKey)
	if err != nil {
		return nil, err
	}
	return &QueryResponse{multiDataset}, nil
}
// executeQuery runs a single (non-SQL) query request: it resolves the
// destination key (expanding the "*" symbol wildcard), applies the time and
// row-limit predicates, optionally pipes the results through the requested
// functions, and packs everything into one NumpyMultiDataset.
//
// FIX: the NewNumpyDataset error check below previously tested `err`
// (guaranteed nil at that point) instead of `err2`, silently discarding
// dataset-construction failures.
func (s *DataService) executeQuery(req *QueryRequest) (*QueryResponse, error) {
	/*
		Assumption: Within each TimeBucketKey, we have one or more of each category, with the exception of
		the AttributeGroup (aka Record Format) and Timeframe
		Within each TimeBucketKey in the request, we allow for a comma separated list of items, e.g.:
			destination1.items := "TSLA,AAPL,CG/1Min/OHLCV"
		Constraints:
		- If there is more than one record format in a single destination, we return an error
		- If there is more than one Timeframe in a single destination, we return an error
	*/
	dest := io.NewTimeBucketKey(req.Destination, req.KeyCategory)
	/*
		All destinations in a request must share the same record format (AttributeGroup) and Timeframe
	*/
	RecordFormat := dest.GetItemInCategory("AttributeGroup")
	Timeframe := dest.GetItemInCategory("Timeframe")
	Symbols := dest.GetMultiItemInCategory("Symbol")

	if len(Timeframe) == 0 || len(RecordFormat) == 0 || len(Symbols) == 0 {
		return nil, fmt.Errorf("destinations must have a Symbol, Timeframe and AttributeGroup, have: %s",
			dest.String())
	} else if len(Symbols) == 1 && Symbols[0] == "*" {
		// replace the * "symbol" with a list all known actual symbols
		symbols, err := gatherAllSymbols(s.catalogDir)
		if err != nil {
			return nil, err
		}
		keyParts := []string{strings.Join(symbols, ","), Timeframe, RecordFormat}
		itemKey := strings.Join(keyParts, "/")
		dest = io.NewTimeBucketKey(itemKey, req.KeyCategory)
	}

	// Time predicates default to the full epoch range when unset; nanosecond
	// components are only honored when the second component is present.
	epochStart := int64(0)
	epochEnd := int64(math.MaxInt64)
	var epochStartNanos, epochEndNanos int64
	if req.EpochStart != nil {
		epochStart = *req.EpochStart
		if req.EpochStartNanos != nil {
			epochStartNanos = *req.EpochStartNanos
		}
	}
	if req.EpochEnd != nil {
		epochEnd = *req.EpochEnd
		if req.EpochEndNanos != nil {
			epochEndNanos = *req.EpochEndNanos
		}
	}
	limitRecordCount := 0
	if req.LimitRecordCount != nil {
		limitRecordCount = *req.LimitRecordCount
	}
	limitFromStart := false
	if req.LimitFromStart != nil {
		limitFromStart = *req.LimitFromStart
	}
	columns := make([]string, 0)
	if req.Columns != nil {
		columns = req.Columns
	}

	start := io.ToSystemTimezone(time.Unix(epochStart, epochStartNanos))
	end := io.ToSystemTimezone(time.Unix(epochEnd, epochEndNanos))
	csm, err := s.query.ExecuteQuery(
		dest,
		start, end,
		limitRecordCount, limitFromStart,
		columns,
	)
	if err != nil {
		return nil, err
	}

	/*
		Execute function pipeline, if requested
	*/
	if len(req.Functions) != 0 {
		for tbkStr, cs := range csm {
			csOut, err2 := s.aggRunner.Run(req.Functions, cs, tbkStr)
			if err2 != nil {
				return nil, err2
			}
			csm[tbkStr] = csOut
		}
	}

	/*
		Separate each TimeBucket from the result and compose a NumpyMultiDataset
	*/
	var nmds *io.NumpyMultiDataset
	for tbk, cs := range csm {
		nds, err2 := io.NewNumpyDataset(cs)
		if err2 != nil {
			return nil, err2
		}
		if nmds == nil {
			nmds, err = io.NewNumpyMultiDataset(nds, tbk)
			if err != nil {
				return nil, err
			}
		} else {
			err3 := nmds.Append(cs, tbk)
			if err3 != nil {
				return nil, fmt.Errorf("symbols in a query must have the same data type "+
					"or be filtered by common columns. symbols=%v", csm.GetMetadataKeys(),
				)
			}
		}
	}
	return &QueryResponse{nmds}, nil
}
type ListSymbolsResponse struct {
Results []string
}
type ListSymbolsRequest struct {
// "symbol", or "tbk"
Format string `msgpack:"format,omitempty"`
}
func (s *DataService) ListSymbols(r *http.Request, req *ListSymbolsRequest, response *ListSymbolsResponse) (err error) {
if atomic.LoadUint32(&Queryable) == 0 {
return errNotQueryable
}
// TBK format (e.g. ["AMZN/1Min/TICK", "AAPL/1Sec/OHLCV", ...])
if req != nil && req.Format == "tbk" {
response.Results = catalog.ListTimeBucketKeyNames(s.catalogDir)
return nil
}
// Symbol format (e.g. ["AMZN", "AAPL", ...])
ret, err := s.catalogDir.GatherCategoriesAndItems()
if err != nil {
return fmt.Errorf("gather categories and items from catalog dir to list symbols: %w", err)
}
symbols := ret["Symbol"]
response.Results = make([]string, len(symbols))
cnt := 0
for symbol := range symbols {
response.Results[cnt] = symbol
cnt++
}
return nil
}
/*
Utility functions
*/
type QueryService struct {
catalogDir *catalog.Directory
}
func NewQueryService(catDir *catalog.Directory) *QueryService {
return &QueryService{
catalogDir: catDir,
}
}
func (qs *QueryService) ExecuteQuery(tbk *io.TimeBucketKey, start, end time.Time, limitRecordCount int,
limitFromStart bool, columns []string,
) (io.ColumnSeriesMap, error) {
query := planner.NewQuery(qs.catalogDir)
/*
Alter timeframe inside key to ensure it matches a queryable TF
*/
tf := tbk.GetItemInCategory("Timeframe")
cd, err := utils.CandleDurationFromString(tf)
if err != nil {
return nil, fmt.Errorf("timeframe not found in TimeBucketKey=%s: %w", tbk.String(), err)
}
queryableTimeframe := cd.QueryableTimeframe()
tbk.SetItemInCategory("Timeframe", queryableTimeframe)
query.AddTargetKey(tbk)
if limitRecordCount != 0 {
direction := io.LAST
if limitFromStart {
direction = io.FIRST
}
query.SetRowLimit(
direction,
cd.QueryableNrecords(
queryableTimeframe,
limitRecordCount,
),
)
}
query.SetRange(start, end)
parseResult, err := query.Parse()
if err != nil {
// No results from query
if err.Error() == "no files returned from query parse" {
log.Info("no results returned from query: Target: %v, start, end: %v,%v limitRecordCount: %v",
tbk.String(), start, end, limitRecordCount)
} else {
log.Error("Parsing query: %s\n", err)
}
return nil, err
}
scanner, err := executor.NewReader(parseResult)
if err != nil {
log.Error("Unable to create scanner: %s\n", err)
return nil, err
}
csm, err := scanner.Read()
if err != nil {
log.Error("Error returned from query scanner: %s\n", err)
return nil, err
}
csm.FilterColumns(columns)
return csm, err
} | return err
} | random_line_split |
query.go | package frontend
import (
"fmt"
"math"
"net/http"
"strings"
"sync/atomic"
"time"
"github.com/alpacahq/marketstore/v4/catalog"
"github.com/alpacahq/marketstore/v4/executor"
"github.com/alpacahq/marketstore/v4/planner"
"github.com/alpacahq/marketstore/v4/sqlparser"
"github.com/alpacahq/marketstore/v4/utils"
"github.com/alpacahq/marketstore/v4/utils/io"
"github.com/alpacahq/marketstore/v4/utils/log"
)
// This is the parameter interface for DataService.Query method.
type QueryRequest struct {
// Note: SQL is not fully supported
IsSQLStatement bool `msgpack:"is_sqlstatement"` // If this is a SQL request, Only SQLStatement is relevant
SQLStatement string `msgpack:"sql_statement"`
// Destination is <symbol>/<timeframe>/<attributegroup>
Destination string `msgpack:"destination"`
// This is not usually set, defaults to Symbol/Timeframe/AttributeGroup
KeyCategory string `msgpack:"key_category,omitempty"`
// Lower time predicate (i.e. index >= start) in unix epoch second
EpochStart *int64 `msgpack:"epoch_start,omitempty"`
// Nanosecond of the lower time predicate
EpochStartNanos *int64 `msgpack:"epoch_start_nanos,omitempty"`
// Upper time predicate (i.e. index <= end) in unix epoch second
EpochEnd *int64 `msgpack:"epoch_end,omitempty"`
// Nanosecond of the upper time predicate
EpochEndNanos *int64 `msgpack:"epoch_end_nanos,omitempty"`
// Number of max returned rows from lower/upper bound
LimitRecordCount *int `msgpack:"limit_record_count,omitempty"`
// Set to true if LimitRecordCount should be from the lower
LimitFromStart *bool `msgpack:"limit_from_start,omitempty"`
// Array of column names to be returned
Columns []string `msgpack:"columns,omitempty"`
// Support for functions is experimental and subject to change
Functions []string `msgpack:"functions,omitempty"`
}
type MultiQueryRequest struct {
/*
A multi-request allows for different Timeframes and record formats for each request
*/
Requests []QueryRequest `msgpack:"requests"`
}
type QueryResponse struct {
Result *io.NumpyMultiDataset `msgpack:"result"`
}
type MultiQueryResponse struct {
Responses []QueryResponse `msgpack:"responses"`
Version string `msgpack:"version"` // Server Version
Timezone string `msgpack:"timezone"` // Server Timezone
}
// ToColumnSeriesMap converts a MultiQueryResponse to a
// ColumnSeriesMap, returning an error if there is any
// issue encountered while converting.
func (resp *MultiQueryResponse) ToColumnSeriesMap() (*io.ColumnSeriesMap, error) {
if resp == nil {
return nil, nil
}
csm := io.NewColumnSeriesMap()
for _, ds := range resp.Responses { // Datasets are packed in a slice, each has a NumpyMultiDataset inside
nmds := ds.Result
for tbkStr, startIndex := range nmds.StartIndex {
cs, err := nmds.ToColumnSeries(startIndex, nmds.Lengths[tbkStr])
if err != nil {
return nil, err
}
tbk := io.NewTimeBucketKeyFromString(tbkStr)
csm[*tbk] = cs
}
}
return &csm, nil
}
func (s *DataService) Query(r *http.Request, reqs *MultiQueryRequest, response *MultiQueryResponse) (err error) {
response.Version = utils.GitHash
response.Timezone = utils.InstanceConfig.Timezone.String()
for i := range reqs.Requests {
var (
resp *QueryResponse
err error
)
// SQL
if reqs.Requests[i].IsSQLStatement {
resp, err = s.executeSQL(reqs.Requests[i].SQLStatement)
if err != nil {
return err
}
} else {
// Query
resp, err = s.executeQuery(&reqs.Requests[i])
if err != nil {
return err
}
}
response.Responses = append(response.Responses, *resp)
}
return nil
}
func (s *DataService) executeSQL(sqlStatement string) (*QueryResponse, error) {
queryTree, err := sqlparser.BuildQueryTree(sqlStatement)
if err != nil {
return nil, err
}
es, err := sqlparser.NewExecutableStatement(queryTree)
if err != nil {
return nil, err
}
cs, err := es.Materialize(s.aggRunner, s.catalogDir)
if err != nil {
return nil, err
}
nds, err := io.NewNumpyDataset(cs)
if err != nil {
return nil, err
}
tbk := io.NewTimeBucketKeyFromString(sqlStatement + ":SQL")
nmds, err := io.NewNumpyMultiDataset(nds, *tbk)
if err != nil {
return nil, err
}
return &QueryResponse{nmds}, nil
}
func (s *DataService) executeQuery(req *QueryRequest) (*QueryResponse, error) {
/*
Assumption: Within each TimeBucketKey, we have one or more of each category, with the exception of
the AttributeGroup (aka Record Format) and Timeframe
Within each TimeBucketKey in the request, we allow for a comma separated list of items, e.g.:
destination1.items := "TSLA,AAPL,CG/1Min/OHLCV"
Constraints:
- If there is more than one record format in a single destination, we return an error
- If there is more than one Timeframe in a single destination, we return an error
*/
dest := io.NewTimeBucketKey(req.Destination, req.KeyCategory)
/*
All destinations in a request must share the same record format (AttributeGroup) and Timeframe
*/
RecordFormat := dest.GetItemInCategory("AttributeGroup")
Timeframe := dest.GetItemInCategory("Timeframe")
Symbols := dest.GetMultiItemInCategory("Symbol")
if len(Timeframe) == 0 || len(RecordFormat) == 0 || len(Symbols) == 0 {
return nil, fmt.Errorf("destinations must have a Symbol, Timeframe and AttributeGroup, have: %s",
dest.String())
} else if len(Symbols) == 1 && Symbols[0] == "*" {
// replace the * "symbol" with a list all known actual symbols
symbols, err := gatherAllSymbols(s.catalogDir)
if err != nil {
return nil, err
}
keyParts := []string{strings.Join(symbols, ","), Timeframe, RecordFormat}
itemKey := strings.Join(keyParts, "/")
dest = io.NewTimeBucketKey(itemKey, req.KeyCategory)
}
epochStart := int64(0)
epochEnd := int64(math.MaxInt64)
var epochStartNanos, epochEndNanos int64
if req.EpochStart != nil {
epochStart = *req.EpochStart
if req.EpochStartNanos != nil {
epochStartNanos = *req.EpochStartNanos
}
}
if req.EpochEnd != nil {
epochEnd = *req.EpochEnd
if req.EpochEndNanos != nil {
epochEndNanos = *req.EpochEndNanos
}
}
limitRecordCount := 0
if req.LimitRecordCount != nil {
limitRecordCount = *req.LimitRecordCount
}
limitFromStart := false
if req.LimitFromStart != nil {
limitFromStart = *req.LimitFromStart
}
columns := make([]string, 0)
if req.Columns != nil {
columns = req.Columns
}
start := io.ToSystemTimezone(time.Unix(epochStart, epochStartNanos))
end := io.ToSystemTimezone(time.Unix(epochEnd, epochEndNanos))
csm, err := s.query.ExecuteQuery(
dest,
start, end,
limitRecordCount, limitFromStart,
columns,
)
if err != nil {
return nil, err
}
/*
Execute function pipeline, if requested
*/
if len(req.Functions) != 0 {
for tbkStr, cs := range csm {
csOut, err2 := s.aggRunner.Run(req.Functions, cs, tbkStr)
if err2 != nil {
return nil, err2
}
csm[tbkStr] = csOut
}
}
/*
Separate each TimeBucket from the result and compose a NumpyMultiDataset
*/
var nmds *io.NumpyMultiDataset
for tbk, cs := range csm {
nds, err2 := io.NewNumpyDataset(cs)
if err != nil {
return nil, err2
}
if nmds == nil {
nmds, err = io.NewNumpyMultiDataset(nds, tbk)
if err != nil {
return nil, err
}
} else {
err3 := nmds.Append(cs, tbk)
if err3 != nil {
return nil, fmt.Errorf("symbols in a query must have the same data type "+
"or be filtered by common columns. symbols=%v", csm.GetMetadataKeys(),
)
}
}
}
return &QueryResponse{nmds}, nil
}
type ListSymbolsResponse struct {
Results []string
}
type ListSymbolsRequest struct {
// "symbol", or "tbk"
Format string `msgpack:"format,omitempty"`
}
func (s *DataService) ListSymbols(r *http.Request, req *ListSymbolsRequest, response *ListSymbolsResponse) (err error) {
if atomic.LoadUint32(&Queryable) == 0 {
return errNotQueryable
}
// TBK format (e.g. ["AMZN/1Min/TICK", "AAPL/1Sec/OHLCV", ...])
if req != nil && req.Format == "tbk" {
response.Results = catalog.ListTimeBucketKeyNames(s.catalogDir)
return nil
}
// Symbol format (e.g. ["AMZN", "AAPL", ...])
ret, err := s.catalogDir.GatherCategoriesAndItems()
if err != nil {
return fmt.Errorf("gather categories and items from catalog dir to list symbols: %w", err)
}
symbols := ret["Symbol"]
response.Results = make([]string, len(symbols))
cnt := 0
for symbol := range symbols {
response.Results[cnt] = symbol
cnt++
}
return nil
}
/*
Utility functions
*/
type QueryService struct {
catalogDir *catalog.Directory
}
func NewQueryService(catDir *catalog.Directory) *QueryService |
func (qs *QueryService) ExecuteQuery(tbk *io.TimeBucketKey, start, end time.Time, limitRecordCount int,
limitFromStart bool, columns []string,
) (io.ColumnSeriesMap, error) {
query := planner.NewQuery(qs.catalogDir)
/*
Alter timeframe inside key to ensure it matches a queryable TF
*/
tf := tbk.GetItemInCategory("Timeframe")
cd, err := utils.CandleDurationFromString(tf)
if err != nil {
return nil, fmt.Errorf("timeframe not found in TimeBucketKey=%s: %w", tbk.String(), err)
}
queryableTimeframe := cd.QueryableTimeframe()
tbk.SetItemInCategory("Timeframe", queryableTimeframe)
query.AddTargetKey(tbk)
if limitRecordCount != 0 {
direction := io.LAST
if limitFromStart {
direction = io.FIRST
}
query.SetRowLimit(
direction,
cd.QueryableNrecords(
queryableTimeframe,
limitRecordCount,
),
)
}
query.SetRange(start, end)
parseResult, err := query.Parse()
if err != nil {
// No results from query
if err.Error() == "no files returned from query parse" {
log.Info("no results returned from query: Target: %v, start, end: %v,%v limitRecordCount: %v",
tbk.String(), start, end, limitRecordCount)
} else {
log.Error("Parsing query: %s\n", err)
}
return nil, err
}
scanner, err := executor.NewReader(parseResult)
if err != nil {
log.Error("Unable to create scanner: %s\n", err)
return nil, err
}
csm, err := scanner.Read()
if err != nil {
log.Error("Error returned from query scanner: %s\n", err)
return nil, err
}
csm.FilterColumns(columns)
return csm, err
}
| {
return &QueryService{
catalogDir: catDir,
}
} | identifier_body |
query.go | package frontend
import (
"fmt"
"math"
"net/http"
"strings"
"sync/atomic"
"time"
"github.com/alpacahq/marketstore/v4/catalog"
"github.com/alpacahq/marketstore/v4/executor"
"github.com/alpacahq/marketstore/v4/planner"
"github.com/alpacahq/marketstore/v4/sqlparser"
"github.com/alpacahq/marketstore/v4/utils"
"github.com/alpacahq/marketstore/v4/utils/io"
"github.com/alpacahq/marketstore/v4/utils/log"
)
// This is the parameter interface for DataService.Query method.
type QueryRequest struct {
// Note: SQL is not fully supported
IsSQLStatement bool `msgpack:"is_sqlstatement"` // If this is a SQL request, Only SQLStatement is relevant
SQLStatement string `msgpack:"sql_statement"`
// Destination is <symbol>/<timeframe>/<attributegroup>
Destination string `msgpack:"destination"`
// This is not usually set, defaults to Symbol/Timeframe/AttributeGroup
KeyCategory string `msgpack:"key_category,omitempty"`
// Lower time predicate (i.e. index >= start) in unix epoch second
EpochStart *int64 `msgpack:"epoch_start,omitempty"`
// Nanosecond of the lower time predicate
EpochStartNanos *int64 `msgpack:"epoch_start_nanos,omitempty"`
// Upper time predicate (i.e. index <= end) in unix epoch second
EpochEnd *int64 `msgpack:"epoch_end,omitempty"`
// Nanosecond of the upper time predicate
EpochEndNanos *int64 `msgpack:"epoch_end_nanos,omitempty"`
// Number of max returned rows from lower/upper bound
LimitRecordCount *int `msgpack:"limit_record_count,omitempty"`
// Set to true if LimitRecordCount should be from the lower
LimitFromStart *bool `msgpack:"limit_from_start,omitempty"`
// Array of column names to be returned
Columns []string `msgpack:"columns,omitempty"`
// Support for functions is experimental and subject to change
Functions []string `msgpack:"functions,omitempty"`
}
type MultiQueryRequest struct {
/*
A multi-request allows for different Timeframes and record formats for each request
*/
Requests []QueryRequest `msgpack:"requests"`
}
type QueryResponse struct {
Result *io.NumpyMultiDataset `msgpack:"result"`
}
type MultiQueryResponse struct {
Responses []QueryResponse `msgpack:"responses"`
Version string `msgpack:"version"` // Server Version
Timezone string `msgpack:"timezone"` // Server Timezone
}
// ToColumnSeriesMap converts a MultiQueryResponse to a
// ColumnSeriesMap, returning an error if there is any
// issue encountered while converting.
func (resp *MultiQueryResponse) ToColumnSeriesMap() (*io.ColumnSeriesMap, error) {
if resp == nil |
csm := io.NewColumnSeriesMap()
for _, ds := range resp.Responses { // Datasets are packed in a slice, each has a NumpyMultiDataset inside
nmds := ds.Result
for tbkStr, startIndex := range nmds.StartIndex {
cs, err := nmds.ToColumnSeries(startIndex, nmds.Lengths[tbkStr])
if err != nil {
return nil, err
}
tbk := io.NewTimeBucketKeyFromString(tbkStr)
csm[*tbk] = cs
}
}
return &csm, nil
}
func (s *DataService) Query(r *http.Request, reqs *MultiQueryRequest, response *MultiQueryResponse) (err error) {
response.Version = utils.GitHash
response.Timezone = utils.InstanceConfig.Timezone.String()
for i := range reqs.Requests {
var (
resp *QueryResponse
err error
)
// SQL
if reqs.Requests[i].IsSQLStatement {
resp, err = s.executeSQL(reqs.Requests[i].SQLStatement)
if err != nil {
return err
}
} else {
// Query
resp, err = s.executeQuery(&reqs.Requests[i])
if err != nil {
return err
}
}
response.Responses = append(response.Responses, *resp)
}
return nil
}
func (s *DataService) executeSQL(sqlStatement string) (*QueryResponse, error) {
queryTree, err := sqlparser.BuildQueryTree(sqlStatement)
if err != nil {
return nil, err
}
es, err := sqlparser.NewExecutableStatement(queryTree)
if err != nil {
return nil, err
}
cs, err := es.Materialize(s.aggRunner, s.catalogDir)
if err != nil {
return nil, err
}
nds, err := io.NewNumpyDataset(cs)
if err != nil {
return nil, err
}
tbk := io.NewTimeBucketKeyFromString(sqlStatement + ":SQL")
nmds, err := io.NewNumpyMultiDataset(nds, *tbk)
if err != nil {
return nil, err
}
return &QueryResponse{nmds}, nil
}
func (s *DataService) executeQuery(req *QueryRequest) (*QueryResponse, error) {
/*
Assumption: Within each TimeBucketKey, we have one or more of each category, with the exception of
the AttributeGroup (aka Record Format) and Timeframe
Within each TimeBucketKey in the request, we allow for a comma separated list of items, e.g.:
destination1.items := "TSLA,AAPL,CG/1Min/OHLCV"
Constraints:
- If there is more than one record format in a single destination, we return an error
- If there is more than one Timeframe in a single destination, we return an error
*/
dest := io.NewTimeBucketKey(req.Destination, req.KeyCategory)
/*
All destinations in a request must share the same record format (AttributeGroup) and Timeframe
*/
RecordFormat := dest.GetItemInCategory("AttributeGroup")
Timeframe := dest.GetItemInCategory("Timeframe")
Symbols := dest.GetMultiItemInCategory("Symbol")
if len(Timeframe) == 0 || len(RecordFormat) == 0 || len(Symbols) == 0 {
return nil, fmt.Errorf("destinations must have a Symbol, Timeframe and AttributeGroup, have: %s",
dest.String())
} else if len(Symbols) == 1 && Symbols[0] == "*" {
// replace the * "symbol" with a list all known actual symbols
symbols, err := gatherAllSymbols(s.catalogDir)
if err != nil {
return nil, err
}
keyParts := []string{strings.Join(symbols, ","), Timeframe, RecordFormat}
itemKey := strings.Join(keyParts, "/")
dest = io.NewTimeBucketKey(itemKey, req.KeyCategory)
}
epochStart := int64(0)
epochEnd := int64(math.MaxInt64)
var epochStartNanos, epochEndNanos int64
if req.EpochStart != nil {
epochStart = *req.EpochStart
if req.EpochStartNanos != nil {
epochStartNanos = *req.EpochStartNanos
}
}
if req.EpochEnd != nil {
epochEnd = *req.EpochEnd
if req.EpochEndNanos != nil {
epochEndNanos = *req.EpochEndNanos
}
}
limitRecordCount := 0
if req.LimitRecordCount != nil {
limitRecordCount = *req.LimitRecordCount
}
limitFromStart := false
if req.LimitFromStart != nil {
limitFromStart = *req.LimitFromStart
}
columns := make([]string, 0)
if req.Columns != nil {
columns = req.Columns
}
start := io.ToSystemTimezone(time.Unix(epochStart, epochStartNanos))
end := io.ToSystemTimezone(time.Unix(epochEnd, epochEndNanos))
csm, err := s.query.ExecuteQuery(
dest,
start, end,
limitRecordCount, limitFromStart,
columns,
)
if err != nil {
return nil, err
}
/*
Execute function pipeline, if requested
*/
if len(req.Functions) != 0 {
for tbkStr, cs := range csm {
csOut, err2 := s.aggRunner.Run(req.Functions, cs, tbkStr)
if err2 != nil {
return nil, err2
}
csm[tbkStr] = csOut
}
}
/*
Separate each TimeBucket from the result and compose a NumpyMultiDataset
*/
var nmds *io.NumpyMultiDataset
for tbk, cs := range csm {
nds, err2 := io.NewNumpyDataset(cs)
if err != nil {
return nil, err2
}
if nmds == nil {
nmds, err = io.NewNumpyMultiDataset(nds, tbk)
if err != nil {
return nil, err
}
} else {
err3 := nmds.Append(cs, tbk)
if err3 != nil {
return nil, fmt.Errorf("symbols in a query must have the same data type "+
"or be filtered by common columns. symbols=%v", csm.GetMetadataKeys(),
)
}
}
}
return &QueryResponse{nmds}, nil
}
type ListSymbolsResponse struct {
Results []string
}
type ListSymbolsRequest struct {
// "symbol", or "tbk"
Format string `msgpack:"format,omitempty"`
}
func (s *DataService) ListSymbols(r *http.Request, req *ListSymbolsRequest, response *ListSymbolsResponse) (err error) {
if atomic.LoadUint32(&Queryable) == 0 {
return errNotQueryable
}
// TBK format (e.g. ["AMZN/1Min/TICK", "AAPL/1Sec/OHLCV", ...])
if req != nil && req.Format == "tbk" {
response.Results = catalog.ListTimeBucketKeyNames(s.catalogDir)
return nil
}
// Symbol format (e.g. ["AMZN", "AAPL", ...])
ret, err := s.catalogDir.GatherCategoriesAndItems()
if err != nil {
return fmt.Errorf("gather categories and items from catalog dir to list symbols: %w", err)
}
symbols := ret["Symbol"]
response.Results = make([]string, len(symbols))
cnt := 0
for symbol := range symbols {
response.Results[cnt] = symbol
cnt++
}
return nil
}
/*
Utility functions
*/
type QueryService struct {
catalogDir *catalog.Directory
}
func NewQueryService(catDir *catalog.Directory) *QueryService {
return &QueryService{
catalogDir: catDir,
}
}
func (qs *QueryService) ExecuteQuery(tbk *io.TimeBucketKey, start, end time.Time, limitRecordCount int,
limitFromStart bool, columns []string,
) (io.ColumnSeriesMap, error) {
query := planner.NewQuery(qs.catalogDir)
/*
Alter timeframe inside key to ensure it matches a queryable TF
*/
tf := tbk.GetItemInCategory("Timeframe")
cd, err := utils.CandleDurationFromString(tf)
if err != nil {
return nil, fmt.Errorf("timeframe not found in TimeBucketKey=%s: %w", tbk.String(), err)
}
queryableTimeframe := cd.QueryableTimeframe()
tbk.SetItemInCategory("Timeframe", queryableTimeframe)
query.AddTargetKey(tbk)
if limitRecordCount != 0 {
direction := io.LAST
if limitFromStart {
direction = io.FIRST
}
query.SetRowLimit(
direction,
cd.QueryableNrecords(
queryableTimeframe,
limitRecordCount,
),
)
}
query.SetRange(start, end)
parseResult, err := query.Parse()
if err != nil {
// No results from query
if err.Error() == "no files returned from query parse" {
log.Info("no results returned from query: Target: %v, start, end: %v,%v limitRecordCount: %v",
tbk.String(), start, end, limitRecordCount)
} else {
log.Error("Parsing query: %s\n", err)
}
return nil, err
}
scanner, err := executor.NewReader(parseResult)
if err != nil {
log.Error("Unable to create scanner: %s\n", err)
return nil, err
}
csm, err := scanner.Read()
if err != nil {
log.Error("Error returned from query scanner: %s\n", err)
return nil, err
}
csm.FilterColumns(columns)
return csm, err
}
| {
return nil, nil
} | conditional_block |
query.go | package frontend
import (
"fmt"
"math"
"net/http"
"strings"
"sync/atomic"
"time"
"github.com/alpacahq/marketstore/v4/catalog"
"github.com/alpacahq/marketstore/v4/executor"
"github.com/alpacahq/marketstore/v4/planner"
"github.com/alpacahq/marketstore/v4/sqlparser"
"github.com/alpacahq/marketstore/v4/utils"
"github.com/alpacahq/marketstore/v4/utils/io"
"github.com/alpacahq/marketstore/v4/utils/log"
)
// This is the parameter interface for DataService.Query method.
type QueryRequest struct {
// Note: SQL is not fully supported
IsSQLStatement bool `msgpack:"is_sqlstatement"` // If this is a SQL request, Only SQLStatement is relevant
SQLStatement string `msgpack:"sql_statement"`
// Destination is <symbol>/<timeframe>/<attributegroup>
Destination string `msgpack:"destination"`
// This is not usually set, defaults to Symbol/Timeframe/AttributeGroup
KeyCategory string `msgpack:"key_category,omitempty"`
// Lower time predicate (i.e. index >= start) in unix epoch second
EpochStart *int64 `msgpack:"epoch_start,omitempty"`
// Nanosecond of the lower time predicate
EpochStartNanos *int64 `msgpack:"epoch_start_nanos,omitempty"`
// Upper time predicate (i.e. index <= end) in unix epoch second
EpochEnd *int64 `msgpack:"epoch_end,omitempty"`
// Nanosecond of the upper time predicate
EpochEndNanos *int64 `msgpack:"epoch_end_nanos,omitempty"`
// Number of max returned rows from lower/upper bound
LimitRecordCount *int `msgpack:"limit_record_count,omitempty"`
// Set to true if LimitRecordCount should be from the lower
LimitFromStart *bool `msgpack:"limit_from_start,omitempty"`
// Array of column names to be returned
Columns []string `msgpack:"columns,omitempty"`
// Support for functions is experimental and subject to change
Functions []string `msgpack:"functions,omitempty"`
}
type MultiQueryRequest struct {
/*
A multi-request allows for different Timeframes and record formats for each request
*/
Requests []QueryRequest `msgpack:"requests"`
}
type QueryResponse struct {
Result *io.NumpyMultiDataset `msgpack:"result"`
}
type MultiQueryResponse struct {
Responses []QueryResponse `msgpack:"responses"`
Version string `msgpack:"version"` // Server Version
Timezone string `msgpack:"timezone"` // Server Timezone
}
// ToColumnSeriesMap converts a MultiQueryResponse to a
// ColumnSeriesMap, returning an error if there is any
// issue encountered while converting.
func (resp *MultiQueryResponse) ToColumnSeriesMap() (*io.ColumnSeriesMap, error) {
if resp == nil {
return nil, nil
}
csm := io.NewColumnSeriesMap()
for _, ds := range resp.Responses { // Datasets are packed in a slice, each has a NumpyMultiDataset inside
nmds := ds.Result
for tbkStr, startIndex := range nmds.StartIndex {
cs, err := nmds.ToColumnSeries(startIndex, nmds.Lengths[tbkStr])
if err != nil {
return nil, err
}
tbk := io.NewTimeBucketKeyFromString(tbkStr)
csm[*tbk] = cs
}
}
return &csm, nil
}
func (s *DataService) Query(r *http.Request, reqs *MultiQueryRequest, response *MultiQueryResponse) (err error) {
response.Version = utils.GitHash
response.Timezone = utils.InstanceConfig.Timezone.String()
for i := range reqs.Requests {
var (
resp *QueryResponse
err error
)
// SQL
if reqs.Requests[i].IsSQLStatement {
resp, err = s.executeSQL(reqs.Requests[i].SQLStatement)
if err != nil {
return err
}
} else {
// Query
resp, err = s.executeQuery(&reqs.Requests[i])
if err != nil {
return err
}
}
response.Responses = append(response.Responses, *resp)
}
return nil
}
func (s *DataService) executeSQL(sqlStatement string) (*QueryResponse, error) {
queryTree, err := sqlparser.BuildQueryTree(sqlStatement)
if err != nil {
return nil, err
}
es, err := sqlparser.NewExecutableStatement(queryTree)
if err != nil {
return nil, err
}
cs, err := es.Materialize(s.aggRunner, s.catalogDir)
if err != nil {
return nil, err
}
nds, err := io.NewNumpyDataset(cs)
if err != nil {
return nil, err
}
tbk := io.NewTimeBucketKeyFromString(sqlStatement + ":SQL")
nmds, err := io.NewNumpyMultiDataset(nds, *tbk)
if err != nil {
return nil, err
}
return &QueryResponse{nmds}, nil
}
func (s *DataService) | (req *QueryRequest) (*QueryResponse, error) {
/*
Assumption: Within each TimeBucketKey, we have one or more of each category, with the exception of
the AttributeGroup (aka Record Format) and Timeframe
Within each TimeBucketKey in the request, we allow for a comma separated list of items, e.g.:
destination1.items := "TSLA,AAPL,CG/1Min/OHLCV"
Constraints:
- If there is more than one record format in a single destination, we return an error
- If there is more than one Timeframe in a single destination, we return an error
*/
dest := io.NewTimeBucketKey(req.Destination, req.KeyCategory)
/*
All destinations in a request must share the same record format (AttributeGroup) and Timeframe
*/
RecordFormat := dest.GetItemInCategory("AttributeGroup")
Timeframe := dest.GetItemInCategory("Timeframe")
Symbols := dest.GetMultiItemInCategory("Symbol")
if len(Timeframe) == 0 || len(RecordFormat) == 0 || len(Symbols) == 0 {
return nil, fmt.Errorf("destinations must have a Symbol, Timeframe and AttributeGroup, have: %s",
dest.String())
} else if len(Symbols) == 1 && Symbols[0] == "*" {
// replace the * "symbol" with a list all known actual symbols
symbols, err := gatherAllSymbols(s.catalogDir)
if err != nil {
return nil, err
}
keyParts := []string{strings.Join(symbols, ","), Timeframe, RecordFormat}
itemKey := strings.Join(keyParts, "/")
dest = io.NewTimeBucketKey(itemKey, req.KeyCategory)
}
epochStart := int64(0)
epochEnd := int64(math.MaxInt64)
var epochStartNanos, epochEndNanos int64
if req.EpochStart != nil {
epochStart = *req.EpochStart
if req.EpochStartNanos != nil {
epochStartNanos = *req.EpochStartNanos
}
}
if req.EpochEnd != nil {
epochEnd = *req.EpochEnd
if req.EpochEndNanos != nil {
epochEndNanos = *req.EpochEndNanos
}
}
limitRecordCount := 0
if req.LimitRecordCount != nil {
limitRecordCount = *req.LimitRecordCount
}
limitFromStart := false
if req.LimitFromStart != nil {
limitFromStart = *req.LimitFromStart
}
columns := make([]string, 0)
if req.Columns != nil {
columns = req.Columns
}
start := io.ToSystemTimezone(time.Unix(epochStart, epochStartNanos))
end := io.ToSystemTimezone(time.Unix(epochEnd, epochEndNanos))
csm, err := s.query.ExecuteQuery(
dest,
start, end,
limitRecordCount, limitFromStart,
columns,
)
if err != nil {
return nil, err
}
/*
Execute function pipeline, if requested
*/
if len(req.Functions) != 0 {
for tbkStr, cs := range csm {
csOut, err2 := s.aggRunner.Run(req.Functions, cs, tbkStr)
if err2 != nil {
return nil, err2
}
csm[tbkStr] = csOut
}
}
/*
Separate each TimeBucket from the result and compose a NumpyMultiDataset
*/
var nmds *io.NumpyMultiDataset
for tbk, cs := range csm {
nds, err2 := io.NewNumpyDataset(cs)
if err != nil {
return nil, err2
}
if nmds == nil {
nmds, err = io.NewNumpyMultiDataset(nds, tbk)
if err != nil {
return nil, err
}
} else {
err3 := nmds.Append(cs, tbk)
if err3 != nil {
return nil, fmt.Errorf("symbols in a query must have the same data type "+
"or be filtered by common columns. symbols=%v", csm.GetMetadataKeys(),
)
}
}
}
return &QueryResponse{nmds}, nil
}
type ListSymbolsResponse struct {
Results []string
}
type ListSymbolsRequest struct {
// "symbol", or "tbk"
Format string `msgpack:"format,omitempty"`
}
func (s *DataService) ListSymbols(r *http.Request, req *ListSymbolsRequest, response *ListSymbolsResponse) (err error) {
if atomic.LoadUint32(&Queryable) == 0 {
return errNotQueryable
}
// TBK format (e.g. ["AMZN/1Min/TICK", "AAPL/1Sec/OHLCV", ...])
if req != nil && req.Format == "tbk" {
response.Results = catalog.ListTimeBucketKeyNames(s.catalogDir)
return nil
}
// Symbol format (e.g. ["AMZN", "AAPL", ...])
ret, err := s.catalogDir.GatherCategoriesAndItems()
if err != nil {
return fmt.Errorf("gather categories and items from catalog dir to list symbols: %w", err)
}
symbols := ret["Symbol"]
response.Results = make([]string, len(symbols))
cnt := 0
for symbol := range symbols {
response.Results[cnt] = symbol
cnt++
}
return nil
}
/*
Utility functions
*/
type QueryService struct {
catalogDir *catalog.Directory
}
func NewQueryService(catDir *catalog.Directory) *QueryService {
return &QueryService{
catalogDir: catDir,
}
}
func (qs *QueryService) ExecuteQuery(tbk *io.TimeBucketKey, start, end time.Time, limitRecordCount int,
limitFromStart bool, columns []string,
) (io.ColumnSeriesMap, error) {
query := planner.NewQuery(qs.catalogDir)
/*
Alter timeframe inside key to ensure it matches a queryable TF
*/
tf := tbk.GetItemInCategory("Timeframe")
cd, err := utils.CandleDurationFromString(tf)
if err != nil {
return nil, fmt.Errorf("timeframe not found in TimeBucketKey=%s: %w", tbk.String(), err)
}
queryableTimeframe := cd.QueryableTimeframe()
tbk.SetItemInCategory("Timeframe", queryableTimeframe)
query.AddTargetKey(tbk)
if limitRecordCount != 0 {
direction := io.LAST
if limitFromStart {
direction = io.FIRST
}
query.SetRowLimit(
direction,
cd.QueryableNrecords(
queryableTimeframe,
limitRecordCount,
),
)
}
query.SetRange(start, end)
parseResult, err := query.Parse()
if err != nil {
// No results from query
if err.Error() == "no files returned from query parse" {
log.Info("no results returned from query: Target: %v, start, end: %v,%v limitRecordCount: %v",
tbk.String(), start, end, limitRecordCount)
} else {
log.Error("Parsing query: %s\n", err)
}
return nil, err
}
scanner, err := executor.NewReader(parseResult)
if err != nil {
log.Error("Unable to create scanner: %s\n", err)
return nil, err
}
csm, err := scanner.Read()
if err != nil {
log.Error("Error returned from query scanner: %s\n", err)
return nil, err
}
csm.FilterColumns(columns)
return csm, err
}
| executeQuery | identifier_name |
d05.go | package main
import (
"bytes"
"flag"
"fmt"
"io/ioutil"
"log"
"os"
"runtime/pprof"
"strconv"
"sync"
"unsafe"
)
// area represents an area with its name and code.
type area struct {
name []byte
code []byte
}
// employee represents an employee with associate name, surname, salary and
// area.
type employee struct {
name []byte
surname []byte
salary float64
areaCode []byte
}
// salaryStats contains a list of employees with the biggest and lowest
// salaries, as well as info on the total sum of salary they receive, the number
// of employees considered and the salary average for this whole group.
type salaryStats struct {
// biggest has a lsit of employees that receive the biggest salary.
biggest []*employee
// lowest has a list of employees that receive the lowest salary.
lowest []*employee
// employeeCount has the number of employees considered in this group.
employeeCount uint32
// total is the sum of salaries of the employees in this group.
total float64
// average is the salary average of employees in this group.
average float64
}
// groupSalaryStats containes a list of employees and an associated salaryStats
// for the considered group.
type groupSalaryStats struct {
// employees is a list of employees in this group.
employees []*employee
// salaries is the stats for this group.
salaries salaryStats
}
// groupSalaryStatsMap is a map of groupSalaryStats.
type groupSalaryStatsMap map[string]*groupSalaryStats
// d05 represents a solution for this problem.
type d05 struct {
// areas contains the areas the employees work at.
areas map[string]area
// salaries contains stats on the salaries for the whole group.
salaries salaryStats
// employeeCount is the number of employees.
employeeCount uint32
// salaryByArea maps the areas with a group stats.
salaryByArea groupSalaryStatsMap
// salaryBySurname maps the surnames with a group stats.
salaryBySurname groupSalaryStatsMap
// employeesByArea maps the areas with a list of employees.
employeesByArea map[string][]*employee
}
const (
// numberOfBlocksDefault is the default number of concurrent blocks the JSON
// input file will be broken into for processing.
numberOfBlocksDefault = 16
)
// WARNING: DO NOT DO THIS AT HOME.
func unsafeString(b []byte) string {
return *(*string)(unsafe.Pointer(&b))
}
// newD05 creates a new `d05' value to be used as either a partial or global
// solution for the problem.
func newD05() *d05 {
return &d05{employeesByArea: map[string][]*employee{}, salaryByArea: map[string]*groupSalaryStats{}, salaryBySurname: map[string]*groupSalaryStats{}, areas: map[string]area{}}
}
// merge gets a salaryStats and merge with the current one.
func (s *salaryStats) merge(salary *salaryStats) {
s.employeeCount += salary.employeeCount
s.total += salary.total
s.average = s.total / float64(s.employeeCount)
if len(s.lowest) == 0 || (len(salary.lowest) > 0 && salary.lowest[0].salary < s.lowest[0].salary) {
s.lowest = append([]*employee{}, salary.lowest...)
} else if (len(s.lowest) > 0 && len(salary.lowest) > 0) && s.lowest[0].salary == salary.lowest[0].salary {
s.lowest = append(s.lowest, salary.lowest...)
}
if len(s.biggest) == 0 || (len(salary.biggest) > 0 && salary.biggest[0].salary > s.biggest[0].salary) {
s.biggest = append([]*employee{}, salary.biggest...)
} else if (len(s.biggest) > 0 && len(salary.biggest) > 0) && s.biggest[0].salary == salary.biggest[0].salary {
s.biggest = append(s.biggest, salary.biggest...)
}
}
// merge receives a groupSalaryStats and merges with the current one.
func (m groupSalaryStatsMap) merge(key string, src *groupSalaryStats) {
if s, ok := m[key]; ok {
s.salaries.merge(&src.salaries)
return
}
m[key] = src
}
// stats receives partial solutions from `block' channel and updates the global
// solution.
func (d *d05) stats(wg *sync.WaitGroup, block chan *d05) {
for {
select {
case partialSolution := <-block:
d.employeeCount += partialSolution.employeeCount
for k := range partialSolution.areas {
d.areas[k] = partialSolution.areas[k]
}
for k := range partialSolution.employeesByArea {
d.employeesByArea[k] = append(d.employeesByArea[k], partialSolution.employeesByArea[k]...)
}
for k, v := range partialSolution.salaryBySurname {
d.salaryBySurname.merge(k, v)
}
for k, v := range partialSolution.salaryByArea {
d.salaryByArea.merge(k, v)
}
d.salaries.merge(&partialSolution.salaries)
wg.Done()
}
}
}
// updateSalaries updates the salary stats after processing the info of a single
// employee.
func (d *d05) updateSalaries(s *salaryStats, e *employee) {
if len(s.biggest) == 0 || e.salary > s.biggest[0].salary {
s.biggest = []*employee{e}
} else if s.biggest[0].salary == e.salary {
s.biggest = append(s.biggest, e)
}
if len(s.lowest) == 0 || e.salary < s.lowest[0].salary {
s.lowest = []*employee{e}
} else if s.lowest[0].salary == e.salary {
s.lowest = append(s.lowest, e)
}
s.employeeCount++
s.total += e.salary
s.average = s.total / float64(s.employeeCount)
}
// calculateSalaries updates a groupSalaryStats map after processing the info of
// a single employee.
func (d *d05) calculateSalaries(s map[string]*groupSalaryStats, key *string, e *employee) {
gs, ok := s[*key]
if !ok {
gs = &groupSalaryStats{}
s[*key] = gs
}
d.updateSalaries(&gs.salaries, e)
}
// processEmployees receive an employee and updates the associated stats.
func (d *d05) processEmployee(e *employee) {
area := unsafeString(e.areaCode)
surname := unsafeString(e.surname)
d.updateSalaries(&d.salaries, e)
d.calculateSalaries(d.salaryBySurname, &surname, e)
d.calculateSalaries(d.salaryByArea, &area, e)
_, ok := d.employeesByArea[area]
if !ok {
d.employeesByArea[area] = []*employee{}
}
d.employeesByArea[area] = append(d.employeesByArea[area], e)
d.employeeCount++
}
// parseArea parses an area from the input JSON file.
func (d *d05) parseArea(data []byte) {
totalQuotes := 0
var current uint32
var previous uint32
a := area{}
doublequote := byte('"')
i := uint32(0)
var idx int
for {
if idx = bytes.IndexByte(data[i:], doublequote); idx == -1 {
break
}
totalQuotes++
previous = current
current = i + uint32(idx)
i = current + 1
switch totalQuotes {
// {"codigo":"SM", "nome":"Gerenciamento de Software"}
case 4:
a.code = make([]byte, current-previous-1)
copy(a.code, data[previous+1:current])
case 8:
a.name = make([]byte, current-previous-1)
copy(a.name, data[previous+1:current])
d.areas[unsafeString(a.code)] = a
return
}
}
}
// parseEmployee parses an employee from the input JSON file. If the received
// data is not an employee, it calls parseArea instead.
func (d *d05) parseEmployee(data []byte, start, end uint32) {
totalQuotes := 0
var current uint32
var previous uint32
e := employee{}
for i := start; i < end; i++ {
if data[i] != '"' {
continue
}
totalQuotes++
previous = current
current = i
switch totalQuotes {
// {"id":1,"nome":"Aahron","sobrenome":"Abaine","salario":68379.29,"area":"PI"}
case 2:
// Checking if it is an employee.
if !bytes.Equal([]byte("id"), data[previous+1:current]) {
d.parseArea(data[start : end+1])
return
}
case 6:
e.name = make([]byte, current-previous-1)
copy(e.name, data[previous+1:current])
case 10:
e.surname = make([]byte, current-previous-1)
copy(e.surname, data[previous+1:current])
case 13:
j := current - 2
for ; j > previous; j-- {
if data[j] >= '0' && data[j] <= '9' {
break
}
}
salary, err := strconv.ParseFloat(unsafeString(data[previous+2:j+1]), 64)
if err != nil {
log.Printf("oops: error converting %q to float: %v\n", data[previous+2:j+1], err)
}
e.salary = salary
case 16:
e.areaCode = make([]byte, current-previous-1)
copy(e.areaCode, data[previous+1:current])
d.processEmployee(&e)
return
}
}
}
// parseJSONBlock parses a block of JSON data from the input file. This method
// will be run concurrently and generate a partial solution from the data it
// processes, then send the result via the `block' channel.
func (d *d05) parseJSONBlock(data []byte, block chan *d05) {
var start uint32
partialSolution := newD05()
openbracket := byte('{')
closedbracket := byte('}')
i := uint32(0)
var idx int
for {
if idx = bytes.IndexByte(data[i:], openbracket); idx == -1 {
break
}
start = i + uint32(idx)
i = start
if idx = bytes.IndexByte(data[i:], closedbracket); idx == -1 {
break
}
i += uint32(idx)
partialSolution.parseEmployee(data, start, i)
i++
}
block <- partialSolution
}
// parseJSON receives the full JSON data from the input file and calls
// `parseJSONBlocks' to process the smaller blocks. It returns the global
// solution for the problem at hand, once the partial solutions have all been
// accounted for.
func parseJSON(data []byte, blocksToUse uint32) *d05 {
solution := newD05()
block := make(chan *d05)
wg := sync.WaitGroup{}
// Goroutine that will receive the partial solutions.
go solution.stats(&wg, block)
// An average step to form the blocks.
step := uint32(len(data)) / blocksToUse
size := uint32(len(data))
i := step
start := uint32(1)
bracket := byte('{')
var idx int
for p := uint32(0); p < blocksToUse-1; p++ {
for i < size {
if idx = bytes.IndexByte(data[i:], bracket); idx == -1 {
break
}
wg.Add(1)
i += uint32(idx)
go solution.parseJSONBlock(data[start:i-1], block)
start = i
i += step
break
}
}
// Last block.
wg.Add(1)
go solution.parseJSONBlock(data[start:], block)
wg.Wait()
return solution
}
// solve prints the solution for the problem, once everything is done.
func (d *d05) solve() {
wg := sync.WaitGroup{}
wg.Add(5)
go func() {
for i, size := uint32(0), uint32(len(d.salaries.biggest)); i < size; i++ {
fmt.Printf("global_max|%s %s|%.2f\n", d.salaries.biggest[i].name, d.salaries.biggest[i].surname, d.salaries.biggest[i].salary)
}
wg.Done()
}()
go func() {
for i, size := uint32(0), uint32(len(d.salaries.lowest)); i < size; i++ {
fmt.Printf("global_min|%s %s|%.2f\n", d.salaries.lowest[i].name, d.salaries.lowest[i].surname, d.salaries.lowest[i].salary)
}
wg.Done()
}()
fmt.Printf("global_avg|%.2f\n", d.salaries.average)
go func() {
var i uint32
var size uint32
for areaCode, byArea := range d.salaryByArea {
for i, size = 0, uint32(len(byArea.salaries.biggest)); i < size; i++ |
for i, size = 0, uint32(len(byArea.salaries.lowest)); i < size; i++ {
fmt.Printf("area_min|%s|%s %s|%.2f\n", d.areas[areaCode].name, byArea.salaries.lowest[i].name, byArea.salaries.lowest[i].surname, byArea.salaries.lowest[i].salary)
}
fmt.Printf("area_avg|%s|%.2f\n", d.areas[areaCode].name, byArea.salaries.average)
}
wg.Done()
}()
go func() {
lessEmployees := d.employeeCount
mostEmployees := uint32(0)
count := uint32(0)
for _, byArea := range d.employeesByArea {
count = uint32(len(byArea))
if count < lessEmployees {
lessEmployees = count
}
if count > mostEmployees {
mostEmployees = count
}
}
for areaCode, byArea := range d.employeesByArea {
count = uint32(len(byArea))
if count == mostEmployees {
fmt.Printf("most_employees|%s|%d\n", d.areas[areaCode].name, count)
}
if count == lessEmployees {
fmt.Printf("least_employees|%s|%d\n", d.areas[areaCode].name, count)
}
}
wg.Done()
}()
go func() {
for surname, bySurname := range d.salaryBySurname {
if bySurname.salaries.employeeCount <= 1 {
continue
}
for i, size := uint32(0), uint32(len(bySurname.salaries.biggest)); i < size; i++ {
fmt.Printf("last_name_max|%s|%s %s|%.2f\n", surname, bySurname.salaries.biggest[i].name, bySurname.salaries.biggest[i].surname, bySurname.salaries.biggest[i].salary)
}
}
wg.Done()
}()
wg.Wait()
}
func main() {
var optCPUProfile string
flag.StringVar(&optCPUProfile, "cpuprofile", "", "write cpu profile to file")
flag.Parse()
if len(flag.Args()) < 2 {
log.Fatalf("Usage: %s [-cpuprofile=<profile>] <input file> [number of concurrent blocks]\n", os.Args[0])
}
// Profiling
if optCPUProfile != "" {
f, err := os.Create(optCPUProfile)
if err != nil {
log.Fatal(err)
}
pprof.StartCPUProfile(f)
defer pprof.StopCPUProfile()
}
content, err := ioutil.ReadFile(flag.Args()[0])
if err != nil {
log.Fatal(err)
}
numberOfBlocks := uint32(numberOfBlocksDefault)
if len(os.Args) >= 3 {
n, err := strconv.ParseUint(flag.Args()[1], 10, 32)
if err != nil {
log.Fatal(err)
}
numberOfBlocks = uint32(n)
}
problem := parseJSON(content, numberOfBlocks)
problem.solve()
}
| {
fmt.Printf("area_max|%s|%s %s|%.2f\n", d.areas[areaCode].name, byArea.salaries.biggest[i].name, byArea.salaries.biggest[i].surname, byArea.salaries.biggest[i].salary)
} | conditional_block |
d05.go | package main
import (
"bytes"
"flag"
"fmt"
"io/ioutil"
"log"
"os"
"runtime/pprof"
"strconv"
"sync"
"unsafe"
)
// area represents an area with its name and code.
type area struct {
name []byte
code []byte
}
// employee represents an employee with associate name, surname, salary and
// area.
type employee struct {
name []byte
surname []byte
salary float64
areaCode []byte
}
// salaryStats contains a list of employees with the biggest and lowest
// salaries, as well as info on the total sum of salary they receive, the number
// of employees considered and the salary average for this whole group.
type salaryStats struct {
// biggest has a lsit of employees that receive the biggest salary.
biggest []*employee
// lowest has a list of employees that receive the lowest salary.
lowest []*employee
// employeeCount has the number of employees considered in this group.
employeeCount uint32
// total is the sum of salaries of the employees in this group.
total float64
// average is the salary average of employees in this group.
average float64
}
// groupSalaryStats containes a list of employees and an associated salaryStats
// for the considered group.
type groupSalaryStats struct {
// employees is a list of employees in this group.
employees []*employee
// salaries is the stats for this group.
salaries salaryStats
}
// groupSalaryStatsMap is a map of groupSalaryStats.
type groupSalaryStatsMap map[string]*groupSalaryStats
// d05 represents a solution for this problem.
type d05 struct {
// areas contains the areas the employees work at.
areas map[string]area
// salaries contains stats on the salaries for the whole group.
salaries salaryStats
// employeeCount is the number of employees.
employeeCount uint32
// salaryByArea maps the areas with a group stats.
salaryByArea groupSalaryStatsMap
// salaryBySurname maps the surnames with a group stats.
salaryBySurname groupSalaryStatsMap
// employeesByArea maps the areas with a list of employees.
employeesByArea map[string][]*employee
}
const (
// numberOfBlocksDefault is the default number of concurrent blocks the JSON
// input file will be broken into for processing.
numberOfBlocksDefault = 16
)
// WARNING: DO NOT DO THIS AT HOME.
func unsafeString(b []byte) string {
return *(*string)(unsafe.Pointer(&b))
}
// newD05 creates a new `d05' value to be used as either a partial or global
// solution for the problem.
func newD05() *d05 {
return &d05{employeesByArea: map[string][]*employee{}, salaryByArea: map[string]*groupSalaryStats{}, salaryBySurname: map[string]*groupSalaryStats{}, areas: map[string]area{}}
}
// merge gets a salaryStats and merge with the current one.
func (s *salaryStats) merge(salary *salaryStats) {
s.employeeCount += salary.employeeCount
s.total += salary.total
s.average = s.total / float64(s.employeeCount)
if len(s.lowest) == 0 || (len(salary.lowest) > 0 && salary.lowest[0].salary < s.lowest[0].salary) {
s.lowest = append([]*employee{}, salary.lowest...)
} else if (len(s.lowest) > 0 && len(salary.lowest) > 0) && s.lowest[0].salary == salary.lowest[0].salary {
s.lowest = append(s.lowest, salary.lowest...)
}
if len(s.biggest) == 0 || (len(salary.biggest) > 0 && salary.biggest[0].salary > s.biggest[0].salary) {
s.biggest = append([]*employee{}, salary.biggest...)
} else if (len(s.biggest) > 0 && len(salary.biggest) > 0) && s.biggest[0].salary == salary.biggest[0].salary {
s.biggest = append(s.biggest, salary.biggest...)
}
}
// merge receives a groupSalaryStats and merges with the current one.
func (m groupSalaryStatsMap) merge(key string, src *groupSalaryStats) {
if s, ok := m[key]; ok {
s.salaries.merge(&src.salaries)
return
}
m[key] = src
}
// stats receives partial solutions from `block' channel and updates the global
// solution.
func (d *d05) stats(wg *sync.WaitGroup, block chan *d05) {
for {
select {
case partialSolution := <-block:
d.employeeCount += partialSolution.employeeCount
for k := range partialSolution.areas {
d.areas[k] = partialSolution.areas[k]
}
for k := range partialSolution.employeesByArea {
d.employeesByArea[k] = append(d.employeesByArea[k], partialSolution.employeesByArea[k]...)
}
for k, v := range partialSolution.salaryBySurname {
d.salaryBySurname.merge(k, v)
}
for k, v := range partialSolution.salaryByArea {
d.salaryByArea.merge(k, v)
}
d.salaries.merge(&partialSolution.salaries)
wg.Done()
}
}
}
// updateSalaries updates the salary stats after processing the info of a single
// employee.
func (d *d05) updateSalaries(s *salaryStats, e *employee) {
if len(s.biggest) == 0 || e.salary > s.biggest[0].salary {
s.biggest = []*employee{e}
} else if s.biggest[0].salary == e.salary {
s.biggest = append(s.biggest, e)
}
if len(s.lowest) == 0 || e.salary < s.lowest[0].salary {
s.lowest = []*employee{e}
} else if s.lowest[0].salary == e.salary {
s.lowest = append(s.lowest, e)
}
s.employeeCount++
s.total += e.salary
s.average = s.total / float64(s.employeeCount)
}
// calculateSalaries updates a groupSalaryStats map after processing the info of
// a single employee.
func (d *d05) calculateSalaries(s map[string]*groupSalaryStats, key *string, e *employee) {
gs, ok := s[*key]
if !ok {
gs = &groupSalaryStats{}
s[*key] = gs
}
d.updateSalaries(&gs.salaries, e)
}
// processEmployees receive an employee and updates the associated stats.
func (d *d05) processEmployee(e *employee) {
area := unsafeString(e.areaCode)
surname := unsafeString(e.surname)
d.updateSalaries(&d.salaries, e)
d.calculateSalaries(d.salaryBySurname, &surname, e)
d.calculateSalaries(d.salaryByArea, &area, e)
_, ok := d.employeesByArea[area]
if !ok {
d.employeesByArea[area] = []*employee{}
}
d.employeesByArea[area] = append(d.employeesByArea[area], e)
d.employeeCount++
}
// parseArea parses an area from the input JSON file.
func (d *d05) parseArea(data []byte) {
totalQuotes := 0
var current uint32
var previous uint32
a := area{}
doublequote := byte('"')
i := uint32(0)
var idx int
for {
if idx = bytes.IndexByte(data[i:], doublequote); idx == -1 {
break
}
totalQuotes++
previous = current
current = i + uint32(idx)
i = current + 1
switch totalQuotes {
// {"codigo":"SM", "nome":"Gerenciamento de Software"}
case 4:
a.code = make([]byte, current-previous-1)
copy(a.code, data[previous+1:current])
case 8:
a.name = make([]byte, current-previous-1)
copy(a.name, data[previous+1:current])
d.areas[unsafeString(a.code)] = a
return
}
}
}
// parseEmployee parses an employee from the input JSON file. If the received
// data is not an employee, it calls parseArea instead.
func (d *d05) parseEmployee(data []byte, start, end uint32) {
totalQuotes := 0
var current uint32
var previous uint32
e := employee{}
for i := start; i < end; i++ {
if data[i] != '"' {
continue
}
totalQuotes++
previous = current
current = i
switch totalQuotes {
// {"id":1,"nome":"Aahron","sobrenome":"Abaine","salario":68379.29,"area":"PI"}
case 2:
// Checking if it is an employee.
if !bytes.Equal([]byte("id"), data[previous+1:current]) {
d.parseArea(data[start : end+1])
return
}
case 6:
e.name = make([]byte, current-previous-1)
copy(e.name, data[previous+1:current])
case 10:
e.surname = make([]byte, current-previous-1)
copy(e.surname, data[previous+1:current])
case 13:
j := current - 2
for ; j > previous; j-- {
if data[j] >= '0' && data[j] <= '9' {
break
}
}
salary, err := strconv.ParseFloat(unsafeString(data[previous+2:j+1]), 64)
if err != nil {
log.Printf("oops: error converting %q to float: %v\n", data[previous+2:j+1], err)
}
e.salary = salary
case 16:
e.areaCode = make([]byte, current-previous-1)
copy(e.areaCode, data[previous+1:current])
d.processEmployee(&e)
return
}
}
}
// parseJSONBlock parses a block of JSON data from the input file. This method
// will be run concurrently and generate a partial solution from the data it
// processes, then send the result via the `block' channel.
func (d *d05) parseJSONBlock(data []byte, block chan *d05) {
var start uint32
partialSolution := newD05()
openbracket := byte('{')
closedbracket := byte('}')
i := uint32(0)
var idx int
for {
if idx = bytes.IndexByte(data[i:], openbracket); idx == -1 {
break
}
start = i + uint32(idx)
i = start
if idx = bytes.IndexByte(data[i:], closedbracket); idx == -1 {
break
}
i += uint32(idx)
partialSolution.parseEmployee(data, start, i)
i++
}
block <- partialSolution
}
// parseJSON receives the full JSON data from the input file and calls
// `parseJSONBlocks' to process the smaller blocks. It returns the global
// solution for the problem at hand, once the partial solutions have all been
// accounted for.
func parseJSON(data []byte, blocksToUse uint32) *d05 {
solution := newD05()
block := make(chan *d05)
wg := sync.WaitGroup{}
// Goroutine that will receive the partial solutions.
go solution.stats(&wg, block)
// An average step to form the blocks.
step := uint32(len(data)) / blocksToUse
size := uint32(len(data))
i := step
start := uint32(1)
bracket := byte('{')
var idx int
for p := uint32(0); p < blocksToUse-1; p++ {
for i < size {
if idx = bytes.IndexByte(data[i:], bracket); idx == -1 {
break
}
wg.Add(1)
i += uint32(idx)
go solution.parseJSONBlock(data[start:i-1], block)
start = i
i += step
break
}
}
// Last block.
wg.Add(1)
go solution.parseJSONBlock(data[start:], block)
wg.Wait()
return solution
}
// solve prints the solution for the problem, once everything is done.
func (d *d05) solve() {
wg := sync.WaitGroup{}
wg.Add(5)
go func() {
for i, size := uint32(0), uint32(len(d.salaries.biggest)); i < size; i++ {
fmt.Printf("global_max|%s %s|%.2f\n", d.salaries.biggest[i].name, d.salaries.biggest[i].surname, d.salaries.biggest[i].salary)
}
wg.Done()
}()
go func() {
for i, size := uint32(0), uint32(len(d.salaries.lowest)); i < size; i++ {
fmt.Printf("global_min|%s %s|%.2f\n", d.salaries.lowest[i].name, d.salaries.lowest[i].surname, d.salaries.lowest[i].salary)
}
wg.Done()
}()
fmt.Printf("global_avg|%.2f\n", d.salaries.average)
go func() {
var i uint32
var size uint32
for areaCode, byArea := range d.salaryByArea {
for i, size = 0, uint32(len(byArea.salaries.biggest)); i < size; i++ {
fmt.Printf("area_max|%s|%s %s|%.2f\n", d.areas[areaCode].name, byArea.salaries.biggest[i].name, byArea.salaries.biggest[i].surname, byArea.salaries.biggest[i].salary)
}
for i, size = 0, uint32(len(byArea.salaries.lowest)); i < size; i++ {
fmt.Printf("area_min|%s|%s %s|%.2f\n", d.areas[areaCode].name, byArea.salaries.lowest[i].name, byArea.salaries.lowest[i].surname, byArea.salaries.lowest[i].salary)
}
fmt.Printf("area_avg|%s|%.2f\n", d.areas[areaCode].name, byArea.salaries.average)
}
wg.Done()
}()
go func() {
lessEmployees := d.employeeCount
mostEmployees := uint32(0)
count := uint32(0)
for _, byArea := range d.employeesByArea {
count = uint32(len(byArea))
if count < lessEmployees {
lessEmployees = count
}
if count > mostEmployees {
mostEmployees = count
}
}
for areaCode, byArea := range d.employeesByArea {
count = uint32(len(byArea))
if count == mostEmployees {
fmt.Printf("most_employees|%s|%d\n", d.areas[areaCode].name, count)
}
if count == lessEmployees {
fmt.Printf("least_employees|%s|%d\n", d.areas[areaCode].name, count)
}
}
wg.Done()
}()
go func() {
for surname, bySurname := range d.salaryBySurname {
if bySurname.salaries.employeeCount <= 1 {
continue
}
for i, size := uint32(0), uint32(len(bySurname.salaries.biggest)); i < size; i++ {
fmt.Printf("last_name_max|%s|%s %s|%.2f\n", surname, bySurname.salaries.biggest[i].name, bySurname.salaries.biggest[i].surname, bySurname.salaries.biggest[i].salary)
} | }
func main() {
var optCPUProfile string
flag.StringVar(&optCPUProfile, "cpuprofile", "", "write cpu profile to file")
flag.Parse()
if len(flag.Args()) < 2 {
log.Fatalf("Usage: %s [-cpuprofile=<profile>] <input file> [number of concurrent blocks]\n", os.Args[0])
}
// Profiling
if optCPUProfile != "" {
f, err := os.Create(optCPUProfile)
if err != nil {
log.Fatal(err)
}
pprof.StartCPUProfile(f)
defer pprof.StopCPUProfile()
}
content, err := ioutil.ReadFile(flag.Args()[0])
if err != nil {
log.Fatal(err)
}
numberOfBlocks := uint32(numberOfBlocksDefault)
if len(os.Args) >= 3 {
n, err := strconv.ParseUint(flag.Args()[1], 10, 32)
if err != nil {
log.Fatal(err)
}
numberOfBlocks = uint32(n)
}
problem := parseJSON(content, numberOfBlocks)
problem.solve()
} | }
wg.Done()
}()
wg.Wait() | random_line_split |
d05.go | package main
import (
"bytes"
"flag"
"fmt"
"io/ioutil"
"log"
"os"
"runtime/pprof"
"strconv"
"sync"
"unsafe"
)
// area represents an area with its name and code.
type area struct {
name []byte
code []byte
}
// employee represents an employee with associate name, surname, salary and
// area.
type employee struct {
name []byte
surname []byte
salary float64
areaCode []byte
}
// salaryStats contains a list of employees with the biggest and lowest
// salaries, as well as info on the total sum of salary they receive, the number
// of employees considered and the salary average for this whole group.
type salaryStats struct {
// biggest has a lsit of employees that receive the biggest salary.
biggest []*employee
// lowest has a list of employees that receive the lowest salary.
lowest []*employee
// employeeCount has the number of employees considered in this group.
employeeCount uint32
// total is the sum of salaries of the employees in this group.
total float64
// average is the salary average of employees in this group.
average float64
}
// groupSalaryStats containes a list of employees and an associated salaryStats
// for the considered group.
type groupSalaryStats struct {
// employees is a list of employees in this group.
employees []*employee
// salaries is the stats for this group.
salaries salaryStats
}
// groupSalaryStatsMap is a map of groupSalaryStats.
type groupSalaryStatsMap map[string]*groupSalaryStats
// d05 represents a solution for this problem.
type d05 struct {
// areas contains the areas the employees work at.
areas map[string]area
// salaries contains stats on the salaries for the whole group.
salaries salaryStats
// employeeCount is the number of employees.
employeeCount uint32
// salaryByArea maps the areas with a group stats.
salaryByArea groupSalaryStatsMap
// salaryBySurname maps the surnames with a group stats.
salaryBySurname groupSalaryStatsMap
// employeesByArea maps the areas with a list of employees.
employeesByArea map[string][]*employee
}
const (
// numberOfBlocksDefault is the default number of concurrent blocks the JSON
// input file will be broken into for processing.
numberOfBlocksDefault = 16
)
// WARNING: DO NOT DO THIS AT HOME.
func unsafeString(b []byte) string {
return *(*string)(unsafe.Pointer(&b))
}
// newD05 creates a new `d05' value to be used as either a partial or global
// solution for the problem.
func newD05() *d05 {
return &d05{employeesByArea: map[string][]*employee{}, salaryByArea: map[string]*groupSalaryStats{}, salaryBySurname: map[string]*groupSalaryStats{}, areas: map[string]area{}}
}
// merge gets a salaryStats and merge with the current one.
func (s *salaryStats) merge(salary *salaryStats) {
s.employeeCount += salary.employeeCount
s.total += salary.total
s.average = s.total / float64(s.employeeCount)
if len(s.lowest) == 0 || (len(salary.lowest) > 0 && salary.lowest[0].salary < s.lowest[0].salary) {
s.lowest = append([]*employee{}, salary.lowest...)
} else if (len(s.lowest) > 0 && len(salary.lowest) > 0) && s.lowest[0].salary == salary.lowest[0].salary {
s.lowest = append(s.lowest, salary.lowest...)
}
if len(s.biggest) == 0 || (len(salary.biggest) > 0 && salary.biggest[0].salary > s.biggest[0].salary) {
s.biggest = append([]*employee{}, salary.biggest...)
} else if (len(s.biggest) > 0 && len(salary.biggest) > 0) && s.biggest[0].salary == salary.biggest[0].salary {
s.biggest = append(s.biggest, salary.biggest...)
}
}
// merge receives a groupSalaryStats and merges with the current one.
func (m groupSalaryStatsMap) merge(key string, src *groupSalaryStats) {
if s, ok := m[key]; ok {
s.salaries.merge(&src.salaries)
return
}
m[key] = src
}
// stats receives partial solutions from `block' channel and updates the global
// solution.
func (d *d05) stats(wg *sync.WaitGroup, block chan *d05) {
for {
select {
case partialSolution := <-block:
d.employeeCount += partialSolution.employeeCount
for k := range partialSolution.areas {
d.areas[k] = partialSolution.areas[k]
}
for k := range partialSolution.employeesByArea {
d.employeesByArea[k] = append(d.employeesByArea[k], partialSolution.employeesByArea[k]...)
}
for k, v := range partialSolution.salaryBySurname {
d.salaryBySurname.merge(k, v)
}
for k, v := range partialSolution.salaryByArea {
d.salaryByArea.merge(k, v)
}
d.salaries.merge(&partialSolution.salaries)
wg.Done()
}
}
}
// updateSalaries updates the salary stats after processing the info of a single
// employee.
func (d *d05) updateSalaries(s *salaryStats, e *employee) {
if len(s.biggest) == 0 || e.salary > s.biggest[0].salary {
s.biggest = []*employee{e}
} else if s.biggest[0].salary == e.salary {
s.biggest = append(s.biggest, e)
}
if len(s.lowest) == 0 || e.salary < s.lowest[0].salary {
s.lowest = []*employee{e}
} else if s.lowest[0].salary == e.salary {
s.lowest = append(s.lowest, e)
}
s.employeeCount++
s.total += e.salary
s.average = s.total / float64(s.employeeCount)
}
// calculateSalaries updates a groupSalaryStats map after processing the info of
// a single employee.
func (d *d05) calculateSalaries(s map[string]*groupSalaryStats, key *string, e *employee) {
gs, ok := s[*key]
if !ok {
gs = &groupSalaryStats{}
s[*key] = gs
}
d.updateSalaries(&gs.salaries, e)
}
// processEmployees receive an employee and updates the associated stats.
func (d *d05) processEmployee(e *employee) {
area := unsafeString(e.areaCode)
surname := unsafeString(e.surname)
d.updateSalaries(&d.salaries, e)
d.calculateSalaries(d.salaryBySurname, &surname, e)
d.calculateSalaries(d.salaryByArea, &area, e)
_, ok := d.employeesByArea[area]
if !ok {
d.employeesByArea[area] = []*employee{}
}
d.employeesByArea[area] = append(d.employeesByArea[area], e)
d.employeeCount++
}
// parseArea parses an area from the input JSON file.
func (d *d05) parseArea(data []byte) {
totalQuotes := 0
var current uint32
var previous uint32
a := area{}
doublequote := byte('"')
i := uint32(0)
var idx int
for {
if idx = bytes.IndexByte(data[i:], doublequote); idx == -1 {
break
}
totalQuotes++
previous = current
current = i + uint32(idx)
i = current + 1
switch totalQuotes {
// {"codigo":"SM", "nome":"Gerenciamento de Software"}
case 4:
a.code = make([]byte, current-previous-1)
copy(a.code, data[previous+1:current])
case 8:
a.name = make([]byte, current-previous-1)
copy(a.name, data[previous+1:current])
d.areas[unsafeString(a.code)] = a
return
}
}
}
// parseEmployee parses an employee from the input JSON file. If the received
// data is not an employee, it calls parseArea instead.
func (d *d05) parseEmployee(data []byte, start, end uint32) {
totalQuotes := 0
var current uint32
var previous uint32
e := employee{}
for i := start; i < end; i++ {
if data[i] != '"' {
continue
}
totalQuotes++
previous = current
current = i
switch totalQuotes {
// {"id":1,"nome":"Aahron","sobrenome":"Abaine","salario":68379.29,"area":"PI"}
case 2:
// Checking if it is an employee.
if !bytes.Equal([]byte("id"), data[previous+1:current]) {
d.parseArea(data[start : end+1])
return
}
case 6:
e.name = make([]byte, current-previous-1)
copy(e.name, data[previous+1:current])
case 10:
e.surname = make([]byte, current-previous-1)
copy(e.surname, data[previous+1:current])
case 13:
j := current - 2
for ; j > previous; j-- {
if data[j] >= '0' && data[j] <= '9' {
break
}
}
salary, err := strconv.ParseFloat(unsafeString(data[previous+2:j+1]), 64)
if err != nil {
log.Printf("oops: error converting %q to float: %v\n", data[previous+2:j+1], err)
}
e.salary = salary
case 16:
e.areaCode = make([]byte, current-previous-1)
copy(e.areaCode, data[previous+1:current])
d.processEmployee(&e)
return
}
}
}
// parseJSONBlock parses a block of JSON data from the input file. This method
// will be run concurrently and generate a partial solution from the data it
// processes, then send the result via the `block' channel.
func (d *d05) | (data []byte, block chan *d05) {
var start uint32
partialSolution := newD05()
openbracket := byte('{')
closedbracket := byte('}')
i := uint32(0)
var idx int
for {
if idx = bytes.IndexByte(data[i:], openbracket); idx == -1 {
break
}
start = i + uint32(idx)
i = start
if idx = bytes.IndexByte(data[i:], closedbracket); idx == -1 {
break
}
i += uint32(idx)
partialSolution.parseEmployee(data, start, i)
i++
}
block <- partialSolution
}
// parseJSON receives the full JSON data from the input file and calls
// `parseJSONBlocks' to process the smaller blocks. It returns the global
// solution for the problem at hand, once the partial solutions have all been
// accounted for.
func parseJSON(data []byte, blocksToUse uint32) *d05 {
solution := newD05()
block := make(chan *d05)
wg := sync.WaitGroup{}
// Goroutine that will receive the partial solutions.
go solution.stats(&wg, block)
// An average step to form the blocks.
step := uint32(len(data)) / blocksToUse
size := uint32(len(data))
i := step
start := uint32(1)
bracket := byte('{')
var idx int
for p := uint32(0); p < blocksToUse-1; p++ {
for i < size {
if idx = bytes.IndexByte(data[i:], bracket); idx == -1 {
break
}
wg.Add(1)
i += uint32(idx)
go solution.parseJSONBlock(data[start:i-1], block)
start = i
i += step
break
}
}
// Last block.
wg.Add(1)
go solution.parseJSONBlock(data[start:], block)
wg.Wait()
return solution
}
// solve prints the solution for the problem, once everything is done.
func (d *d05) solve() {
wg := sync.WaitGroup{}
wg.Add(5)
go func() {
for i, size := uint32(0), uint32(len(d.salaries.biggest)); i < size; i++ {
fmt.Printf("global_max|%s %s|%.2f\n", d.salaries.biggest[i].name, d.salaries.biggest[i].surname, d.salaries.biggest[i].salary)
}
wg.Done()
}()
go func() {
for i, size := uint32(0), uint32(len(d.salaries.lowest)); i < size; i++ {
fmt.Printf("global_min|%s %s|%.2f\n", d.salaries.lowest[i].name, d.salaries.lowest[i].surname, d.salaries.lowest[i].salary)
}
wg.Done()
}()
fmt.Printf("global_avg|%.2f\n", d.salaries.average)
go func() {
var i uint32
var size uint32
for areaCode, byArea := range d.salaryByArea {
for i, size = 0, uint32(len(byArea.salaries.biggest)); i < size; i++ {
fmt.Printf("area_max|%s|%s %s|%.2f\n", d.areas[areaCode].name, byArea.salaries.biggest[i].name, byArea.salaries.biggest[i].surname, byArea.salaries.biggest[i].salary)
}
for i, size = 0, uint32(len(byArea.salaries.lowest)); i < size; i++ {
fmt.Printf("area_min|%s|%s %s|%.2f\n", d.areas[areaCode].name, byArea.salaries.lowest[i].name, byArea.salaries.lowest[i].surname, byArea.salaries.lowest[i].salary)
}
fmt.Printf("area_avg|%s|%.2f\n", d.areas[areaCode].name, byArea.salaries.average)
}
wg.Done()
}()
go func() {
lessEmployees := d.employeeCount
mostEmployees := uint32(0)
count := uint32(0)
for _, byArea := range d.employeesByArea {
count = uint32(len(byArea))
if count < lessEmployees {
lessEmployees = count
}
if count > mostEmployees {
mostEmployees = count
}
}
for areaCode, byArea := range d.employeesByArea {
count = uint32(len(byArea))
if count == mostEmployees {
fmt.Printf("most_employees|%s|%d\n", d.areas[areaCode].name, count)
}
if count == lessEmployees {
fmt.Printf("least_employees|%s|%d\n", d.areas[areaCode].name, count)
}
}
wg.Done()
}()
go func() {
for surname, bySurname := range d.salaryBySurname {
if bySurname.salaries.employeeCount <= 1 {
continue
}
for i, size := uint32(0), uint32(len(bySurname.salaries.biggest)); i < size; i++ {
fmt.Printf("last_name_max|%s|%s %s|%.2f\n", surname, bySurname.salaries.biggest[i].name, bySurname.salaries.biggest[i].surname, bySurname.salaries.biggest[i].salary)
}
}
wg.Done()
}()
wg.Wait()
}
func main() {
var optCPUProfile string
flag.StringVar(&optCPUProfile, "cpuprofile", "", "write cpu profile to file")
flag.Parse()
if len(flag.Args()) < 2 {
log.Fatalf("Usage: %s [-cpuprofile=<profile>] <input file> [number of concurrent blocks]\n", os.Args[0])
}
// Profiling
if optCPUProfile != "" {
f, err := os.Create(optCPUProfile)
if err != nil {
log.Fatal(err)
}
pprof.StartCPUProfile(f)
defer pprof.StopCPUProfile()
}
content, err := ioutil.ReadFile(flag.Args()[0])
if err != nil {
log.Fatal(err)
}
numberOfBlocks := uint32(numberOfBlocksDefault)
if len(os.Args) >= 3 {
n, err := strconv.ParseUint(flag.Args()[1], 10, 32)
if err != nil {
log.Fatal(err)
}
numberOfBlocks = uint32(n)
}
problem := parseJSON(content, numberOfBlocks)
problem.solve()
}
| parseJSONBlock | identifier_name |
d05.go | package main
import (
"bytes"
"flag"
"fmt"
"io/ioutil"
"log"
"os"
"runtime/pprof"
"strconv"
"sync"
"unsafe"
)
// area represents an area with its name and code.
type area struct {
name []byte
code []byte
}
// employee represents an employee with associate name, surname, salary and
// area.
type employee struct {
name []byte
surname []byte
salary float64
areaCode []byte
}
// salaryStats contains a list of employees with the biggest and lowest
// salaries, as well as info on the total sum of salary they receive, the number
// of employees considered and the salary average for this whole group.
type salaryStats struct {
// biggest has a lsit of employees that receive the biggest salary.
biggest []*employee
// lowest has a list of employees that receive the lowest salary.
lowest []*employee
// employeeCount has the number of employees considered in this group.
employeeCount uint32
// total is the sum of salaries of the employees in this group.
total float64
// average is the salary average of employees in this group.
average float64
}
// groupSalaryStats containes a list of employees and an associated salaryStats
// for the considered group.
type groupSalaryStats struct {
// employees is a list of employees in this group.
employees []*employee
// salaries is the stats for this group.
salaries salaryStats
}
// groupSalaryStatsMap is a map of groupSalaryStats.
type groupSalaryStatsMap map[string]*groupSalaryStats
// d05 represents a solution for this problem.
type d05 struct {
// areas contains the areas the employees work at.
areas map[string]area
// salaries contains stats on the salaries for the whole group.
salaries salaryStats
// employeeCount is the number of employees.
employeeCount uint32
// salaryByArea maps the areas with a group stats.
salaryByArea groupSalaryStatsMap
// salaryBySurname maps the surnames with a group stats.
salaryBySurname groupSalaryStatsMap
// employeesByArea maps the areas with a list of employees.
employeesByArea map[string][]*employee
}
const (
// numberOfBlocksDefault is the default number of concurrent blocks the JSON
// input file will be broken into for processing.
numberOfBlocksDefault = 16
)
// WARNING: DO NOT DO THIS AT HOME.
func unsafeString(b []byte) string {
return *(*string)(unsafe.Pointer(&b))
}
// newD05 creates a new `d05' value to be used as either a partial or global
// solution for the problem.
func newD05() *d05 {
return &d05{employeesByArea: map[string][]*employee{}, salaryByArea: map[string]*groupSalaryStats{}, salaryBySurname: map[string]*groupSalaryStats{}, areas: map[string]area{}}
}
// merge gets a salaryStats and merge with the current one.
func (s *salaryStats) merge(salary *salaryStats) {
s.employeeCount += salary.employeeCount
s.total += salary.total
s.average = s.total / float64(s.employeeCount)
if len(s.lowest) == 0 || (len(salary.lowest) > 0 && salary.lowest[0].salary < s.lowest[0].salary) {
s.lowest = append([]*employee{}, salary.lowest...)
} else if (len(s.lowest) > 0 && len(salary.lowest) > 0) && s.lowest[0].salary == salary.lowest[0].salary {
s.lowest = append(s.lowest, salary.lowest...)
}
if len(s.biggest) == 0 || (len(salary.biggest) > 0 && salary.biggest[0].salary > s.biggest[0].salary) {
s.biggest = append([]*employee{}, salary.biggest...)
} else if (len(s.biggest) > 0 && len(salary.biggest) > 0) && s.biggest[0].salary == salary.biggest[0].salary {
s.biggest = append(s.biggest, salary.biggest...)
}
}
// merge receives a groupSalaryStats and merges with the current one.
func (m groupSalaryStatsMap) merge(key string, src *groupSalaryStats) {
if s, ok := m[key]; ok {
s.salaries.merge(&src.salaries)
return
}
m[key] = src
}
// stats receives partial solutions from `block' channel and updates the global
// solution.
func (d *d05) stats(wg *sync.WaitGroup, block chan *d05) {
for {
select {
case partialSolution := <-block:
d.employeeCount += partialSolution.employeeCount
for k := range partialSolution.areas {
d.areas[k] = partialSolution.areas[k]
}
for k := range partialSolution.employeesByArea {
d.employeesByArea[k] = append(d.employeesByArea[k], partialSolution.employeesByArea[k]...)
}
for k, v := range partialSolution.salaryBySurname {
d.salaryBySurname.merge(k, v)
}
for k, v := range partialSolution.salaryByArea {
d.salaryByArea.merge(k, v)
}
d.salaries.merge(&partialSolution.salaries)
wg.Done()
}
}
}
// updateSalaries updates the salary stats after processing the info of a single
// employee.
func (d *d05) updateSalaries(s *salaryStats, e *employee) {
if len(s.biggest) == 0 || e.salary > s.biggest[0].salary {
s.biggest = []*employee{e}
} else if s.biggest[0].salary == e.salary {
s.biggest = append(s.biggest, e)
}
if len(s.lowest) == 0 || e.salary < s.lowest[0].salary {
s.lowest = []*employee{e}
} else if s.lowest[0].salary == e.salary {
s.lowest = append(s.lowest, e)
}
s.employeeCount++
s.total += e.salary
s.average = s.total / float64(s.employeeCount)
}
// calculateSalaries updates a groupSalaryStats map after processing the info of
// a single employee.
func (d *d05) calculateSalaries(s map[string]*groupSalaryStats, key *string, e *employee) |
// processEmployees receive an employee and updates the associated stats.
func (d *d05) processEmployee(e *employee) {
area := unsafeString(e.areaCode)
surname := unsafeString(e.surname)
d.updateSalaries(&d.salaries, e)
d.calculateSalaries(d.salaryBySurname, &surname, e)
d.calculateSalaries(d.salaryByArea, &area, e)
_, ok := d.employeesByArea[area]
if !ok {
d.employeesByArea[area] = []*employee{}
}
d.employeesByArea[area] = append(d.employeesByArea[area], e)
d.employeeCount++
}
// parseArea parses an area from the input JSON file.
func (d *d05) parseArea(data []byte) {
totalQuotes := 0
var current uint32
var previous uint32
a := area{}
doublequote := byte('"')
i := uint32(0)
var idx int
for {
if idx = bytes.IndexByte(data[i:], doublequote); idx == -1 {
break
}
totalQuotes++
previous = current
current = i + uint32(idx)
i = current + 1
switch totalQuotes {
// {"codigo":"SM", "nome":"Gerenciamento de Software"}
case 4:
a.code = make([]byte, current-previous-1)
copy(a.code, data[previous+1:current])
case 8:
a.name = make([]byte, current-previous-1)
copy(a.name, data[previous+1:current])
d.areas[unsafeString(a.code)] = a
return
}
}
}
// parseEmployee parses an employee from the input JSON file. If the received
// data is not an employee, it calls parseArea instead.
func (d *d05) parseEmployee(data []byte, start, end uint32) {
totalQuotes := 0
var current uint32
var previous uint32
e := employee{}
for i := start; i < end; i++ {
if data[i] != '"' {
continue
}
totalQuotes++
previous = current
current = i
switch totalQuotes {
// {"id":1,"nome":"Aahron","sobrenome":"Abaine","salario":68379.29,"area":"PI"}
case 2:
// Checking if it is an employee.
if !bytes.Equal([]byte("id"), data[previous+1:current]) {
d.parseArea(data[start : end+1])
return
}
case 6:
e.name = make([]byte, current-previous-1)
copy(e.name, data[previous+1:current])
case 10:
e.surname = make([]byte, current-previous-1)
copy(e.surname, data[previous+1:current])
case 13:
j := current - 2
for ; j > previous; j-- {
if data[j] >= '0' && data[j] <= '9' {
break
}
}
salary, err := strconv.ParseFloat(unsafeString(data[previous+2:j+1]), 64)
if err != nil {
log.Printf("oops: error converting %q to float: %v\n", data[previous+2:j+1], err)
}
e.salary = salary
case 16:
e.areaCode = make([]byte, current-previous-1)
copy(e.areaCode, data[previous+1:current])
d.processEmployee(&e)
return
}
}
}
// parseJSONBlock parses a block of JSON data from the input file. This method
// will be run concurrently and generate a partial solution from the data it
// processes, then send the result via the `block' channel.
func (d *d05) parseJSONBlock(data []byte, block chan *d05) {
var start uint32
partialSolution := newD05()
openbracket := byte('{')
closedbracket := byte('}')
i := uint32(0)
var idx int
for {
if idx = bytes.IndexByte(data[i:], openbracket); idx == -1 {
break
}
start = i + uint32(idx)
i = start
if idx = bytes.IndexByte(data[i:], closedbracket); idx == -1 {
break
}
i += uint32(idx)
partialSolution.parseEmployee(data, start, i)
i++
}
block <- partialSolution
}
// parseJSON receives the full JSON data from the input file and calls
// `parseJSONBlocks' to process the smaller blocks. It returns the global
// solution for the problem at hand, once the partial solutions have all been
// accounted for.
func parseJSON(data []byte, blocksToUse uint32) *d05 {
solution := newD05()
block := make(chan *d05)
wg := sync.WaitGroup{}
// Goroutine that will receive the partial solutions.
go solution.stats(&wg, block)
// An average step to form the blocks.
step := uint32(len(data)) / blocksToUse
size := uint32(len(data))
i := step
start := uint32(1)
bracket := byte('{')
var idx int
for p := uint32(0); p < blocksToUse-1; p++ {
for i < size {
if idx = bytes.IndexByte(data[i:], bracket); idx == -1 {
break
}
wg.Add(1)
i += uint32(idx)
go solution.parseJSONBlock(data[start:i-1], block)
start = i
i += step
break
}
}
// Last block.
wg.Add(1)
go solution.parseJSONBlock(data[start:], block)
wg.Wait()
return solution
}
// solve prints the solution for the problem, once everything is done.
func (d *d05) solve() {
wg := sync.WaitGroup{}
wg.Add(5)
go func() {
for i, size := uint32(0), uint32(len(d.salaries.biggest)); i < size; i++ {
fmt.Printf("global_max|%s %s|%.2f\n", d.salaries.biggest[i].name, d.salaries.biggest[i].surname, d.salaries.biggest[i].salary)
}
wg.Done()
}()
go func() {
for i, size := uint32(0), uint32(len(d.salaries.lowest)); i < size; i++ {
fmt.Printf("global_min|%s %s|%.2f\n", d.salaries.lowest[i].name, d.salaries.lowest[i].surname, d.salaries.lowest[i].salary)
}
wg.Done()
}()
fmt.Printf("global_avg|%.2f\n", d.salaries.average)
go func() {
var i uint32
var size uint32
for areaCode, byArea := range d.salaryByArea {
for i, size = 0, uint32(len(byArea.salaries.biggest)); i < size; i++ {
fmt.Printf("area_max|%s|%s %s|%.2f\n", d.areas[areaCode].name, byArea.salaries.biggest[i].name, byArea.salaries.biggest[i].surname, byArea.salaries.biggest[i].salary)
}
for i, size = 0, uint32(len(byArea.salaries.lowest)); i < size; i++ {
fmt.Printf("area_min|%s|%s %s|%.2f\n", d.areas[areaCode].name, byArea.salaries.lowest[i].name, byArea.salaries.lowest[i].surname, byArea.salaries.lowest[i].salary)
}
fmt.Printf("area_avg|%s|%.2f\n", d.areas[areaCode].name, byArea.salaries.average)
}
wg.Done()
}()
go func() {
lessEmployees := d.employeeCount
mostEmployees := uint32(0)
count := uint32(0)
for _, byArea := range d.employeesByArea {
count = uint32(len(byArea))
if count < lessEmployees {
lessEmployees = count
}
if count > mostEmployees {
mostEmployees = count
}
}
for areaCode, byArea := range d.employeesByArea {
count = uint32(len(byArea))
if count == mostEmployees {
fmt.Printf("most_employees|%s|%d\n", d.areas[areaCode].name, count)
}
if count == lessEmployees {
fmt.Printf("least_employees|%s|%d\n", d.areas[areaCode].name, count)
}
}
wg.Done()
}()
go func() {
for surname, bySurname := range d.salaryBySurname {
if bySurname.salaries.employeeCount <= 1 {
continue
}
for i, size := uint32(0), uint32(len(bySurname.salaries.biggest)); i < size; i++ {
fmt.Printf("last_name_max|%s|%s %s|%.2f\n", surname, bySurname.salaries.biggest[i].name, bySurname.salaries.biggest[i].surname, bySurname.salaries.biggest[i].salary)
}
}
wg.Done()
}()
wg.Wait()
}
func main() {
var optCPUProfile string
flag.StringVar(&optCPUProfile, "cpuprofile", "", "write cpu profile to file")
flag.Parse()
if len(flag.Args()) < 2 {
log.Fatalf("Usage: %s [-cpuprofile=<profile>] <input file> [number of concurrent blocks]\n", os.Args[0])
}
// Profiling
if optCPUProfile != "" {
f, err := os.Create(optCPUProfile)
if err != nil {
log.Fatal(err)
}
pprof.StartCPUProfile(f)
defer pprof.StopCPUProfile()
}
content, err := ioutil.ReadFile(flag.Args()[0])
if err != nil {
log.Fatal(err)
}
numberOfBlocks := uint32(numberOfBlocksDefault)
if len(os.Args) >= 3 {
n, err := strconv.ParseUint(flag.Args()[1], 10, 32)
if err != nil {
log.Fatal(err)
}
numberOfBlocks = uint32(n)
}
problem := parseJSON(content, numberOfBlocks)
problem.solve()
}
| {
gs, ok := s[*key]
if !ok {
gs = &groupSalaryStats{}
s[*key] = gs
}
d.updateSalaries(&gs.salaries, e)
} | identifier_body |
tokens.rs | use crate::protocol::input_source::{
InputPosition as InputPosition,
InputSpan
};
/// Represents a particular kind of token. Some tokens represent
/// variable-character tokens. Such a token is always followed by a
/// `TokenKind::SpanEnd` token.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub enum TokenKind {
// Variable-character tokens, followed by a SpanEnd token
Ident, // regular identifier
Pragma, // identifier with prefixed `#`, range includes `#`
Integer, // integer literal
String, // string literal, range includes `"`
Character, // character literal, range includes `'`
LineComment, // line comment, range includes leading `//`, but not newline
BlockComment, // block comment, range includes leading `/*` and trailing `*/`
// Punctuation (single character)
Exclamation, // !
Question, // ?
Pound, // #
OpenAngle, // <
OpenCurly, // {
OpenParen, // (
OpenSquare, // [
CloseAngle, // >
CloseCurly, // }
CloseParen, // )
CloseSquare, // ]
Colon, // :
Comma, // ,
Dot, // .
SemiColon, // ;
// Operator-like (single character)
At, // @
Plus, // +
Minus, // -
Star, // *
Slash, // /
Percent, // %
Caret, // ^
And, // &
Or, // |
Tilde, // ~
Equal, // =
// Punctuation (two characters)
ColonColon, // ::
DotDot, // ..
ArrowRight, // ->
// Operator-like (two characters)
AtEquals, // @=
PlusPlus, // ++
PlusEquals, // +=
MinusMinus, // --
MinusEquals, // -=
StarEquals, // *=
SlashEquals, // /=
PercentEquals, // %=
CaretEquals, // ^=
AndAnd, // &&
AndEquals, // &=
OrOr, // || | LessEquals, // <=
ShiftRight, // >>
GreaterEquals, // >=
// Operator-like (three characters)
ShiftLeftEquals,// <<=
ShiftRightEquals, // >>=
// Special marker token to indicate end of variable-character tokens
SpanEnd,
}
impl TokenKind {
/// Returns true if the next expected token is the special `TokenKind::SpanEnd` token. This is
/// the case for tokens of variable length (e.g. an identifier).
fn has_span_end(&self) -> bool {
return *self <= TokenKind::BlockComment
}
/// Returns the number of characters associated with the token. May only be called on tokens
/// that do not have a variable length.
fn num_characters(&self) -> u32 {
debug_assert!(!self.has_span_end() && *self != TokenKind::SpanEnd);
if *self <= TokenKind::Equal {
1
} else if *self <= TokenKind::GreaterEquals {
2
} else {
3
}
}
/// Returns the characters that are represented by the token, may only be called on tokens that
/// do not have a variable length.
pub fn token_chars(&self) -> &'static str {
debug_assert!(!self.has_span_end() && *self != TokenKind::SpanEnd);
use TokenKind as TK;
match self {
TK::Exclamation => "!",
TK::Question => "?",
TK::Pound => "#",
TK::OpenAngle => "<",
TK::OpenCurly => "{",
TK::OpenParen => "(",
TK::OpenSquare => "[",
TK::CloseAngle => ">",
TK::CloseCurly => "}",
TK::CloseParen => ")",
TK::CloseSquare => "]",
TK::Colon => ":",
TK::Comma => ",",
TK::Dot => ".",
TK::SemiColon => ";",
TK::At => "@",
TK::Plus => "+",
TK::Minus => "-",
TK::Star => "*",
TK::Slash => "/",
TK::Percent => "%",
TK::Caret => "^",
TK::And => "&",
TK::Or => "|",
TK::Tilde => "~",
TK::Equal => "=",
TK::ColonColon => "::",
TK::DotDot => "..",
TK::ArrowRight => "->",
TK::AtEquals => "@=",
TK::PlusPlus => "++",
TK::PlusEquals => "+=",
TK::MinusMinus => "--",
TK::MinusEquals => "-=",
TK::StarEquals => "*=",
TK::SlashEquals => "/=",
TK::PercentEquals => "%=",
TK::CaretEquals => "^=",
TK::AndAnd => "&&",
TK::AndEquals => "&=",
TK::OrOr => "||",
TK::OrEquals => "|=",
TK::EqualEqual => "==",
TK::NotEqual => "!=",
TK::ShiftLeft => "<<",
TK::LessEquals => "<=",
TK::ShiftRight => ">>",
TK::GreaterEquals => ">=",
TK::ShiftLeftEquals => "<<=",
TK::ShiftRightEquals => ">>=",
// Lets keep these in explicitly for now, in case we want to add more symbols
TK::Ident | TK::Pragma | TK::Integer | TK::String | TK::Character |
TK::LineComment | TK::BlockComment | TK::SpanEnd => unreachable!(),
}
}
}
/// Represents a single token at a particular position.
pub struct Token {
pub kind: TokenKind,
pub pos: InputPosition,
}
impl Token {
pub(crate) fn new(kind: TokenKind, pos: InputPosition) -> Self {
Self{ kind, pos }
}
}
/// The kind of token ranges that are specially parsed by the tokenizer.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum TokenRangeKind {
Module,
Pragma,
Import,
Definition,
Code,
}
pub const NO_RELATION: i32 = -1;
pub const NO_SIBLING: i32 = NO_RELATION;
/// A range of tokens with a specific meaning. Such a range is part of a tree
/// where each parent tree envelops all of its children.
#[derive(Debug)]
pub struct TokenRange {
// Index of parent in `TokenBuffer.ranges`, does not have a parent if the
// range kind is Module, in that case the parent index is -1.
pub parent_idx: i32,
pub range_kind: TokenRangeKind,
pub curly_depth: u32,
// Offsets into `TokenBuffer.ranges`: the tokens belonging to this range.
pub start: u32, // first token (inclusive index)
pub end: u32, // last token (exclusive index)
// Child ranges
pub num_child_ranges: u32, // Number of subranges
pub first_child_idx: i32, // First subrange (or -1 if no subranges)
pub last_child_idx: i32, // Last subrange (or -1 if no subranges)
pub next_sibling_idx: i32, // Next subrange (or -1 if no next subrange)
}
pub struct TokenBuffer {
pub tokens: Vec<Token>,
pub ranges: Vec<TokenRange>,
}
impl TokenBuffer {
pub(crate) fn new() -> Self {
Self{ tokens: Vec::new(), ranges: Vec::new() }
}
pub(crate) fn iter_range<'a>(&'a self, range: &TokenRange) -> TokenIter<'a> {
TokenIter::new(self, range.start as usize, range.end as usize)
}
pub(crate) fn start_pos(&self, range: &TokenRange) -> InputPosition {
self.tokens[range.start as usize].pos
}
pub(crate) fn end_pos(&self, range: &TokenRange) -> InputPosition {
let last_token = &self.tokens[range.end as usize - 1];
if last_token.kind == TokenKind::SpanEnd {
return last_token.pos
} else {
debug_assert!(!last_token.kind.has_span_end());
return last_token.pos.with_offset(last_token.kind.num_characters());
}
}
}
/// Iterator over tokens within a specific `TokenRange`.
pub(crate) struct TokenIter<'a> {
tokens: &'a Vec<Token>,
cur: usize,
end: usize,
}
impl<'a> TokenIter<'a> {
fn new(buffer: &'a TokenBuffer, start: usize, end: usize) -> Self {
Self{ tokens: &buffer.tokens, cur: start, end }
}
/// Returns the next token (may include comments), or `None` if at the end
/// of the range.
pub(crate) fn next_including_comments(&self) -> Option<TokenKind> {
if self.cur >= self.end {
return None;
}
let token = &self.tokens[self.cur];
Some(token.kind)
}
/// Returns the next token (but skips over comments), or `None` if at the
/// end of the range
pub(crate) fn next(&mut self) -> Option<TokenKind> {
while let Some(token_kind) = self.next_including_comments() {
if token_kind != TokenKind::LineComment && token_kind != TokenKind::BlockComment {
return Some(token_kind);
}
self.consume();
}
return None
}
/// Peeks ahead by one token (i.e. the one that comes after `next()`), and
/// skips over comments
pub(crate) fn peek(&self) -> Option<TokenKind> {
for next_idx in self.cur + 1..self.end {
let next_kind = self.tokens[next_idx].kind;
if next_kind != TokenKind::LineComment && next_kind != TokenKind::BlockComment && next_kind != TokenKind::SpanEnd {
return Some(next_kind);
}
}
return None;
}
/// Returns the start position belonging to the token returned by `next`. If
/// there is not a next token, then we return the end position of the
/// previous token.
pub(crate) fn last_valid_pos(&self) -> InputPosition {
if self.cur < self.end {
// Return token position
return self.tokens[self.cur].pos
}
// Return previous token end
let token = &self.tokens[self.cur - 1];
return if token.kind == TokenKind::SpanEnd {
token.pos
} else {
token.pos.with_offset(token.kind.num_characters())
};
}
/// Returns the token range belonging to the token returned by `next`. This
/// assumes that we're not at the end of the range we're iterating over.
/// TODO: @cleanup Phase out?
pub(crate) fn next_positions(&self) -> (InputPosition, InputPosition) {
debug_assert!(self.cur < self.end);
let token = &self.tokens[self.cur];
if token.kind.has_span_end() {
let span_end = &self.tokens[self.cur + 1];
debug_assert_eq!(span_end.kind, TokenKind::SpanEnd);
(token.pos, span_end.pos)
} else {
let offset = token.kind.num_characters();
(token.pos, token.pos.with_offset(offset))
}
}
/// See `next_positions`
pub(crate) fn next_span(&self) -> InputSpan {
let (begin, end) = self.next_positions();
return InputSpan::from_positions(begin, end)
}
/// Advances the iterator to the next (meaningful) token.
pub(crate) fn consume(&mut self) {
if let Some(kind) = self.next_including_comments() {
if kind.has_span_end() {
self.cur += 2;
} else {
self.cur += 1;
}
}
}
/// Saves the current iteration position, may be passed to `load` to return
/// the iterator to a previous position.
pub(crate) fn save(&self) -> (usize, usize) {
(self.cur, self.end)
}
pub(crate) fn load(&mut self, saved: (usize, usize)) {
self.cur = saved.0;
self.end = saved.1;
}
} | OrEquals, // |=
EqualEqual, // ==
NotEqual, // !=
ShiftLeft, // << | random_line_split |
tokens.rs | use crate::protocol::input_source::{
InputPosition as InputPosition,
InputSpan
};
/// Represents a particular kind of token. Some tokens represent
/// variable-character tokens. Such a token is always followed by a
/// `TokenKind::SpanEnd` token.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub enum TokenKind {
// Variable-character tokens, followed by a SpanEnd token
Ident, // regular identifier
Pragma, // identifier with prefixed `#`, range includes `#`
Integer, // integer literal
String, // string literal, range includes `"`
Character, // character literal, range includes `'`
LineComment, // line comment, range includes leading `//`, but not newline
BlockComment, // block comment, range includes leading `/*` and trailing `*/`
// Punctuation (single character)
Exclamation, // !
Question, // ?
Pound, // #
OpenAngle, // <
OpenCurly, // {
OpenParen, // (
OpenSquare, // [
CloseAngle, // >
CloseCurly, // }
CloseParen, // )
CloseSquare, // ]
Colon, // :
Comma, // ,
Dot, // .
SemiColon, // ;
// Operator-like (single character)
At, // @
Plus, // +
Minus, // -
Star, // *
Slash, // /
Percent, // %
Caret, // ^
And, // &
Or, // |
Tilde, // ~
Equal, // =
// Punctuation (two characters)
ColonColon, // ::
DotDot, // ..
ArrowRight, // ->
// Operator-like (two characters)
AtEquals, // @=
PlusPlus, // ++
PlusEquals, // +=
MinusMinus, // --
MinusEquals, // -=
StarEquals, // *=
SlashEquals, // /=
PercentEquals, // %=
CaretEquals, // ^=
AndAnd, // &&
AndEquals, // &=
OrOr, // ||
OrEquals, // |=
EqualEqual, // ==
NotEqual, // !=
ShiftLeft, // <<
LessEquals, // <=
ShiftRight, // >>
GreaterEquals, // >=
// Operator-like (three characters)
ShiftLeftEquals,// <<=
ShiftRightEquals, // >>=
// Special marker token to indicate end of variable-character tokens
SpanEnd,
}
impl TokenKind {
/// Returns true if the next expected token is the special `TokenKind::SpanEnd` token. This is
/// the case for tokens of variable length (e.g. an identifier).
fn has_span_end(&self) -> bool {
return *self <= TokenKind::BlockComment
}
/// Returns the number of characters associated with the token. May only be called on tokens
/// that do not have a variable length.
fn num_characters(&self) -> u32 {
debug_assert!(!self.has_span_end() && *self != TokenKind::SpanEnd);
if *self <= TokenKind::Equal {
1
} else if *self <= TokenKind::GreaterEquals {
2
} else {
3
}
}
/// Returns the characters that are represented by the token, may only be called on tokens that
/// do not have a variable length.
pub fn token_chars(&self) -> &'static str {
debug_assert!(!self.has_span_end() && *self != TokenKind::SpanEnd);
use TokenKind as TK;
match self {
TK::Exclamation => "!",
TK::Question => "?",
TK::Pound => "#",
TK::OpenAngle => "<",
TK::OpenCurly => "{",
TK::OpenParen => "(",
TK::OpenSquare => "[",
TK::CloseAngle => ">",
TK::CloseCurly => "}",
TK::CloseParen => ")",
TK::CloseSquare => "]",
TK::Colon => ":",
TK::Comma => ",",
TK::Dot => ".",
TK::SemiColon => ";",
TK::At => "@",
TK::Plus => "+",
TK::Minus => "-",
TK::Star => "*",
TK::Slash => "/",
TK::Percent => "%",
TK::Caret => "^",
TK::And => "&",
TK::Or => "|",
TK::Tilde => "~",
TK::Equal => "=",
TK::ColonColon => "::",
TK::DotDot => "..",
TK::ArrowRight => "->",
TK::AtEquals => "@=",
TK::PlusPlus => "++",
TK::PlusEquals => "+=",
TK::MinusMinus => "--",
TK::MinusEquals => "-=",
TK::StarEquals => "*=",
TK::SlashEquals => "/=",
TK::PercentEquals => "%=",
TK::CaretEquals => "^=",
TK::AndAnd => "&&",
TK::AndEquals => "&=",
TK::OrOr => "||",
TK::OrEquals => "|=",
TK::EqualEqual => "==",
TK::NotEqual => "!=",
TK::ShiftLeft => "<<",
TK::LessEquals => "<=",
TK::ShiftRight => ">>",
TK::GreaterEquals => ">=",
TK::ShiftLeftEquals => "<<=",
TK::ShiftRightEquals => ">>=",
// Lets keep these in explicitly for now, in case we want to add more symbols
TK::Ident | TK::Pragma | TK::Integer | TK::String | TK::Character |
TK::LineComment | TK::BlockComment | TK::SpanEnd => unreachable!(),
}
}
}
/// Represents a single token at a particular position.
pub struct Token {
pub kind: TokenKind,
pub pos: InputPosition,
}
impl Token {
pub(crate) fn new(kind: TokenKind, pos: InputPosition) -> Self {
Self{ kind, pos }
}
}
/// The kind of token ranges that are specially parsed by the tokenizer.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum TokenRangeKind {
Module,
Pragma,
Import,
Definition,
Code,
}
pub const NO_RELATION: i32 = -1;
pub const NO_SIBLING: i32 = NO_RELATION;
/// A range of tokens with a specific meaning. Such a range is part of a tree
/// where each parent tree envelops all of its children.
#[derive(Debug)]
pub struct TokenRange {
// Index of parent in `TokenBuffer.ranges`, does not have a parent if the
// range kind is Module, in that case the parent index is -1.
pub parent_idx: i32,
pub range_kind: TokenRangeKind,
pub curly_depth: u32,
// Offsets into `TokenBuffer.ranges`: the tokens belonging to this range.
pub start: u32, // first token (inclusive index)
pub end: u32, // last token (exclusive index)
// Child ranges
pub num_child_ranges: u32, // Number of subranges
pub first_child_idx: i32, // First subrange (or -1 if no subranges)
pub last_child_idx: i32, // Last subrange (or -1 if no subranges)
pub next_sibling_idx: i32, // Next subrange (or -1 if no next subrange)
}
pub struct TokenBuffer {
pub tokens: Vec<Token>,
pub ranges: Vec<TokenRange>,
}
impl TokenBuffer {
pub(crate) fn new() -> Self {
Self{ tokens: Vec::new(), ranges: Vec::new() }
}
pub(crate) fn iter_range<'a>(&'a self, range: &TokenRange) -> TokenIter<'a> {
TokenIter::new(self, range.start as usize, range.end as usize)
}
pub(crate) fn start_pos(&self, range: &TokenRange) -> InputPosition {
self.tokens[range.start as usize].pos
}
pub(crate) fn end_pos(&self, range: &TokenRange) -> InputPosition {
let last_token = &self.tokens[range.end as usize - 1];
if last_token.kind == TokenKind::SpanEnd {
return last_token.pos
} else {
debug_assert!(!last_token.kind.has_span_end());
return last_token.pos.with_offset(last_token.kind.num_characters());
}
}
}
/// Iterator over tokens within a specific `TokenRange`.
pub(crate) struct | <'a> {
tokens: &'a Vec<Token>,
cur: usize,
end: usize,
}
impl<'a> TokenIter<'a> {
fn new(buffer: &'a TokenBuffer, start: usize, end: usize) -> Self {
Self{ tokens: &buffer.tokens, cur: start, end }
}
/// Returns the next token (may include comments), or `None` if at the end
/// of the range.
pub(crate) fn next_including_comments(&self) -> Option<TokenKind> {
if self.cur >= self.end {
return None;
}
let token = &self.tokens[self.cur];
Some(token.kind)
}
/// Returns the next token (but skips over comments), or `None` if at the
/// end of the range
pub(crate) fn next(&mut self) -> Option<TokenKind> {
while let Some(token_kind) = self.next_including_comments() {
if token_kind != TokenKind::LineComment && token_kind != TokenKind::BlockComment {
return Some(token_kind);
}
self.consume();
}
return None
}
/// Peeks ahead by one token (i.e. the one that comes after `next()`), and
/// skips over comments
pub(crate) fn peek(&self) -> Option<TokenKind> {
for next_idx in self.cur + 1..self.end {
let next_kind = self.tokens[next_idx].kind;
if next_kind != TokenKind::LineComment && next_kind != TokenKind::BlockComment && next_kind != TokenKind::SpanEnd {
return Some(next_kind);
}
}
return None;
}
/// Returns the start position belonging to the token returned by `next`. If
/// there is not a next token, then we return the end position of the
/// previous token.
pub(crate) fn last_valid_pos(&self) -> InputPosition {
if self.cur < self.end {
// Return token position
return self.tokens[self.cur].pos
}
// Return previous token end
let token = &self.tokens[self.cur - 1];
return if token.kind == TokenKind::SpanEnd {
token.pos
} else {
token.pos.with_offset(token.kind.num_characters())
};
}
/// Returns the token range belonging to the token returned by `next`. This
/// assumes that we're not at the end of the range we're iterating over.
/// TODO: @cleanup Phase out?
pub(crate) fn next_positions(&self) -> (InputPosition, InputPosition) {
debug_assert!(self.cur < self.end);
let token = &self.tokens[self.cur];
if token.kind.has_span_end() {
let span_end = &self.tokens[self.cur + 1];
debug_assert_eq!(span_end.kind, TokenKind::SpanEnd);
(token.pos, span_end.pos)
} else {
let offset = token.kind.num_characters();
(token.pos, token.pos.with_offset(offset))
}
}
/// See `next_positions`
pub(crate) fn next_span(&self) -> InputSpan {
let (begin, end) = self.next_positions();
return InputSpan::from_positions(begin, end)
}
/// Advances the iterator to the next (meaningful) token.
pub(crate) fn consume(&mut self) {
if let Some(kind) = self.next_including_comments() {
if kind.has_span_end() {
self.cur += 2;
} else {
self.cur += 1;
}
}
}
/// Saves the current iteration position, may be passed to `load` to return
/// the iterator to a previous position.
pub(crate) fn save(&self) -> (usize, usize) {
(self.cur, self.end)
}
pub(crate) fn load(&mut self, saved: (usize, usize)) {
self.cur = saved.0;
self.end = saved.1;
}
} | TokenIter | identifier_name |
tokens.rs | use crate::protocol::input_source::{
InputPosition as InputPosition,
InputSpan
};
/// Represents a particular kind of token. Some tokens represent
/// variable-character tokens. Such a token is always followed by a
/// `TokenKind::SpanEnd` token.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub enum TokenKind {
// Variable-character tokens, followed by a SpanEnd token
Ident, // regular identifier
Pragma, // identifier with prefixed `#`, range includes `#`
Integer, // integer literal
String, // string literal, range includes `"`
Character, // character literal, range includes `'`
LineComment, // line comment, range includes leading `//`, but not newline
BlockComment, // block comment, range includes leading `/*` and trailing `*/`
// Punctuation (single character)
Exclamation, // !
Question, // ?
Pound, // #
OpenAngle, // <
OpenCurly, // {
OpenParen, // (
OpenSquare, // [
CloseAngle, // >
CloseCurly, // }
CloseParen, // )
CloseSquare, // ]
Colon, // :
Comma, // ,
Dot, // .
SemiColon, // ;
// Operator-like (single character)
At, // @
Plus, // +
Minus, // -
Star, // *
Slash, // /
Percent, // %
Caret, // ^
And, // &
Or, // |
Tilde, // ~
Equal, // =
// Punctuation (two characters)
ColonColon, // ::
DotDot, // ..
ArrowRight, // ->
// Operator-like (two characters)
AtEquals, // @=
PlusPlus, // ++
PlusEquals, // +=
MinusMinus, // --
MinusEquals, // -=
StarEquals, // *=
SlashEquals, // /=
PercentEquals, // %=
CaretEquals, // ^=
AndAnd, // &&
AndEquals, // &=
OrOr, // ||
OrEquals, // |=
EqualEqual, // ==
NotEqual, // !=
ShiftLeft, // <<
LessEquals, // <=
ShiftRight, // >>
GreaterEquals, // >=
// Operator-like (three characters)
ShiftLeftEquals,// <<=
ShiftRightEquals, // >>=
// Special marker token to indicate end of variable-character tokens
SpanEnd,
}
impl TokenKind {
/// Returns true if the next expected token is the special `TokenKind::SpanEnd` token. This is
/// the case for tokens of variable length (e.g. an identifier).
fn has_span_end(&self) -> bool {
return *self <= TokenKind::BlockComment
}
/// Returns the number of characters associated with the token. May only be called on tokens
/// that do not have a variable length.
fn num_characters(&self) -> u32 {
debug_assert!(!self.has_span_end() && *self != TokenKind::SpanEnd);
if *self <= TokenKind::Equal {
1
} else if *self <= TokenKind::GreaterEquals {
2
} else {
3
}
}
/// Returns the characters that are represented by the token, may only be called on tokens that
/// do not have a variable length.
pub fn token_chars(&self) -> &'static str {
debug_assert!(!self.has_span_end() && *self != TokenKind::SpanEnd);
use TokenKind as TK;
match self {
TK::Exclamation => "!",
TK::Question => "?",
TK::Pound => "#",
TK::OpenAngle => "<",
TK::OpenCurly => "{",
TK::OpenParen => "(",
TK::OpenSquare => "[",
TK::CloseAngle => ">",
TK::CloseCurly => "}",
TK::CloseParen => ")",
TK::CloseSquare => "]",
TK::Colon => ":",
TK::Comma => ",",
TK::Dot => ".",
TK::SemiColon => ";",
TK::At => "@",
TK::Plus => "+",
TK::Minus => "-",
TK::Star => "*",
TK::Slash => "/",
TK::Percent => "%",
TK::Caret => "^",
TK::And => "&",
TK::Or => "|",
TK::Tilde => "~",
TK::Equal => "=",
TK::ColonColon => "::",
TK::DotDot => "..",
TK::ArrowRight => "->",
TK::AtEquals => "@=",
TK::PlusPlus => "++",
TK::PlusEquals => "+=",
TK::MinusMinus => "--",
TK::MinusEquals => "-=",
TK::StarEquals => "*=",
TK::SlashEquals => "/=",
TK::PercentEquals => "%=",
TK::CaretEquals => "^=",
TK::AndAnd => "&&",
TK::AndEquals => "&=",
TK::OrOr => "||",
TK::OrEquals => "|=",
TK::EqualEqual => "==",
TK::NotEqual => "!=",
TK::ShiftLeft => "<<",
TK::LessEquals => "<=",
TK::ShiftRight => ">>",
TK::GreaterEquals => ">=",
TK::ShiftLeftEquals => "<<=",
TK::ShiftRightEquals => ">>=",
// Lets keep these in explicitly for now, in case we want to add more symbols
TK::Ident | TK::Pragma | TK::Integer | TK::String | TK::Character |
TK::LineComment | TK::BlockComment | TK::SpanEnd => unreachable!(),
}
}
}
/// Represents a single token at a particular position.
pub struct Token {
pub kind: TokenKind,
pub pos: InputPosition,
}
impl Token {
pub(crate) fn new(kind: TokenKind, pos: InputPosition) -> Self {
Self{ kind, pos }
}
}
/// The kind of token ranges that are specially parsed by the tokenizer.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum TokenRangeKind {
Module,
Pragma,
Import,
Definition,
Code,
}
pub const NO_RELATION: i32 = -1;
pub const NO_SIBLING: i32 = NO_RELATION;
/// A range of tokens with a specific meaning. Such a range is part of a tree
/// where each parent tree envelops all of its children.
#[derive(Debug)]
pub struct TokenRange {
// Index of parent in `TokenBuffer.ranges`, does not have a parent if the
// range kind is Module, in that case the parent index is -1.
pub parent_idx: i32,
pub range_kind: TokenRangeKind,
pub curly_depth: u32,
// Offsets into `TokenBuffer.ranges`: the tokens belonging to this range.
pub start: u32, // first token (inclusive index)
pub end: u32, // last token (exclusive index)
// Child ranges
pub num_child_ranges: u32, // Number of subranges
pub first_child_idx: i32, // First subrange (or -1 if no subranges)
pub last_child_idx: i32, // Last subrange (or -1 if no subranges)
pub next_sibling_idx: i32, // Next subrange (or -1 if no next subrange)
}
pub struct TokenBuffer {
pub tokens: Vec<Token>,
pub ranges: Vec<TokenRange>,
}
impl TokenBuffer {
pub(crate) fn new() -> Self {
Self{ tokens: Vec::new(), ranges: Vec::new() }
}
pub(crate) fn iter_range<'a>(&'a self, range: &TokenRange) -> TokenIter<'a> {
TokenIter::new(self, range.start as usize, range.end as usize)
}
pub(crate) fn start_pos(&self, range: &TokenRange) -> InputPosition {
self.tokens[range.start as usize].pos
}
pub(crate) fn end_pos(&self, range: &TokenRange) -> InputPosition {
let last_token = &self.tokens[range.end as usize - 1];
if last_token.kind == TokenKind::SpanEnd {
return last_token.pos
} else {
debug_assert!(!last_token.kind.has_span_end());
return last_token.pos.with_offset(last_token.kind.num_characters());
}
}
}
/// Iterator over tokens within a specific `TokenRange`.
pub(crate) struct TokenIter<'a> {
tokens: &'a Vec<Token>,
cur: usize,
end: usize,
}
impl<'a> TokenIter<'a> {
fn new(buffer: &'a TokenBuffer, start: usize, end: usize) -> Self {
Self{ tokens: &buffer.tokens, cur: start, end }
}
/// Returns the next token (may include comments), or `None` if at the end
/// of the range.
pub(crate) fn next_including_comments(&self) -> Option<TokenKind> {
if self.cur >= self.end {
return None;
}
let token = &self.tokens[self.cur];
Some(token.kind)
}
/// Returns the next token (but skips over comments), or `None` if at the
/// end of the range
pub(crate) fn next(&mut self) -> Option<TokenKind> {
while let Some(token_kind) = self.next_including_comments() {
if token_kind != TokenKind::LineComment && token_kind != TokenKind::BlockComment {
return Some(token_kind);
}
self.consume();
}
return None
}
/// Peeks ahead by one token (i.e. the one that comes after `next()`), and
/// skips over comments
pub(crate) fn peek(&self) -> Option<TokenKind> {
for next_idx in self.cur + 1..self.end {
let next_kind = self.tokens[next_idx].kind;
if next_kind != TokenKind::LineComment && next_kind != TokenKind::BlockComment && next_kind != TokenKind::SpanEnd {
return Some(next_kind);
}
}
return None;
}
/// Returns the start position belonging to the token returned by `next`. If
/// there is not a next token, then we return the end position of the
/// previous token.
pub(crate) fn last_valid_pos(&self) -> InputPosition {
if self.cur < self.end {
// Return token position
return self.tokens[self.cur].pos
}
// Return previous token end
let token = &self.tokens[self.cur - 1];
return if token.kind == TokenKind::SpanEnd {
token.pos
} else {
token.pos.with_offset(token.kind.num_characters())
};
}
/// Returns the token range belonging to the token returned by `next`. This
/// assumes that we're not at the end of the range we're iterating over.
/// TODO: @cleanup Phase out?
pub(crate) fn next_positions(&self) -> (InputPosition, InputPosition) |
/// See `next_positions`
pub(crate) fn next_span(&self) -> InputSpan {
let (begin, end) = self.next_positions();
return InputSpan::from_positions(begin, end)
}
/// Advances the iterator to the next (meaningful) token.
pub(crate) fn consume(&mut self) {
if let Some(kind) = self.next_including_comments() {
if kind.has_span_end() {
self.cur += 2;
} else {
self.cur += 1;
}
}
}
/// Saves the current iteration position, may be passed to `load` to return
/// the iterator to a previous position.
pub(crate) fn save(&self) -> (usize, usize) {
(self.cur, self.end)
}
pub(crate) fn load(&mut self, saved: (usize, usize)) {
self.cur = saved.0;
self.end = saved.1;
}
} | {
debug_assert!(self.cur < self.end);
let token = &self.tokens[self.cur];
if token.kind.has_span_end() {
let span_end = &self.tokens[self.cur + 1];
debug_assert_eq!(span_end.kind, TokenKind::SpanEnd);
(token.pos, span_end.pos)
} else {
let offset = token.kind.num_characters();
(token.pos, token.pos.with_offset(offset))
}
} | identifier_body |
tokens.rs | use crate::protocol::input_source::{
InputPosition as InputPosition,
InputSpan
};
/// Represents a particular kind of token. Some tokens represent
/// variable-character tokens. Such a token is always followed by a
/// `TokenKind::SpanEnd` token.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub enum TokenKind {
// Variable-character tokens, followed by a SpanEnd token
Ident, // regular identifier
Pragma, // identifier with prefixed `#`, range includes `#`
Integer, // integer literal
String, // string literal, range includes `"`
Character, // character literal, range includes `'`
LineComment, // line comment, range includes leading `//`, but not newline
BlockComment, // block comment, range includes leading `/*` and trailing `*/`
// Punctuation (single character)
Exclamation, // !
Question, // ?
Pound, // #
OpenAngle, // <
OpenCurly, // {
OpenParen, // (
OpenSquare, // [
CloseAngle, // >
CloseCurly, // }
CloseParen, // )
CloseSquare, // ]
Colon, // :
Comma, // ,
Dot, // .
SemiColon, // ;
// Operator-like (single character)
At, // @
Plus, // +
Minus, // -
Star, // *
Slash, // /
Percent, // %
Caret, // ^
And, // &
Or, // |
Tilde, // ~
Equal, // =
// Punctuation (two characters)
ColonColon, // ::
DotDot, // ..
ArrowRight, // ->
// Operator-like (two characters)
AtEquals, // @=
PlusPlus, // ++
PlusEquals, // +=
MinusMinus, // --
MinusEquals, // -=
StarEquals, // *=
SlashEquals, // /=
PercentEquals, // %=
CaretEquals, // ^=
AndAnd, // &&
AndEquals, // &=
OrOr, // ||
OrEquals, // |=
EqualEqual, // ==
NotEqual, // !=
ShiftLeft, // <<
LessEquals, // <=
ShiftRight, // >>
GreaterEquals, // >=
// Operator-like (three characters)
ShiftLeftEquals,// <<=
ShiftRightEquals, // >>=
// Special marker token to indicate end of variable-character tokens
SpanEnd,
}
impl TokenKind {
/// Returns true if the next expected token is the special `TokenKind::SpanEnd` token. This is
/// the case for tokens of variable length (e.g. an identifier).
fn has_span_end(&self) -> bool {
return *self <= TokenKind::BlockComment
}
/// Returns the number of characters associated with the token. May only be called on tokens
/// that do not have a variable length.
fn num_characters(&self) -> u32 {
debug_assert!(!self.has_span_end() && *self != TokenKind::SpanEnd);
if *self <= TokenKind::Equal {
1
} else if *self <= TokenKind::GreaterEquals {
2
} else {
3
}
}
/// Returns the characters that are represented by the token, may only be called on tokens that
/// do not have a variable length.
pub fn token_chars(&self) -> &'static str {
debug_assert!(!self.has_span_end() && *self != TokenKind::SpanEnd);
use TokenKind as TK;
match self {
TK::Exclamation => "!",
TK::Question => "?",
TK::Pound => "#",
TK::OpenAngle => "<",
TK::OpenCurly => "{",
TK::OpenParen => "(",
TK::OpenSquare => "[",
TK::CloseAngle => ">",
TK::CloseCurly => "}",
TK::CloseParen => ")",
TK::CloseSquare => "]",
TK::Colon => ":",
TK::Comma => ",",
TK::Dot => ".",
TK::SemiColon => ";",
TK::At => "@",
TK::Plus => "+",
TK::Minus => "-",
TK::Star => "*",
TK::Slash => "/",
TK::Percent => "%",
TK::Caret => "^",
TK::And => "&",
TK::Or => "|",
TK::Tilde => "~",
TK::Equal => "=",
TK::ColonColon => "::",
TK::DotDot => "..",
TK::ArrowRight => "->",
TK::AtEquals => "@=",
TK::PlusPlus => "++",
TK::PlusEquals => "+=",
TK::MinusMinus => "--",
TK::MinusEquals => "-=",
TK::StarEquals => "*=",
TK::SlashEquals => "/=",
TK::PercentEquals => "%=",
TK::CaretEquals => "^=",
TK::AndAnd => "&&",
TK::AndEquals => "&=",
TK::OrOr => "||",
TK::OrEquals => "|=",
TK::EqualEqual => "==",
TK::NotEqual => "!=",
TK::ShiftLeft => "<<",
TK::LessEquals => "<=",
TK::ShiftRight => ">>",
TK::GreaterEquals => ">=",
TK::ShiftLeftEquals => "<<=",
TK::ShiftRightEquals => ">>=",
// Lets keep these in explicitly for now, in case we want to add more symbols
TK::Ident | TK::Pragma | TK::Integer | TK::String | TK::Character |
TK::LineComment | TK::BlockComment | TK::SpanEnd => unreachable!(),
}
}
}
/// Represents a single token at a particular position.
pub struct Token {
pub kind: TokenKind,
pub pos: InputPosition,
}
impl Token {
pub(crate) fn new(kind: TokenKind, pos: InputPosition) -> Self {
Self{ kind, pos }
}
}
/// The kind of token ranges that are specially parsed by the tokenizer.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum TokenRangeKind {
Module,
Pragma,
Import,
Definition,
Code,
}
pub const NO_RELATION: i32 = -1;
pub const NO_SIBLING: i32 = NO_RELATION;
/// A range of tokens with a specific meaning. Such a range is part of a tree
/// where each parent tree envelops all of its children.
#[derive(Debug)]
pub struct TokenRange {
// Index of parent in `TokenBuffer.ranges`, does not have a parent if the
// range kind is Module, in that case the parent index is -1.
pub parent_idx: i32,
pub range_kind: TokenRangeKind,
pub curly_depth: u32,
// Offsets into `TokenBuffer.ranges`: the tokens belonging to this range.
pub start: u32, // first token (inclusive index)
pub end: u32, // last token (exclusive index)
// Child ranges
pub num_child_ranges: u32, // Number of subranges
pub first_child_idx: i32, // First subrange (or -1 if no subranges)
pub last_child_idx: i32, // Last subrange (or -1 if no subranges)
pub next_sibling_idx: i32, // Next subrange (or -1 if no next subrange)
}
pub struct TokenBuffer {
pub tokens: Vec<Token>,
pub ranges: Vec<TokenRange>,
}
impl TokenBuffer {
pub(crate) fn new() -> Self {
Self{ tokens: Vec::new(), ranges: Vec::new() }
}
pub(crate) fn iter_range<'a>(&'a self, range: &TokenRange) -> TokenIter<'a> {
TokenIter::new(self, range.start as usize, range.end as usize)
}
pub(crate) fn start_pos(&self, range: &TokenRange) -> InputPosition {
self.tokens[range.start as usize].pos
}
pub(crate) fn end_pos(&self, range: &TokenRange) -> InputPosition {
let last_token = &self.tokens[range.end as usize - 1];
if last_token.kind == TokenKind::SpanEnd {
return last_token.pos
} else {
debug_assert!(!last_token.kind.has_span_end());
return last_token.pos.with_offset(last_token.kind.num_characters());
}
}
}
/// Iterator over tokens within a specific `TokenRange`.
pub(crate) struct TokenIter<'a> {
tokens: &'a Vec<Token>,
cur: usize,
end: usize,
}
impl<'a> TokenIter<'a> {
fn new(buffer: &'a TokenBuffer, start: usize, end: usize) -> Self {
Self{ tokens: &buffer.tokens, cur: start, end }
}
/// Returns the next token (may include comments), or `None` if at the end
/// of the range.
pub(crate) fn next_including_comments(&self) -> Option<TokenKind> {
if self.cur >= self.end {
return None;
}
let token = &self.tokens[self.cur];
Some(token.kind)
}
/// Returns the next token (but skips over comments), or `None` if at the
/// end of the range
pub(crate) fn next(&mut self) -> Option<TokenKind> {
while let Some(token_kind) = self.next_including_comments() {
if token_kind != TokenKind::LineComment && token_kind != TokenKind::BlockComment {
return Some(token_kind);
}
self.consume();
}
return None
}
/// Peeks ahead by one token (i.e. the one that comes after `next()`), and
/// skips over comments
pub(crate) fn peek(&self) -> Option<TokenKind> {
for next_idx in self.cur + 1..self.end {
let next_kind = self.tokens[next_idx].kind;
if next_kind != TokenKind::LineComment && next_kind != TokenKind::BlockComment && next_kind != TokenKind::SpanEnd {
return Some(next_kind);
}
}
return None;
}
/// Returns the start position belonging to the token returned by `next`. If
/// there is not a next token, then we return the end position of the
/// previous token.
pub(crate) fn last_valid_pos(&self) -> InputPosition {
if self.cur < self.end {
// Return token position
return self.tokens[self.cur].pos
}
// Return previous token end
let token = &self.tokens[self.cur - 1];
return if token.kind == TokenKind::SpanEnd {
token.pos
} else {
token.pos.with_offset(token.kind.num_characters())
};
}
/// Returns the token range belonging to the token returned by `next`. This
/// assumes that we're not at the end of the range we're iterating over.
/// TODO: @cleanup Phase out?
pub(crate) fn next_positions(&self) -> (InputPosition, InputPosition) {
debug_assert!(self.cur < self.end);
let token = &self.tokens[self.cur];
if token.kind.has_span_end() {
let span_end = &self.tokens[self.cur + 1];
debug_assert_eq!(span_end.kind, TokenKind::SpanEnd);
(token.pos, span_end.pos)
} else {
let offset = token.kind.num_characters();
(token.pos, token.pos.with_offset(offset))
}
}
/// See `next_positions`
pub(crate) fn next_span(&self) -> InputSpan {
let (begin, end) = self.next_positions();
return InputSpan::from_positions(begin, end)
}
/// Advances the iterator to the next (meaningful) token.
pub(crate) fn consume(&mut self) {
if let Some(kind) = self.next_including_comments() {
if kind.has_span_end() {
self.cur += 2;
} else |
}
}
/// Saves the current iteration position, may be passed to `load` to return
/// the iterator to a previous position.
pub(crate) fn save(&self) -> (usize, usize) {
(self.cur, self.end)
}
pub(crate) fn load(&mut self, saved: (usize, usize)) {
self.cur = saved.0;
self.end = saved.1;
}
} | {
self.cur += 1;
} | conditional_block |
value_stability.rs | // Copyright 2018 Developers of the Rand project.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use average::assert_almost_eq;
use core::fmt::Debug;
use rand::Rng;
use rand_distr::*;
fn get_rng(seed: u64) -> impl rand::Rng {
// For tests, we want a statistically good, fast, reproducible RNG.
// PCG32 will do fine, and will be easy to embed if we ever need to.
const INC: u64 = 11634580027462260723;
rand_pcg::Pcg32::new(seed, INC)
}
/// We only assert approximate equality since some platforms do not perform
/// identically (i686-unknown-linux-gnu and most notably x86_64-pc-windows-gnu).
trait ApproxEq {
fn assert_almost_eq(&self, rhs: &Self);
}
impl ApproxEq for f32 {
fn assert_almost_eq(&self, rhs: &Self) {
assert_almost_eq!(self, rhs, 1e-6);
}
}
impl ApproxEq for f64 {
fn assert_almost_eq(&self, rhs: &Self) {
assert_almost_eq!(self, rhs, 1e-14);
}
}
impl ApproxEq for u64 {
fn assert_almost_eq(&self, rhs: &Self) {
assert_eq!(self, rhs);
}
}
impl<T: ApproxEq> ApproxEq for [T; 2] {
fn assert_almost_eq(&self, rhs: &Self) {
self[0].assert_almost_eq(&rhs[0]);
self[1].assert_almost_eq(&rhs[1]);
}
}
impl<T: ApproxEq> ApproxEq for [T; 3] {
fn assert_almost_eq(&self, rhs: &Self) {
self[0].assert_almost_eq(&rhs[0]);
self[1].assert_almost_eq(&rhs[1]);
self[2].assert_almost_eq(&rhs[2]);
}
}
fn test_samples<F: Debug + ApproxEq, D: Distribution<F>>(
seed: u64, distr: D, expected: &[F],
) {
let mut rng = get_rng(seed);
for val in expected {
let x = rng.sample(&distr);
x.assert_almost_eq(val);
}
}
#[test]
fn binomial_stability() {
// We have multiple code paths: np < 10, p > 0.5
test_samples(353, Binomial::new(2, 0.7).unwrap(), &[1, 1, 2, 1]);
test_samples(353, Binomial::new(20, 0.3).unwrap(), &[7, 7, 5, 7]);
test_samples(353, Binomial::new(2000, 0.6).unwrap(), &[1194, 1208, 1192, 1210]);
}
#[test]
fn geometric_stability() {
test_samples(464, StandardGeometric, &[3, 0, 1, 0, 0, 3, 2, 1, 2, 0]);
test_samples(464, Geometric::new(0.5).unwrap(), &[2, 1, 1, 0, 0, 1, 0, 1]);
test_samples(464, Geometric::new(0.05).unwrap(), &[24, 51, 81, 67, 27, 11, 7, 6]);
test_samples(464, Geometric::new(0.95).unwrap(), &[0, 0, 0, 0, 1, 0, 0, 0]);
// expect non-random behaviour for series of pre-determined trials
test_samples(464, Geometric::new(0.0).unwrap(), &[u64::max_value(); 100][..]);
test_samples(464, Geometric::new(1.0).unwrap(), &[0; 100][..]);
}
#[test]
fn hypergeometric_stability() {
// We have multiple code paths based on the distribution's mode and sample_size
test_samples(7221, Hypergeometric::new(99, 33, 8).unwrap(), &[4, 3, 2, 2, 3, 2, 3, 1]); // Algorithm HIN
test_samples(7221, Hypergeometric::new(100, 50, 50).unwrap(), &[23, 27, 26, 27, 22, 24, 31, 22]); // Algorithm H2PE
}
#[test]
fn unit_ball_stability() {
test_samples(2, UnitBall, &[
[0.018035709265959987f64, -0.4348771383120438, -0.07982762085055706],
[0.10588569388223945, -0.4734350111375454, -0.7392104908825501],
[0.11060237642041049, -0.16065642822852677, -0.8444043930440075]
]);
}
#[test]
fn unit_circle_stability() {
test_samples(2, UnitCircle, &[
[-0.9965658683520504f64, -0.08280380447614634],
[-0.9790853270389644, -0.20345004884984505],
[-0.8449189758898707, 0.5348943112253227],
]);
}
#[test]
fn unit_sphere_stability() {
test_samples(2, UnitSphere, &[
[0.03247542860231647f64, -0.7830477442152738, 0.6211131755296027],
[-0.09978440840914075, 0.9706650829833128, -0.21875184231323952],
[0.2735582468624679, 0.9435374242279655, -0.1868234852870203],
]);
}
#[test]
fn unit_disc_stability() {
test_samples(2, UnitDisc, &[
[0.018035709265959987f64, -0.4348771383120438],
[-0.07982762085055706, 0.7765329819820659],
[0.21450745997299503, 0.7398636984333291],
]);
}
#[test]
fn pareto_stability() {
test_samples(213, Pareto::new(1.0, 1.0).unwrap(), &[
1.0423688f32, 2.1235929, 4.132709, 1.4679428,
]);
test_samples(213, Pareto::new(2.0, 0.5).unwrap(), &[
9.019295276219136f64,
4.3097126018270595,
6.837815045397157,
105.8826669383772,
]);
}
#[test]
fn poisson_stability() {
test_samples(223, Poisson::new(7.0).unwrap(), &[5.0f32, 11.0, 6.0, 5.0]);
test_samples(223, Poisson::new(7.0).unwrap(), &[9.0f64, 5.0, 7.0, 6.0]);
test_samples(223, Poisson::new(27.0).unwrap(), &[28.0f32, 32.0, 36.0, 36.0]);
}
#[test]
fn triangular_stability() {
test_samples(860, Triangular::new(2., 10., 3.).unwrap(), &[
5.74373257511361f64,
7.890059162791258f64,
4.7256280652553455f64,
2.9474808121184077f64,
3.058301946314053f64,
]);
}
#[test]
fn normal_inverse_gaussian_stability() {
test_samples(213, NormalInverseGaussian::new(2.0, 1.0).unwrap(), &[
0.6568966f32, 1.3744819, 2.216063, 0.11488572,
]);
test_samples(213, NormalInverseGaussian::new(2.0, 1.0).unwrap(), &[
0.6838707059642927f64,
2.4447306460569784,
0.2361045023235968,
1.7774534624785319,
]);
}
#[test]
fn pert_stability() {
// mean = 4, var = 12/7
test_samples(860, Pert::new(2., 10., 3.).unwrap(), &[
4.908681667460367,
4.014196196158352,
2.6489397149197234,
3.4569780580044727,
4.242864311947118,
]);
}
#[test]
fn inverse_gaussian_stability() {
test_samples(213, InverseGaussian::new(1.0, 3.0).unwrap(),&[
0.9339157f32, 1.108113, 0.50864697, 0.39849377,
]);
test_samples(213, InverseGaussian::new(1.0, 3.0).unwrap(), &[
1.0707604954722476f64,
0.9628140605340697,
0.4069687656468226,
0.660283852985818,
]);
}
#[test]
fn | () {
// Gamma has 3 cases: shape == 1, shape < 1, shape > 1
test_samples(223, Gamma::new(1.0, 5.0).unwrap(), &[
5.398085f32, 9.162783, 0.2300583, 1.7235851,
]);
test_samples(223, Gamma::new(0.8, 5.0).unwrap(), &[
0.5051203f32, 0.9048302, 3.095812, 1.8566116,
]);
test_samples(223, Gamma::new(1.1, 5.0).unwrap(), &[
7.783878094584059f64,
1.4939528171618057,
8.638017638857592,
3.0949337228829004,
]);
// ChiSquared has 2 cases: k == 1, k != 1
test_samples(223, ChiSquared::new(1.0).unwrap(), &[
0.4893526200348249f64,
1.635249736808788,
0.5013580219361969,
0.1457735613733489,
]);
test_samples(223, ChiSquared::new(0.1).unwrap(), &[
0.014824404726978617f64,
0.021602123937134326,
0.0000003431429746851693,
0.00000002291755769542258,
]);
test_samples(223, ChiSquared::new(10.0).unwrap(), &[
12.693656f32, 6.812016, 11.082001, 12.436167,
]);
// FisherF has same special cases as ChiSquared on each param
test_samples(223, FisherF::new(1.0, 13.5).unwrap(), &[
0.32283646f32, 0.048049655, 0.0788893, 1.817178,
]);
test_samples(223, FisherF::new(1.0, 1.0).unwrap(), &[
0.29925257f32, 3.4392934, 9.567652, 0.020074,
]);
test_samples(223, FisherF::new(0.7, 13.5).unwrap(), &[
3.3196593155045124f64,
0.3409169916262829,
0.03377989856426519,
0.00004041672861036937,
]);
// StudentT has same special cases as ChiSquared
test_samples(223, StudentT::new(1.0).unwrap(), &[
0.54703987f32, -1.8545331, 3.093162, -0.14168274,
]);
test_samples(223, StudentT::new(1.1).unwrap(), &[
0.7729195887949754f64,
1.2606210611616204,
-1.7553606501113175,
-2.377641221169782,
]);
// Beta has two special cases:
//
// 1. min(alpha, beta) <= 1
// 2. min(alpha, beta) > 1
test_samples(223, Beta::new(1.0, 0.8).unwrap(), &[
0.8300703726659456,
0.8134131062097899,
0.47912589330631555,
0.25323238071138526,
]);
test_samples(223, Beta::new(3.0, 1.2).unwrap(), &[
0.49563509121756827,
0.9551305482256759,
0.5151181353461637,
0.7551732971235077,
]);
}
#[test]
fn exponential_stability() {
test_samples(223, Exp1, &[
1.079617f32, 1.8325565, 0.04601166, 0.34471703,
]);
test_samples(223, Exp1, &[
1.0796170642388276f64,
1.8325565304274,
0.04601166186842716,
0.3447170217100157,
]);
test_samples(223, Exp::new(2.0).unwrap(), &[
0.5398085f32, 0.91627824, 0.02300583, 0.17235851,
]);
test_samples(223, Exp::new(1.0).unwrap(), &[
1.0796170642388276f64,
1.8325565304274,
0.04601166186842716,
0.3447170217100157,
]);
}
#[test]
fn normal_stability() {
test_samples(213, StandardNormal, &[
-0.11844189f32, 0.781378, 0.06563994, -1.1932899,
]);
test_samples(213, StandardNormal, &[
-0.11844188827977231f64,
0.7813779637772346,
0.06563993969580051,
-1.1932899004186373,
]);
test_samples(213, Normal::new(0.0, 1.0).unwrap(), &[
-0.11844189f32, 0.781378, 0.06563994, -1.1932899,
]);
test_samples(213, Normal::new(2.0, 0.5).unwrap(), &[
1.940779055860114f64,
2.3906889818886174,
2.0328199698479,
1.4033550497906813,
]);
test_samples(213, LogNormal::new(0.0, 1.0).unwrap(), &[
0.88830346f32, 2.1844804, 1.0678421, 0.30322206,
]);
test_samples(213, LogNormal::new(2.0, 0.5).unwrap(), &[
6.964174338639032f64,
10.921015733601452,
7.6355881556915906,
4.068828213584092,
]);
}
#[test]
fn weibull_stability() {
test_samples(213, Weibull::new(1.0, 1.0).unwrap(), &[
0.041495778f32, 0.7531094, 1.4189332, 0.38386202,
]);
test_samples(213, Weibull::new(2.0, 0.5).unwrap(), &[
1.1343478702739669f64,
0.29470010050655226,
0.7556151370284702,
7.877212340241561,
]);
}
#[cfg(feature = "alloc")]
#[test]
fn dirichlet_stability() {
let mut rng = get_rng(223);
assert_eq!(
rng.sample(Dirichlet::new([1.0, 2.0, 3.0]).unwrap()),
[0.12941567177708177, 0.4702121891675036, 0.4003721390554146]
);
assert_eq!(rng.sample(Dirichlet::new([8.0; 5]).unwrap()), [
0.17684200044809556,
0.29915953935953055,
0.1832858056608014,
0.1425623503573967,
0.19815030417417595
]);
// Test stability for the case where all alphas are less than 0.1.
assert_eq!(
rng.sample(Dirichlet::new([0.05, 0.025, 0.075, 0.05]).unwrap()),
[
0.00027580456855692104,
2.296135759821706e-20,
3.004118281150937e-9,
0.9997241924273248
]
);
}
#[test]
fn cauchy_stability() {
test_samples(353, Cauchy::new(100f64, 10.0).unwrap(), &[
77.93369152808678f64,
90.1606912098641,
125.31516221323625,
86.10217834773925,
]);
// Unfortunately this test is not fully portable due to reliance on the
// system's implementation of tanf (see doc on Cauchy struct).
// We use a lower threshold of 1e-5 here.
let distr = Cauchy::new(10f32, 7.0).unwrap();
let mut rng = get_rng(353);
let expected = [15.023088, -5.446413, 3.7092876, 3.112482];
for &a in expected.iter() {
let b = rng.sample(&distr);
assert_almost_eq!(a, b, 1e-5);
}
}
| gamma_stability | identifier_name |
value_stability.rs | // Copyright 2018 Developers of the Rand project.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use average::assert_almost_eq;
use core::fmt::Debug;
use rand::Rng;
use rand_distr::*;
fn get_rng(seed: u64) -> impl rand::Rng {
// For tests, we want a statistically good, fast, reproducible RNG.
// PCG32 will do fine, and will be easy to embed if we ever need to.
const INC: u64 = 11634580027462260723;
rand_pcg::Pcg32::new(seed, INC)
}
/// We only assert approximate equality since some platforms do not perform
/// identically (i686-unknown-linux-gnu and most notably x86_64-pc-windows-gnu).
trait ApproxEq {
fn assert_almost_eq(&self, rhs: &Self);
}
impl ApproxEq for f32 {
fn assert_almost_eq(&self, rhs: &Self) {
assert_almost_eq!(self, rhs, 1e-6);
}
}
impl ApproxEq for f64 {
fn assert_almost_eq(&self, rhs: &Self) {
assert_almost_eq!(self, rhs, 1e-14);
}
}
impl ApproxEq for u64 {
fn assert_almost_eq(&self, rhs: &Self) {
assert_eq!(self, rhs);
}
}
impl<T: ApproxEq> ApproxEq for [T; 2] {
fn assert_almost_eq(&self, rhs: &Self) {
self[0].assert_almost_eq(&rhs[0]);
self[1].assert_almost_eq(&rhs[1]);
}
}
impl<T: ApproxEq> ApproxEq for [T; 3] {
fn assert_almost_eq(&self, rhs: &Self) {
self[0].assert_almost_eq(&rhs[0]);
self[1].assert_almost_eq(&rhs[1]);
self[2].assert_almost_eq(&rhs[2]);
}
}
fn test_samples<F: Debug + ApproxEq, D: Distribution<F>>(
seed: u64, distr: D, expected: &[F],
) {
let mut rng = get_rng(seed);
for val in expected {
let x = rng.sample(&distr);
x.assert_almost_eq(val);
}
}
#[test]
fn binomial_stability() {
// We have multiple code paths: np < 10, p > 0.5
test_samples(353, Binomial::new(2, 0.7).unwrap(), &[1, 1, 2, 1]);
test_samples(353, Binomial::new(20, 0.3).unwrap(), &[7, 7, 5, 7]);
test_samples(353, Binomial::new(2000, 0.6).unwrap(), &[1194, 1208, 1192, 1210]);
}
#[test]
fn geometric_stability() {
test_samples(464, StandardGeometric, &[3, 0, 1, 0, 0, 3, 2, 1, 2, 0]);
test_samples(464, Geometric::new(0.5).unwrap(), &[2, 1, 1, 0, 0, 1, 0, 1]);
test_samples(464, Geometric::new(0.05).unwrap(), &[24, 51, 81, 67, 27, 11, 7, 6]);
test_samples(464, Geometric::new(0.95).unwrap(), &[0, 0, 0, 0, 1, 0, 0, 0]);
// expect non-random behaviour for series of pre-determined trials
test_samples(464, Geometric::new(0.0).unwrap(), &[u64::max_value(); 100][..]);
test_samples(464, Geometric::new(1.0).unwrap(), &[0; 100][..]);
}
#[test]
fn hypergeometric_stability() {
// We have multiple code paths based on the distribution's mode and sample_size
test_samples(7221, Hypergeometric::new(99, 33, 8).unwrap(), &[4, 3, 2, 2, 3, 2, 3, 1]); // Algorithm HIN
test_samples(7221, Hypergeometric::new(100, 50, 50).unwrap(), &[23, 27, 26, 27, 22, 24, 31, 22]); // Algorithm H2PE
}
#[test]
fn unit_ball_stability() {
test_samples(2, UnitBall, &[
[0.018035709265959987f64, -0.4348771383120438, -0.07982762085055706],
[0.10588569388223945, -0.4734350111375454, -0.7392104908825501],
[0.11060237642041049, -0.16065642822852677, -0.8444043930440075]
]);
}
#[test]
fn unit_circle_stability() {
test_samples(2, UnitCircle, &[
[-0.9965658683520504f64, -0.08280380447614634],
[-0.9790853270389644, -0.20345004884984505],
[-0.8449189758898707, 0.5348943112253227],
]);
}
#[test]
fn unit_sphere_stability() {
test_samples(2, UnitSphere, &[
[0.03247542860231647f64, -0.7830477442152738, 0.6211131755296027],
[-0.09978440840914075, 0.9706650829833128, -0.21875184231323952],
[0.2735582468624679, 0.9435374242279655, -0.1868234852870203],
]);
}
#[test]
fn unit_disc_stability() {
test_samples(2, UnitDisc, &[
[0.018035709265959987f64, -0.4348771383120438],
[-0.07982762085055706, 0.7765329819820659],
[0.21450745997299503, 0.7398636984333291],
]);
}
#[test]
fn pareto_stability() {
test_samples(213, Pareto::new(1.0, 1.0).unwrap(), &[
1.0423688f32, 2.1235929, 4.132709, 1.4679428,
]);
test_samples(213, Pareto::new(2.0, 0.5).unwrap(), &[
9.019295276219136f64,
4.3097126018270595,
6.837815045397157,
105.8826669383772,
]);
}
#[test]
fn poisson_stability() {
test_samples(223, Poisson::new(7.0).unwrap(), &[5.0f32, 11.0, 6.0, 5.0]);
test_samples(223, Poisson::new(7.0).unwrap(), &[9.0f64, 5.0, 7.0, 6.0]);
test_samples(223, Poisson::new(27.0).unwrap(), &[28.0f32, 32.0, 36.0, 36.0]);
}
#[test]
fn triangular_stability() {
test_samples(860, Triangular::new(2., 10., 3.).unwrap(), &[
5.74373257511361f64,
7.890059162791258f64,
4.7256280652553455f64,
2.9474808121184077f64,
3.058301946314053f64,
]);
}
#[test]
fn normal_inverse_gaussian_stability() {
test_samples(213, NormalInverseGaussian::new(2.0, 1.0).unwrap(), &[
0.6568966f32, 1.3744819, 2.216063, 0.11488572,
]);
test_samples(213, NormalInverseGaussian::new(2.0, 1.0).unwrap(), &[
0.6838707059642927f64,
2.4447306460569784,
0.2361045023235968,
1.7774534624785319,
]);
}
#[test]
fn pert_stability() {
// mean = 4, var = 12/7
test_samples(860, Pert::new(2., 10., 3.).unwrap(), &[
4.908681667460367,
4.014196196158352,
2.6489397149197234,
3.4569780580044727,
4.242864311947118,
]);
}
#[test]
fn inverse_gaussian_stability() {
test_samples(213, InverseGaussian::new(1.0, 3.0).unwrap(),&[
0.9339157f32, 1.108113, 0.50864697, 0.39849377,
]);
test_samples(213, InverseGaussian::new(1.0, 3.0).unwrap(), &[
1.0707604954722476f64,
0.9628140605340697,
0.4069687656468226,
0.660283852985818,
]);
}
#[test]
fn gamma_stability() {
// Gamma has 3 cases: shape == 1, shape < 1, shape > 1
test_samples(223, Gamma::new(1.0, 5.0).unwrap(), &[
5.398085f32, 9.162783, 0.2300583, 1.7235851,
]);
test_samples(223, Gamma::new(0.8, 5.0).unwrap(), &[
0.5051203f32, 0.9048302, 3.095812, 1.8566116,
]);
test_samples(223, Gamma::new(1.1, 5.0).unwrap(), &[
7.783878094584059f64,
1.4939528171618057,
8.638017638857592,
3.0949337228829004,
]);
// ChiSquared has 2 cases: k == 1, k != 1
test_samples(223, ChiSquared::new(1.0).unwrap(), &[
0.4893526200348249f64,
1.635249736808788,
0.5013580219361969,
0.1457735613733489,
]);
test_samples(223, ChiSquared::new(0.1).unwrap(), &[
0.014824404726978617f64,
0.021602123937134326,
0.0000003431429746851693,
0.00000002291755769542258,
]);
test_samples(223, ChiSquared::new(10.0).unwrap(), &[
12.693656f32, 6.812016, 11.082001, 12.436167,
]);
// FisherF has same special cases as ChiSquared on each param
test_samples(223, FisherF::new(1.0, 13.5).unwrap(), &[
0.32283646f32, 0.048049655, 0.0788893, 1.817178,
]);
test_samples(223, FisherF::new(1.0, 1.0).unwrap(), &[
0.29925257f32, 3.4392934, 9.567652, 0.020074,
]);
test_samples(223, FisherF::new(0.7, 13.5).unwrap(), &[
3.3196593155045124f64,
0.3409169916262829,
0.03377989856426519,
0.00004041672861036937,
]);
// StudentT has same special cases as ChiSquared
test_samples(223, StudentT::new(1.0).unwrap(), &[
0.54703987f32, -1.8545331, 3.093162, -0.14168274,
]);
test_samples(223, StudentT::new(1.1).unwrap(), &[
0.7729195887949754f64,
1.2606210611616204,
-1.7553606501113175,
-2.377641221169782,
]);
// Beta has two special cases:
//
// 1. min(alpha, beta) <= 1
// 2. min(alpha, beta) > 1
test_samples(223, Beta::new(1.0, 0.8).unwrap(), &[
0.8300703726659456, | 0.47912589330631555,
0.25323238071138526,
]);
test_samples(223, Beta::new(3.0, 1.2).unwrap(), &[
0.49563509121756827,
0.9551305482256759,
0.5151181353461637,
0.7551732971235077,
]);
}
#[test]
fn exponential_stability() {
test_samples(223, Exp1, &[
1.079617f32, 1.8325565, 0.04601166, 0.34471703,
]);
test_samples(223, Exp1, &[
1.0796170642388276f64,
1.8325565304274,
0.04601166186842716,
0.3447170217100157,
]);
test_samples(223, Exp::new(2.0).unwrap(), &[
0.5398085f32, 0.91627824, 0.02300583, 0.17235851,
]);
test_samples(223, Exp::new(1.0).unwrap(), &[
1.0796170642388276f64,
1.8325565304274,
0.04601166186842716,
0.3447170217100157,
]);
}
#[test]
fn normal_stability() {
test_samples(213, StandardNormal, &[
-0.11844189f32, 0.781378, 0.06563994, -1.1932899,
]);
test_samples(213, StandardNormal, &[
-0.11844188827977231f64,
0.7813779637772346,
0.06563993969580051,
-1.1932899004186373,
]);
test_samples(213, Normal::new(0.0, 1.0).unwrap(), &[
-0.11844189f32, 0.781378, 0.06563994, -1.1932899,
]);
test_samples(213, Normal::new(2.0, 0.5).unwrap(), &[
1.940779055860114f64,
2.3906889818886174,
2.0328199698479,
1.4033550497906813,
]);
test_samples(213, LogNormal::new(0.0, 1.0).unwrap(), &[
0.88830346f32, 2.1844804, 1.0678421, 0.30322206,
]);
test_samples(213, LogNormal::new(2.0, 0.5).unwrap(), &[
6.964174338639032f64,
10.921015733601452,
7.6355881556915906,
4.068828213584092,
]);
}
#[test]
fn weibull_stability() {
test_samples(213, Weibull::new(1.0, 1.0).unwrap(), &[
0.041495778f32, 0.7531094, 1.4189332, 0.38386202,
]);
test_samples(213, Weibull::new(2.0, 0.5).unwrap(), &[
1.1343478702739669f64,
0.29470010050655226,
0.7556151370284702,
7.877212340241561,
]);
}
#[cfg(feature = "alloc")]
#[test]
fn dirichlet_stability() {
let mut rng = get_rng(223);
assert_eq!(
rng.sample(Dirichlet::new([1.0, 2.0, 3.0]).unwrap()),
[0.12941567177708177, 0.4702121891675036, 0.4003721390554146]
);
assert_eq!(rng.sample(Dirichlet::new([8.0; 5]).unwrap()), [
0.17684200044809556,
0.29915953935953055,
0.1832858056608014,
0.1425623503573967,
0.19815030417417595
]);
// Test stability for the case where all alphas are less than 0.1.
assert_eq!(
rng.sample(Dirichlet::new([0.05, 0.025, 0.075, 0.05]).unwrap()),
[
0.00027580456855692104,
2.296135759821706e-20,
3.004118281150937e-9,
0.9997241924273248
]
);
}
#[test]
fn cauchy_stability() {
test_samples(353, Cauchy::new(100f64, 10.0).unwrap(), &[
77.93369152808678f64,
90.1606912098641,
125.31516221323625,
86.10217834773925,
]);
// Unfortunately this test is not fully portable due to reliance on the
// system's implementation of tanf (see doc on Cauchy struct).
// We use a lower threshold of 1e-5 here.
let distr = Cauchy::new(10f32, 7.0).unwrap();
let mut rng = get_rng(353);
let expected = [15.023088, -5.446413, 3.7092876, 3.112482];
for &a in expected.iter() {
let b = rng.sample(&distr);
assert_almost_eq!(a, b, 1e-5);
}
} | 0.8134131062097899, | random_line_split |
value_stability.rs | // Copyright 2018 Developers of the Rand project.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use average::assert_almost_eq;
use core::fmt::Debug;
use rand::Rng;
use rand_distr::*;
fn get_rng(seed: u64) -> impl rand::Rng {
// For tests, we want a statistically good, fast, reproducible RNG.
// PCG32 will do fine, and will be easy to embed if we ever need to.
const INC: u64 = 11634580027462260723;
rand_pcg::Pcg32::new(seed, INC)
}
/// We only assert approximate equality since some platforms do not perform
/// identically (i686-unknown-linux-gnu and most notably x86_64-pc-windows-gnu).
trait ApproxEq {
fn assert_almost_eq(&self, rhs: &Self);
}
impl ApproxEq for f32 {
fn assert_almost_eq(&self, rhs: &Self) {
assert_almost_eq!(self, rhs, 1e-6);
}
}
impl ApproxEq for f64 {
fn assert_almost_eq(&self, rhs: &Self) {
assert_almost_eq!(self, rhs, 1e-14);
}
}
impl ApproxEq for u64 {
fn assert_almost_eq(&self, rhs: &Self) {
assert_eq!(self, rhs);
}
}
impl<T: ApproxEq> ApproxEq for [T; 2] {
fn assert_almost_eq(&self, rhs: &Self) |
}
impl<T: ApproxEq> ApproxEq for [T; 3] {
fn assert_almost_eq(&self, rhs: &Self) {
self[0].assert_almost_eq(&rhs[0]);
self[1].assert_almost_eq(&rhs[1]);
self[2].assert_almost_eq(&rhs[2]);
}
}
fn test_samples<F: Debug + ApproxEq, D: Distribution<F>>(
seed: u64, distr: D, expected: &[F],
) {
let mut rng = get_rng(seed);
for val in expected {
let x = rng.sample(&distr);
x.assert_almost_eq(val);
}
}
#[test]
fn binomial_stability() {
// We have multiple code paths: np < 10, p > 0.5
test_samples(353, Binomial::new(2, 0.7).unwrap(), &[1, 1, 2, 1]);
test_samples(353, Binomial::new(20, 0.3).unwrap(), &[7, 7, 5, 7]);
test_samples(353, Binomial::new(2000, 0.6).unwrap(), &[1194, 1208, 1192, 1210]);
}
#[test]
fn geometric_stability() {
test_samples(464, StandardGeometric, &[3, 0, 1, 0, 0, 3, 2, 1, 2, 0]);
test_samples(464, Geometric::new(0.5).unwrap(), &[2, 1, 1, 0, 0, 1, 0, 1]);
test_samples(464, Geometric::new(0.05).unwrap(), &[24, 51, 81, 67, 27, 11, 7, 6]);
test_samples(464, Geometric::new(0.95).unwrap(), &[0, 0, 0, 0, 1, 0, 0, 0]);
// expect non-random behaviour for series of pre-determined trials
test_samples(464, Geometric::new(0.0).unwrap(), &[u64::max_value(); 100][..]);
test_samples(464, Geometric::new(1.0).unwrap(), &[0; 100][..]);
}
#[test]
fn hypergeometric_stability() {
// We have multiple code paths based on the distribution's mode and sample_size
test_samples(7221, Hypergeometric::new(99, 33, 8).unwrap(), &[4, 3, 2, 2, 3, 2, 3, 1]); // Algorithm HIN
test_samples(7221, Hypergeometric::new(100, 50, 50).unwrap(), &[23, 27, 26, 27, 22, 24, 31, 22]); // Algorithm H2PE
}
#[test]
fn unit_ball_stability() {
test_samples(2, UnitBall, &[
[0.018035709265959987f64, -0.4348771383120438, -0.07982762085055706],
[0.10588569388223945, -0.4734350111375454, -0.7392104908825501],
[0.11060237642041049, -0.16065642822852677, -0.8444043930440075]
]);
}
#[test]
fn unit_circle_stability() {
test_samples(2, UnitCircle, &[
[-0.9965658683520504f64, -0.08280380447614634],
[-0.9790853270389644, -0.20345004884984505],
[-0.8449189758898707, 0.5348943112253227],
]);
}
#[test]
fn unit_sphere_stability() {
test_samples(2, UnitSphere, &[
[0.03247542860231647f64, -0.7830477442152738, 0.6211131755296027],
[-0.09978440840914075, 0.9706650829833128, -0.21875184231323952],
[0.2735582468624679, 0.9435374242279655, -0.1868234852870203],
]);
}
#[test]
fn unit_disc_stability() {
test_samples(2, UnitDisc, &[
[0.018035709265959987f64, -0.4348771383120438],
[-0.07982762085055706, 0.7765329819820659],
[0.21450745997299503, 0.7398636984333291],
]);
}
#[test]
fn pareto_stability() {
test_samples(213, Pareto::new(1.0, 1.0).unwrap(), &[
1.0423688f32, 2.1235929, 4.132709, 1.4679428,
]);
test_samples(213, Pareto::new(2.0, 0.5).unwrap(), &[
9.019295276219136f64,
4.3097126018270595,
6.837815045397157,
105.8826669383772,
]);
}
#[test]
fn poisson_stability() {
test_samples(223, Poisson::new(7.0).unwrap(), &[5.0f32, 11.0, 6.0, 5.0]);
test_samples(223, Poisson::new(7.0).unwrap(), &[9.0f64, 5.0, 7.0, 6.0]);
test_samples(223, Poisson::new(27.0).unwrap(), &[28.0f32, 32.0, 36.0, 36.0]);
}
#[test]
fn triangular_stability() {
test_samples(860, Triangular::new(2., 10., 3.).unwrap(), &[
5.74373257511361f64,
7.890059162791258f64,
4.7256280652553455f64,
2.9474808121184077f64,
3.058301946314053f64,
]);
}
#[test]
fn normal_inverse_gaussian_stability() {
test_samples(213, NormalInverseGaussian::new(2.0, 1.0).unwrap(), &[
0.6568966f32, 1.3744819, 2.216063, 0.11488572,
]);
test_samples(213, NormalInverseGaussian::new(2.0, 1.0).unwrap(), &[
0.6838707059642927f64,
2.4447306460569784,
0.2361045023235968,
1.7774534624785319,
]);
}
#[test]
fn pert_stability() {
// mean = 4, var = 12/7
test_samples(860, Pert::new(2., 10., 3.).unwrap(), &[
4.908681667460367,
4.014196196158352,
2.6489397149197234,
3.4569780580044727,
4.242864311947118,
]);
}
#[test]
fn inverse_gaussian_stability() {
test_samples(213, InverseGaussian::new(1.0, 3.0).unwrap(),&[
0.9339157f32, 1.108113, 0.50864697, 0.39849377,
]);
test_samples(213, InverseGaussian::new(1.0, 3.0).unwrap(), &[
1.0707604954722476f64,
0.9628140605340697,
0.4069687656468226,
0.660283852985818,
]);
}
#[test]
fn gamma_stability() {
// Gamma has 3 cases: shape == 1, shape < 1, shape > 1
test_samples(223, Gamma::new(1.0, 5.0).unwrap(), &[
5.398085f32, 9.162783, 0.2300583, 1.7235851,
]);
test_samples(223, Gamma::new(0.8, 5.0).unwrap(), &[
0.5051203f32, 0.9048302, 3.095812, 1.8566116,
]);
test_samples(223, Gamma::new(1.1, 5.0).unwrap(), &[
7.783878094584059f64,
1.4939528171618057,
8.638017638857592,
3.0949337228829004,
]);
// ChiSquared has 2 cases: k == 1, k != 1
test_samples(223, ChiSquared::new(1.0).unwrap(), &[
0.4893526200348249f64,
1.635249736808788,
0.5013580219361969,
0.1457735613733489,
]);
test_samples(223, ChiSquared::new(0.1).unwrap(), &[
0.014824404726978617f64,
0.021602123937134326,
0.0000003431429746851693,
0.00000002291755769542258,
]);
test_samples(223, ChiSquared::new(10.0).unwrap(), &[
12.693656f32, 6.812016, 11.082001, 12.436167,
]);
// FisherF has same special cases as ChiSquared on each param
test_samples(223, FisherF::new(1.0, 13.5).unwrap(), &[
0.32283646f32, 0.048049655, 0.0788893, 1.817178,
]);
test_samples(223, FisherF::new(1.0, 1.0).unwrap(), &[
0.29925257f32, 3.4392934, 9.567652, 0.020074,
]);
test_samples(223, FisherF::new(0.7, 13.5).unwrap(), &[
3.3196593155045124f64,
0.3409169916262829,
0.03377989856426519,
0.00004041672861036937,
]);
// StudentT has same special cases as ChiSquared
test_samples(223, StudentT::new(1.0).unwrap(), &[
0.54703987f32, -1.8545331, 3.093162, -0.14168274,
]);
test_samples(223, StudentT::new(1.1).unwrap(), &[
0.7729195887949754f64,
1.2606210611616204,
-1.7553606501113175,
-2.377641221169782,
]);
// Beta has two special cases:
//
// 1. min(alpha, beta) <= 1
// 2. min(alpha, beta) > 1
test_samples(223, Beta::new(1.0, 0.8).unwrap(), &[
0.8300703726659456,
0.8134131062097899,
0.47912589330631555,
0.25323238071138526,
]);
test_samples(223, Beta::new(3.0, 1.2).unwrap(), &[
0.49563509121756827,
0.9551305482256759,
0.5151181353461637,
0.7551732971235077,
]);
}
#[test]
fn exponential_stability() {
test_samples(223, Exp1, &[
1.079617f32, 1.8325565, 0.04601166, 0.34471703,
]);
test_samples(223, Exp1, &[
1.0796170642388276f64,
1.8325565304274,
0.04601166186842716,
0.3447170217100157,
]);
test_samples(223, Exp::new(2.0).unwrap(), &[
0.5398085f32, 0.91627824, 0.02300583, 0.17235851,
]);
test_samples(223, Exp::new(1.0).unwrap(), &[
1.0796170642388276f64,
1.8325565304274,
0.04601166186842716,
0.3447170217100157,
]);
}
#[test]
fn normal_stability() {
test_samples(213, StandardNormal, &[
-0.11844189f32, 0.781378, 0.06563994, -1.1932899,
]);
test_samples(213, StandardNormal, &[
-0.11844188827977231f64,
0.7813779637772346,
0.06563993969580051,
-1.1932899004186373,
]);
test_samples(213, Normal::new(0.0, 1.0).unwrap(), &[
-0.11844189f32, 0.781378, 0.06563994, -1.1932899,
]);
test_samples(213, Normal::new(2.0, 0.5).unwrap(), &[
1.940779055860114f64,
2.3906889818886174,
2.0328199698479,
1.4033550497906813,
]);
test_samples(213, LogNormal::new(0.0, 1.0).unwrap(), &[
0.88830346f32, 2.1844804, 1.0678421, 0.30322206,
]);
test_samples(213, LogNormal::new(2.0, 0.5).unwrap(), &[
6.964174338639032f64,
10.921015733601452,
7.6355881556915906,
4.068828213584092,
]);
}
#[test]
fn weibull_stability() {
test_samples(213, Weibull::new(1.0, 1.0).unwrap(), &[
0.041495778f32, 0.7531094, 1.4189332, 0.38386202,
]);
test_samples(213, Weibull::new(2.0, 0.5).unwrap(), &[
1.1343478702739669f64,
0.29470010050655226,
0.7556151370284702,
7.877212340241561,
]);
}
#[cfg(feature = "alloc")]
#[test]
fn dirichlet_stability() {
let mut rng = get_rng(223);
assert_eq!(
rng.sample(Dirichlet::new([1.0, 2.0, 3.0]).unwrap()),
[0.12941567177708177, 0.4702121891675036, 0.4003721390554146]
);
assert_eq!(rng.sample(Dirichlet::new([8.0; 5]).unwrap()), [
0.17684200044809556,
0.29915953935953055,
0.1832858056608014,
0.1425623503573967,
0.19815030417417595
]);
// Test stability for the case where all alphas are less than 0.1.
assert_eq!(
rng.sample(Dirichlet::new([0.05, 0.025, 0.075, 0.05]).unwrap()),
[
0.00027580456855692104,
2.296135759821706e-20,
3.004118281150937e-9,
0.9997241924273248
]
);
}
#[test]
fn cauchy_stability() {
test_samples(353, Cauchy::new(100f64, 10.0).unwrap(), &[
77.93369152808678f64,
90.1606912098641,
125.31516221323625,
86.10217834773925,
]);
// Unfortunately this test is not fully portable due to reliance on the
// system's implementation of tanf (see doc on Cauchy struct).
// We use a lower threshold of 1e-5 here.
let distr = Cauchy::new(10f32, 7.0).unwrap();
let mut rng = get_rng(353);
let expected = [15.023088, -5.446413, 3.7092876, 3.112482];
for &a in expected.iter() {
let b = rng.sample(&distr);
assert_almost_eq!(a, b, 1e-5);
}
}
| {
self[0].assert_almost_eq(&rhs[0]);
self[1].assert_almost_eq(&rhs[1]);
} | identifier_body |
slime_volleyball.js | var SlimeVolleyball,
__hasProp = Object.prototype.hasOwnProperty,
__extends = function(child, parent) { for (var key in parent) { if (__hasProp.call(parent, key)) child[key] = parent[key]; } function | () { this.constructor = child; } ctor.prototype = parent.prototype; child.prototype = new ctor; child.__super__ = parent.prototype; return child; };
SlimeVolleyball = (function(_super) {
__extends(SlimeVolleyball, _super);
SlimeVolleyball.name = 'SlimeVolleyball';
function SlimeVolleyball() {
return SlimeVolleyball.__super__.constructor.apply(this, arguments);
}
SlimeVolleyball.prototype.init = function(dontOverrideInput) {
var bg, canvas, ctx, loader,
_this = this;
if (Globals.windowSize && Globals.windowSize.width) {
this.width = Globals.windowSize.width;
this.height = Globals.windowSize.height;
} else {
this.width = 800;
this.height = 447;
}
this.world || (this.world = new World(this.width, this.height, Globals.Input));
loader = Globals.Loader;
bg = document.createElement('canvas');
ctx = bg.getContext('2d');
ctx.fillStyle = '#ccc';
ctx.fillRect(0, 0, this.width, this.height);
this.bg = new StretchySprite(0, 0, this.width, this.height, 200, 1, bg);
this.freezeGame = true;
this.initialLoad = true;
this.refreshSprites();
this.loadingEnemy = {};
this.world.reset();
this.last = 0;
this.time = 15;
this.score = 0;
this.displayMsg = "\n Instructions:";
this.displayMsg += "\n Cross road back and forth, knock out foam sword foes";
this.displayMsg += "\n Use arrow keys to move";
this.displayMsg += "\n Getting hit by a green or white bus will kill you";
this.displayMsg += "\n Purple buses don't do damage, because you're Nick";
this.displayMsg += "\n ... and you're pretty much invincible";
this.displayMsg += "\n You have 15 seconds each time to cross the road";
this.displayMsg += "\n\nClick anywhere to start";
this.keyState = {
left: false,
right: false,
up: false
};
if (!dontOverrideInput) {
this.world.handleInput = function() {
return _this.world.p1.handleInput(Globals.Input);
};
}
SlimeVolleyball.__super__.init.call(this);
canvas = Globals.Manager.canvas;
_this = this;
canvas.addEventListener('click', function() {
return _this.handleMouseDown();
}, true);
return canvas.addEventListener('touchstart', function() {
return _this.handleMouseDown();
}, true);
};
SlimeVolleyball.prototype.refreshSprites = function() {
var gamepad, gamepadUI;
this.sprites = [];
this.sprites.push(this.bg, this.world.road, this.world.bushes, this.world.building1, this.world.building2, this.world.p1);
gamepad = new GamePad({
left: [0, this.height - Constants.BOTTOM, this.width / 4, Constants.BOTTOM],
right: [this.width / 4, this.height - Constants.BOTTOM, this.width / 4, Constants.BOTTOM],
up: [2 * this.width / 4, this.height - Constants.BOTTOM, this.width / 4, Constants.BOTTOM],
down: [3 * this.width / 4, this.height - Constants.BOTTOM, this.width / 4, Constants.BOTTOM]
});
this.buttons['gamepad'] = gamepad;
if (!!('ontouchstart' in window) ? 1 : 0) {
gamepadUI = new Sprite(0, this.height * .85, this.width, this.height * .15, Globals.Loader.getAsset('gamepad'));
return this.sprites.push(gamepadUI);
}
};
SlimeVolleyball.prototype.start = function() {
var _this = this;
document.getElementById('ui2').style.display = 'none';
document.getElementById('ui').style.display = 'none';
this.refreshSprites();
this.time = 15;
this.score = 0;
this.world.enemies = {};
this.world.foamEnemies = {};
this.displayMsg = null;
this.freezeGame = false;
this.world.p1.stop = false;
this.world.p1.x = this.width / 2 - this.world.p1.width / 2;
this.world.p1.y = this.height - this.world.p1.height;
if (this.world.p1.curDir === 'Down') this.world.p1.toggleDirection();
this.timeInterval = setInterval(function() {
if (_this.time === 1) {
_this.time--;
return _this.handleWin();
} else {
return _this.time--;
}
}, 1000);
Enemy.fetchCanvases();
this.interval = 2500;
this.loadEnemy(0);
this.loadEnemy(1);
this.loadEnemy(2);
this.loadEnemy(3);
return this.loadFoamEnemy('top');
};
SlimeVolleyball.prototype.handleMouseDown = function(e) {
if (this.freezeGame) {
this.start();
return this.world.lastStep = false;
}
};
SlimeVolleyball.prototype.loadFoamEnemy = function(dir) {
var enemy, id;
enemy = new FoamEnemy(dir);
id = (this.sprites.push(enemy)) - 1;
this.sprites[id].id = id;
return this.world.foamEnemies[id] = enemy;
};
SlimeVolleyball.prototype.loadEnemy = function(lane) {
var enemy, id, _this;
if (!this.freezeGame) {
enemy = new Enemy(lane);
id = (this.sprites.push(enemy)) - 1;
this.sprites[id].id = id;
this.world.enemies[id] = enemy;
_this = this;
return this.loadingEnemy[lane] = setTimeout(function() {
return _this.loadEnemy(lane);
}, (0.8 + 0.4 * Math.random()) * (10 * this.width / this.enemyVel));
}
};
SlimeVolleyball.prototype.showMessage = function(msg) {
var msgObj, textInterval;
if (window.innerWidth < 450) return;
msgObj = new Message(msg, this.world.p1.x, this.world.p1.y);
this.sprites.push(msgObj);
return textInterval = setInterval(function() {
if (msgObj.opacity > 0.005) {
msgObj.opacity -= 0.005;
return msgObj.fontSize += 0.2;
} else {
msgObj.remove = true;
return clearInterval(textInterval);
}
}, 10);
};
/*
# Set a minimum for it
if @interval - @intervalStep >= 1000 && @interval > 1000
@interval -= @intervalStep
@intervalStep += @intervalIncrease
else if @interval > 1000
@interval = 1000
else if @interval > 500
@interval -= 7
*/
SlimeVolleyball.prototype.inputChanged = function() {
var changed, currState, input, key, val, _ref;
input = Globals.Input;
changed = false;
_ref = this.keyState;
for (key in _ref) {
if (!__hasProp.call(_ref, key)) continue;
val = _ref[key];
currState = input[key](0);
if (val !== currState) {
if (!changed) changed = {};
changed[key] = currState;
this.keyState[key] = currState;
}
}
return changed;
};
SlimeVolleyball.prototype.pause = function() {
this.freezeGame = true;
return this.displayMsg = "Paused. Click to resume";
};
SlimeVolleyball.prototype.draw = function() {
var i, msg, msgs, offset, sprite, _i, _j, _len, _len2, _ref, _results;
this.ctx.clearRect(0, 0, this.width, this.height);
_ref = this.sprites;
for (i = _i = 0, _len = _ref.length; _i < _len; i = ++_i) {
sprite = _ref[i];
if (sprite && !sprite.remove) sprite.draw(this.ctx);
}
this.ctx.font = 'bold 18px ' + Constants.MSG_FONT;
this.ctx.fillStyle = '#ffffff';
this.ctx.textAlign = 'left';
msg = "Score: " + this.score;
this.ctx.lineWidth = 1;
this.ctx.strokeStyle = "rgba( 0, 0, 0, 0.8 )";
this.ctx.fillText(msg, 15, 25);
if (window.innerWidth > 500 && false) this.ctx.strokeText(msg, 15, 25);
this.ctx.textAlign = 'right';
/*
time = new Date().getTime()
@zzz = (time-@last) if !( time % 10 )
if !@zzz
@zzz = 0
*/
msg = "Time: " + this.time;
this.ctx.fillText(msg, this.width - 15, 25);
if (window.innerWidth > 500 && false) {
this.ctx.strokeText(msg, this.width - 15, 25);
}
if (this.displayMsg) {
this.ctx.font = 'bold ' + (36 * Globals.sizeRatio) + 'px ' + Constants.MSG_FONT;
this.ctx.fillStyle = '#ffffff';
this.ctx.lineWidth = 1;
this.ctx.strokeStyle = "rgba( 0, 0, 0, 0.8 )";
this.ctx.textAlign = 'center';
this.ctx.fillText("Nick VS. Bus", this.width / 2 - 20, this.height * .33);
if (window.innerWidth > 500 && false) {
this.ctx.strokeText("Nick VS. Bus", this.width / 2 - 20, this.height * .33);
}
this.ctx.fillStyle = '#ffffff';
this.ctx.lineWidth = 0.5;
this.ctx.strokeStyle = "rgba( 0, 0, 0, 0.8 )";
this.ctx.textAlign = 'center';
msgs = this.displayMsg.split("\n");
if (msgs.length > 0) {
_results = [];
for (i = _j = 0, _len2 = msgs.length; _j < _len2; i = ++_j) {
msg = msgs[i];
if (i >= 5) {
offset = this.height * .08;
} else {
offset = 5;
}
this.ctx.font = 'bold ' + (12 * Globals.sizeRatio) + 'px ' + Constants.MSG_FONT;
this.ctx.fillText(msg, this.width / 2 - 20, this.height * .33 + offset + i * 14 * Globals.sizeRatio);
if (window.innerWidth > 500 && false) {
_results.push(this.ctx.strokeText(msg, this.width / 2 - 20, this.height * .33 + offset + i * 14 * Globals.sizeRatio));
} else {
_results.push(void 0);
}
}
return _results;
}
}
};
SlimeVolleyball.prototype.incrementScore = function(score) {
if (score == null) score = 1;
this.score += score;
return this.showMessage("+" + score);
};
SlimeVolleyball.prototype.handleWin = function(winner) {
var about, container, leaderboard, p1, p2, p3, post, posted, show, text,
_this = this;
if (!this.freezeGame) {
this.freezeGame = true;
this.world.reset();
this.displayMsg = "\nGame over!\nClick to play again";
container = document.getElementById('content');
container.innerHTML = '';
leaderboard = new Clay.Leaderboard({
id: 14,
showPersonal: true,
filters: ['all', 'month', 'day', 'hour'],
tabs: [
{
id: 14,
title: 'Cumulative',
cumulative: true,
showPersonal: true,
filters: ['all', 'month', 'day', 'hour']
}, {
id: 14,
title: 'Personal Best',
self: true,
filters: ['all', 'month', 'day', 'hour']
}
]
});
p1 = document.createElement('p');
post = document.createElement('a');
post.href = 'javascript: void( 0 );';
text = document.createTextNode('Post My High Score');
post.appendChild(text);
posted = false;
post.onclick = function() {
if (!posted) {
leaderboard.post({
score: _this.score
}, function() {
return leaderboard.show({
html: "<a href='javascript: void( 0 );' onclick='facebook(" + _this.score + ");'>Post to Facebook</a> or <a href='javascript: void( 0 );' onclick='tweet(" + _this.score + ");'>Post to Twitter</a>"
});
});
return posted = true;
}
};
p1.appendChild(post);
p2 = document.createElement('p');
show = document.createElement('a');
show.href = 'javascript: void( 0 );';
text = document.createTextNode('View High Scores');
show.appendChild(text);
show.onclick = function() {
return leaderboard.show();
};
p2.appendChild(show);
p3 = document.createElement('p');
about = document.createElement('a');
about.href = 'javascript: void( 0 );';
text = document.createTextNode('About the Developer');
about.appendChild(text);
about.onclick = function() {
var ele, ui;
ele = document.getElementById('ui-content');
ele.innerHTML = "<p>My name is Austin Hallock, I'm a CS major at the University of Texas. I'm also co-founder of this site, <a href='http://clay.io' target='_BLANK'>Clay.io</a>, which is a hub for games that are playable in a browser <em>and</em> on your phone. I like making games :)</p><p>Twitter: <a href='http://twitter.com/austinhallock' target='_BLANK'>@austinhallock</a></p>";
ele.style.display = 'block';
ui = document.getElementById('ui');
ui.setAttribute('style', 'display: block !important');
return Clay.Stats.logStat({
name: 'aboutClick',
quantity: 1
});
};
p3.appendChild(about);
container.appendChild(p1);
container.appendChild(p2);
container.appendChild(p3);
document.getElementById('ui2').style.display = 'block';
if (this.score > 5000) {
(new Clay.Achievement({
id: 75
})).award();
}
if (this.score > 10000) {
(new Clay.Achievement({
id: 76
})).award();
}
if (this.score > 20000) {
(new Clay.Achievement({
id: 77
})).award();
}
if (this.score > 50000) {
(new Clay.Achievement({
id: 78
})).award();
}
if (this.score > 100000) {
(new Clay.Achievement({
id: 79
})).award();
}
if (this.score > 150000) {
return (new Clay.Achievement({
id: 80
})).award();
}
}
};
SlimeVolleyball.prototype.step = function(timestamp) {
this.next();
if (this.freezeGame) return this.draw();
this.world.step();
return this.draw();
};
SlimeVolleyball.prototype.buttonPressed = function(e) {
return Globals.Manager.popScene();
};
return SlimeVolleyball;
})(Scene);
| ctor | identifier_name |
slime_volleyball.js | var SlimeVolleyball,
__hasProp = Object.prototype.hasOwnProperty,
__extends = function(child, parent) { for (var key in parent) { if (__hasProp.call(parent, key)) child[key] = parent[key]; } function ctor() | ctor.prototype = parent.prototype; child.prototype = new ctor; child.__super__ = parent.prototype; return child; };
SlimeVolleyball = (function(_super) {
__extends(SlimeVolleyball, _super);
SlimeVolleyball.name = 'SlimeVolleyball';
function SlimeVolleyball() {
return SlimeVolleyball.__super__.constructor.apply(this, arguments);
}
SlimeVolleyball.prototype.init = function(dontOverrideInput) {
var bg, canvas, ctx, loader,
_this = this;
if (Globals.windowSize && Globals.windowSize.width) {
this.width = Globals.windowSize.width;
this.height = Globals.windowSize.height;
} else {
this.width = 800;
this.height = 447;
}
this.world || (this.world = new World(this.width, this.height, Globals.Input));
loader = Globals.Loader;
bg = document.createElement('canvas');
ctx = bg.getContext('2d');
ctx.fillStyle = '#ccc';
ctx.fillRect(0, 0, this.width, this.height);
this.bg = new StretchySprite(0, 0, this.width, this.height, 200, 1, bg);
this.freezeGame = true;
this.initialLoad = true;
this.refreshSprites();
this.loadingEnemy = {};
this.world.reset();
this.last = 0;
this.time = 15;
this.score = 0;
this.displayMsg = "\n Instructions:";
this.displayMsg += "\n Cross road back and forth, knock out foam sword foes";
this.displayMsg += "\n Use arrow keys to move";
this.displayMsg += "\n Getting hit by a green or white bus will kill you";
this.displayMsg += "\n Purple buses don't do damage, because you're Nick";
this.displayMsg += "\n ... and you're pretty much invincible";
this.displayMsg += "\n You have 15 seconds each time to cross the road";
this.displayMsg += "\n\nClick anywhere to start";
this.keyState = {
left: false,
right: false,
up: false
};
if (!dontOverrideInput) {
this.world.handleInput = function() {
return _this.world.p1.handleInput(Globals.Input);
};
}
SlimeVolleyball.__super__.init.call(this);
canvas = Globals.Manager.canvas;
_this = this;
canvas.addEventListener('click', function() {
return _this.handleMouseDown();
}, true);
return canvas.addEventListener('touchstart', function() {
return _this.handleMouseDown();
}, true);
};
SlimeVolleyball.prototype.refreshSprites = function() {
var gamepad, gamepadUI;
this.sprites = [];
this.sprites.push(this.bg, this.world.road, this.world.bushes, this.world.building1, this.world.building2, this.world.p1);
gamepad = new GamePad({
left: [0, this.height - Constants.BOTTOM, this.width / 4, Constants.BOTTOM],
right: [this.width / 4, this.height - Constants.BOTTOM, this.width / 4, Constants.BOTTOM],
up: [2 * this.width / 4, this.height - Constants.BOTTOM, this.width / 4, Constants.BOTTOM],
down: [3 * this.width / 4, this.height - Constants.BOTTOM, this.width / 4, Constants.BOTTOM]
});
this.buttons['gamepad'] = gamepad;
if (!!('ontouchstart' in window) ? 1 : 0) {
gamepadUI = new Sprite(0, this.height * .85, this.width, this.height * .15, Globals.Loader.getAsset('gamepad'));
return this.sprites.push(gamepadUI);
}
};
SlimeVolleyball.prototype.start = function() {
var _this = this;
document.getElementById('ui2').style.display = 'none';
document.getElementById('ui').style.display = 'none';
this.refreshSprites();
this.time = 15;
this.score = 0;
this.world.enemies = {};
this.world.foamEnemies = {};
this.displayMsg = null;
this.freezeGame = false;
this.world.p1.stop = false;
this.world.p1.x = this.width / 2 - this.world.p1.width / 2;
this.world.p1.y = this.height - this.world.p1.height;
if (this.world.p1.curDir === 'Down') this.world.p1.toggleDirection();
this.timeInterval = setInterval(function() {
if (_this.time === 1) {
_this.time--;
return _this.handleWin();
} else {
return _this.time--;
}
}, 1000);
Enemy.fetchCanvases();
this.interval = 2500;
this.loadEnemy(0);
this.loadEnemy(1);
this.loadEnemy(2);
this.loadEnemy(3);
return this.loadFoamEnemy('top');
};
SlimeVolleyball.prototype.handleMouseDown = function(e) {
if (this.freezeGame) {
this.start();
return this.world.lastStep = false;
}
};
SlimeVolleyball.prototype.loadFoamEnemy = function(dir) {
var enemy, id;
enemy = new FoamEnemy(dir);
id = (this.sprites.push(enemy)) - 1;
this.sprites[id].id = id;
return this.world.foamEnemies[id] = enemy;
};
SlimeVolleyball.prototype.loadEnemy = function(lane) {
var enemy, id, _this;
if (!this.freezeGame) {
enemy = new Enemy(lane);
id = (this.sprites.push(enemy)) - 1;
this.sprites[id].id = id;
this.world.enemies[id] = enemy;
_this = this;
return this.loadingEnemy[lane] = setTimeout(function() {
return _this.loadEnemy(lane);
}, (0.8 + 0.4 * Math.random()) * (10 * this.width / this.enemyVel));
}
};
SlimeVolleyball.prototype.showMessage = function(msg) {
var msgObj, textInterval;
if (window.innerWidth < 450) return;
msgObj = new Message(msg, this.world.p1.x, this.world.p1.y);
this.sprites.push(msgObj);
return textInterval = setInterval(function() {
if (msgObj.opacity > 0.005) {
msgObj.opacity -= 0.005;
return msgObj.fontSize += 0.2;
} else {
msgObj.remove = true;
return clearInterval(textInterval);
}
}, 10);
};
/*
# Set a minimum for it
if @interval - @intervalStep >= 1000 && @interval > 1000
@interval -= @intervalStep
@intervalStep += @intervalIncrease
else if @interval > 1000
@interval = 1000
else if @interval > 500
@interval -= 7
*/
SlimeVolleyball.prototype.inputChanged = function() {
var changed, currState, input, key, val, _ref;
input = Globals.Input;
changed = false;
_ref = this.keyState;
for (key in _ref) {
if (!__hasProp.call(_ref, key)) continue;
val = _ref[key];
currState = input[key](0);
if (val !== currState) {
if (!changed) changed = {};
changed[key] = currState;
this.keyState[key] = currState;
}
}
return changed;
};
SlimeVolleyball.prototype.pause = function() {
this.freezeGame = true;
return this.displayMsg = "Paused. Click to resume";
};
SlimeVolleyball.prototype.draw = function() {
var i, msg, msgs, offset, sprite, _i, _j, _len, _len2, _ref, _results;
this.ctx.clearRect(0, 0, this.width, this.height);
_ref = this.sprites;
for (i = _i = 0, _len = _ref.length; _i < _len; i = ++_i) {
sprite = _ref[i];
if (sprite && !sprite.remove) sprite.draw(this.ctx);
}
this.ctx.font = 'bold 18px ' + Constants.MSG_FONT;
this.ctx.fillStyle = '#ffffff';
this.ctx.textAlign = 'left';
msg = "Score: " + this.score;
this.ctx.lineWidth = 1;
this.ctx.strokeStyle = "rgba( 0, 0, 0, 0.8 )";
this.ctx.fillText(msg, 15, 25);
if (window.innerWidth > 500 && false) this.ctx.strokeText(msg, 15, 25);
this.ctx.textAlign = 'right';
/*
time = new Date().getTime()
@zzz = (time-@last) if !( time % 10 )
if !@zzz
@zzz = 0
*/
msg = "Time: " + this.time;
this.ctx.fillText(msg, this.width - 15, 25);
if (window.innerWidth > 500 && false) {
this.ctx.strokeText(msg, this.width - 15, 25);
}
if (this.displayMsg) {
this.ctx.font = 'bold ' + (36 * Globals.sizeRatio) + 'px ' + Constants.MSG_FONT;
this.ctx.fillStyle = '#ffffff';
this.ctx.lineWidth = 1;
this.ctx.strokeStyle = "rgba( 0, 0, 0, 0.8 )";
this.ctx.textAlign = 'center';
this.ctx.fillText("Nick VS. Bus", this.width / 2 - 20, this.height * .33);
if (window.innerWidth > 500 && false) {
this.ctx.strokeText("Nick VS. Bus", this.width / 2 - 20, this.height * .33);
}
this.ctx.fillStyle = '#ffffff';
this.ctx.lineWidth = 0.5;
this.ctx.strokeStyle = "rgba( 0, 0, 0, 0.8 )";
this.ctx.textAlign = 'center';
msgs = this.displayMsg.split("\n");
if (msgs.length > 0) {
_results = [];
for (i = _j = 0, _len2 = msgs.length; _j < _len2; i = ++_j) {
msg = msgs[i];
if (i >= 5) {
offset = this.height * .08;
} else {
offset = 5;
}
this.ctx.font = 'bold ' + (12 * Globals.sizeRatio) + 'px ' + Constants.MSG_FONT;
this.ctx.fillText(msg, this.width / 2 - 20, this.height * .33 + offset + i * 14 * Globals.sizeRatio);
if (window.innerWidth > 500 && false) {
_results.push(this.ctx.strokeText(msg, this.width / 2 - 20, this.height * .33 + offset + i * 14 * Globals.sizeRatio));
} else {
_results.push(void 0);
}
}
return _results;
}
}
};
SlimeVolleyball.prototype.incrementScore = function(score) {
if (score == null) score = 1;
this.score += score;
return this.showMessage("+" + score);
};
SlimeVolleyball.prototype.handleWin = function(winner) {
var about, container, leaderboard, p1, p2, p3, post, posted, show, text,
_this = this;
if (!this.freezeGame) {
this.freezeGame = true;
this.world.reset();
this.displayMsg = "\nGame over!\nClick to play again";
container = document.getElementById('content');
container.innerHTML = '';
leaderboard = new Clay.Leaderboard({
id: 14,
showPersonal: true,
filters: ['all', 'month', 'day', 'hour'],
tabs: [
{
id: 14,
title: 'Cumulative',
cumulative: true,
showPersonal: true,
filters: ['all', 'month', 'day', 'hour']
}, {
id: 14,
title: 'Personal Best',
self: true,
filters: ['all', 'month', 'day', 'hour']
}
]
});
p1 = document.createElement('p');
post = document.createElement('a');
post.href = 'javascript: void( 0 );';
text = document.createTextNode('Post My High Score');
post.appendChild(text);
posted = false;
post.onclick = function() {
if (!posted) {
leaderboard.post({
score: _this.score
}, function() {
return leaderboard.show({
html: "<a href='javascript: void( 0 );' onclick='facebook(" + _this.score + ");'>Post to Facebook</a> or <a href='javascript: void( 0 );' onclick='tweet(" + _this.score + ");'>Post to Twitter</a>"
});
});
return posted = true;
}
};
p1.appendChild(post);
p2 = document.createElement('p');
show = document.createElement('a');
show.href = 'javascript: void( 0 );';
text = document.createTextNode('View High Scores');
show.appendChild(text);
show.onclick = function() {
return leaderboard.show();
};
p2.appendChild(show);
p3 = document.createElement('p');
about = document.createElement('a');
about.href = 'javascript: void( 0 );';
text = document.createTextNode('About the Developer');
about.appendChild(text);
about.onclick = function() {
var ele, ui;
ele = document.getElementById('ui-content');
ele.innerHTML = "<p>My name is Austin Hallock, I'm a CS major at the University of Texas. I'm also co-founder of this site, <a href='http://clay.io' target='_BLANK'>Clay.io</a>, which is a hub for games that are playable in a browser <em>and</em> on your phone. I like making games :)</p><p>Twitter: <a href='http://twitter.com/austinhallock' target='_BLANK'>@austinhallock</a></p>";
ele.style.display = 'block';
ui = document.getElementById('ui');
ui.setAttribute('style', 'display: block !important');
return Clay.Stats.logStat({
name: 'aboutClick',
quantity: 1
});
};
p3.appendChild(about);
container.appendChild(p1);
container.appendChild(p2);
container.appendChild(p3);
document.getElementById('ui2').style.display = 'block';
if (this.score > 5000) {
(new Clay.Achievement({
id: 75
})).award();
}
if (this.score > 10000) {
(new Clay.Achievement({
id: 76
})).award();
}
if (this.score > 20000) {
(new Clay.Achievement({
id: 77
})).award();
}
if (this.score > 50000) {
(new Clay.Achievement({
id: 78
})).award();
}
if (this.score > 100000) {
(new Clay.Achievement({
id: 79
})).award();
}
if (this.score > 150000) {
return (new Clay.Achievement({
id: 80
})).award();
}
}
};
SlimeVolleyball.prototype.step = function(timestamp) {
this.next();
if (this.freezeGame) return this.draw();
this.world.step();
return this.draw();
};
SlimeVolleyball.prototype.buttonPressed = function(e) {
return Globals.Manager.popScene();
};
return SlimeVolleyball;
})(Scene);
| { this.constructor = child; } | identifier_body |
slime_volleyball.js | var SlimeVolleyball,
__hasProp = Object.prototype.hasOwnProperty,
__extends = function(child, parent) { for (var key in parent) { if (__hasProp.call(parent, key)) child[key] = parent[key]; } function ctor() { this.constructor = child; } ctor.prototype = parent.prototype; child.prototype = new ctor; child.__super__ = parent.prototype; return child; };
SlimeVolleyball = (function(_super) {
__extends(SlimeVolleyball, _super);
SlimeVolleyball.name = 'SlimeVolleyball';
function SlimeVolleyball() {
return SlimeVolleyball.__super__.constructor.apply(this, arguments);
}
SlimeVolleyball.prototype.init = function(dontOverrideInput) {
var bg, canvas, ctx, loader,
_this = this;
if (Globals.windowSize && Globals.windowSize.width) {
this.width = Globals.windowSize.width;
this.height = Globals.windowSize.height;
} else {
this.width = 800;
this.height = 447;
}
this.world || (this.world = new World(this.width, this.height, Globals.Input));
loader = Globals.Loader;
bg = document.createElement('canvas');
ctx = bg.getContext('2d');
ctx.fillStyle = '#ccc';
ctx.fillRect(0, 0, this.width, this.height);
this.bg = new StretchySprite(0, 0, this.width, this.height, 200, 1, bg);
this.freezeGame = true;
this.initialLoad = true;
this.refreshSprites();
this.loadingEnemy = {};
this.world.reset();
this.last = 0;
this.time = 15;
this.score = 0;
this.displayMsg = "\n Instructions:";
this.displayMsg += "\n Cross road back and forth, knock out foam sword foes";
this.displayMsg += "\n Use arrow keys to move";
this.displayMsg += "\n Getting hit by a green or white bus will kill you";
this.displayMsg += "\n Purple buses don't do damage, because you're Nick";
this.displayMsg += "\n ... and you're pretty much invincible";
this.displayMsg += "\n You have 15 seconds each time to cross the road";
this.displayMsg += "\n\nClick anywhere to start";
this.keyState = {
left: false,
right: false,
up: false
};
if (!dontOverrideInput) {
this.world.handleInput = function() {
return _this.world.p1.handleInput(Globals.Input);
};
}
SlimeVolleyball.__super__.init.call(this);
canvas = Globals.Manager.canvas;
_this = this;
canvas.addEventListener('click', function() {
return _this.handleMouseDown(); | return _this.handleMouseDown();
}, true);
};
SlimeVolleyball.prototype.refreshSprites = function() {
var gamepad, gamepadUI;
this.sprites = [];
this.sprites.push(this.bg, this.world.road, this.world.bushes, this.world.building1, this.world.building2, this.world.p1);
gamepad = new GamePad({
left: [0, this.height - Constants.BOTTOM, this.width / 4, Constants.BOTTOM],
right: [this.width / 4, this.height - Constants.BOTTOM, this.width / 4, Constants.BOTTOM],
up: [2 * this.width / 4, this.height - Constants.BOTTOM, this.width / 4, Constants.BOTTOM],
down: [3 * this.width / 4, this.height - Constants.BOTTOM, this.width / 4, Constants.BOTTOM]
});
this.buttons['gamepad'] = gamepad;
if (!!('ontouchstart' in window) ? 1 : 0) {
gamepadUI = new Sprite(0, this.height * .85, this.width, this.height * .15, Globals.Loader.getAsset('gamepad'));
return this.sprites.push(gamepadUI);
}
};
SlimeVolleyball.prototype.start = function() {
var _this = this;
document.getElementById('ui2').style.display = 'none';
document.getElementById('ui').style.display = 'none';
this.refreshSprites();
this.time = 15;
this.score = 0;
this.world.enemies = {};
this.world.foamEnemies = {};
this.displayMsg = null;
this.freezeGame = false;
this.world.p1.stop = false;
this.world.p1.x = this.width / 2 - this.world.p1.width / 2;
this.world.p1.y = this.height - this.world.p1.height;
if (this.world.p1.curDir === 'Down') this.world.p1.toggleDirection();
this.timeInterval = setInterval(function() {
if (_this.time === 1) {
_this.time--;
return _this.handleWin();
} else {
return _this.time--;
}
}, 1000);
Enemy.fetchCanvases();
this.interval = 2500;
this.loadEnemy(0);
this.loadEnemy(1);
this.loadEnemy(2);
this.loadEnemy(3);
return this.loadFoamEnemy('top');
};
SlimeVolleyball.prototype.handleMouseDown = function(e) {
if (this.freezeGame) {
this.start();
return this.world.lastStep = false;
}
};
SlimeVolleyball.prototype.loadFoamEnemy = function(dir) {
var enemy, id;
enemy = new FoamEnemy(dir);
id = (this.sprites.push(enemy)) - 1;
this.sprites[id].id = id;
return this.world.foamEnemies[id] = enemy;
};
SlimeVolleyball.prototype.loadEnemy = function(lane) {
var enemy, id, _this;
if (!this.freezeGame) {
enemy = new Enemy(lane);
id = (this.sprites.push(enemy)) - 1;
this.sprites[id].id = id;
this.world.enemies[id] = enemy;
_this = this;
return this.loadingEnemy[lane] = setTimeout(function() {
return _this.loadEnemy(lane);
}, (0.8 + 0.4 * Math.random()) * (10 * this.width / this.enemyVel));
}
};
SlimeVolleyball.prototype.showMessage = function(msg) {
var msgObj, textInterval;
if (window.innerWidth < 450) return;
msgObj = new Message(msg, this.world.p1.x, this.world.p1.y);
this.sprites.push(msgObj);
return textInterval = setInterval(function() {
if (msgObj.opacity > 0.005) {
msgObj.opacity -= 0.005;
return msgObj.fontSize += 0.2;
} else {
msgObj.remove = true;
return clearInterval(textInterval);
}
}, 10);
};
/*
# Set a minimum for it
if @interval - @intervalStep >= 1000 && @interval > 1000
@interval -= @intervalStep
@intervalStep += @intervalIncrease
else if @interval > 1000
@interval = 1000
else if @interval > 500
@interval -= 7
*/
SlimeVolleyball.prototype.inputChanged = function() {
var changed, currState, input, key, val, _ref;
input = Globals.Input;
changed = false;
_ref = this.keyState;
for (key in _ref) {
if (!__hasProp.call(_ref, key)) continue;
val = _ref[key];
currState = input[key](0);
if (val !== currState) {
if (!changed) changed = {};
changed[key] = currState;
this.keyState[key] = currState;
}
}
return changed;
};
SlimeVolleyball.prototype.pause = function() {
this.freezeGame = true;
return this.displayMsg = "Paused. Click to resume";
};
SlimeVolleyball.prototype.draw = function() {
var i, msg, msgs, offset, sprite, _i, _j, _len, _len2, _ref, _results;
this.ctx.clearRect(0, 0, this.width, this.height);
_ref = this.sprites;
for (i = _i = 0, _len = _ref.length; _i < _len; i = ++_i) {
sprite = _ref[i];
if (sprite && !sprite.remove) sprite.draw(this.ctx);
}
this.ctx.font = 'bold 18px ' + Constants.MSG_FONT;
this.ctx.fillStyle = '#ffffff';
this.ctx.textAlign = 'left';
msg = "Score: " + this.score;
this.ctx.lineWidth = 1;
this.ctx.strokeStyle = "rgba( 0, 0, 0, 0.8 )";
this.ctx.fillText(msg, 15, 25);
if (window.innerWidth > 500 && false) this.ctx.strokeText(msg, 15, 25);
this.ctx.textAlign = 'right';
/*
time = new Date().getTime()
@zzz = (time-@last) if !( time % 10 )
if !@zzz
@zzz = 0
*/
msg = "Time: " + this.time;
this.ctx.fillText(msg, this.width - 15, 25);
if (window.innerWidth > 500 && false) {
this.ctx.strokeText(msg, this.width - 15, 25);
}
if (this.displayMsg) {
this.ctx.font = 'bold ' + (36 * Globals.sizeRatio) + 'px ' + Constants.MSG_FONT;
this.ctx.fillStyle = '#ffffff';
this.ctx.lineWidth = 1;
this.ctx.strokeStyle = "rgba( 0, 0, 0, 0.8 )";
this.ctx.textAlign = 'center';
this.ctx.fillText("Nick VS. Bus", this.width / 2 - 20, this.height * .33);
if (window.innerWidth > 500 && false) {
this.ctx.strokeText("Nick VS. Bus", this.width / 2 - 20, this.height * .33);
}
this.ctx.fillStyle = '#ffffff';
this.ctx.lineWidth = 0.5;
this.ctx.strokeStyle = "rgba( 0, 0, 0, 0.8 )";
this.ctx.textAlign = 'center';
msgs = this.displayMsg.split("\n");
if (msgs.length > 0) {
_results = [];
for (i = _j = 0, _len2 = msgs.length; _j < _len2; i = ++_j) {
msg = msgs[i];
if (i >= 5) {
offset = this.height * .08;
} else {
offset = 5;
}
this.ctx.font = 'bold ' + (12 * Globals.sizeRatio) + 'px ' + Constants.MSG_FONT;
this.ctx.fillText(msg, this.width / 2 - 20, this.height * .33 + offset + i * 14 * Globals.sizeRatio);
if (window.innerWidth > 500 && false) {
_results.push(this.ctx.strokeText(msg, this.width / 2 - 20, this.height * .33 + offset + i * 14 * Globals.sizeRatio));
} else {
_results.push(void 0);
}
}
return _results;
}
}
};
SlimeVolleyball.prototype.incrementScore = function(score) {
if (score == null) score = 1;
this.score += score;
return this.showMessage("+" + score);
};
SlimeVolleyball.prototype.handleWin = function(winner) {
var about, container, leaderboard, p1, p2, p3, post, posted, show, text,
_this = this;
if (!this.freezeGame) {
this.freezeGame = true;
this.world.reset();
this.displayMsg = "\nGame over!\nClick to play again";
container = document.getElementById('content');
container.innerHTML = '';
leaderboard = new Clay.Leaderboard({
id: 14,
showPersonal: true,
filters: ['all', 'month', 'day', 'hour'],
tabs: [
{
id: 14,
title: 'Cumulative',
cumulative: true,
showPersonal: true,
filters: ['all', 'month', 'day', 'hour']
}, {
id: 14,
title: 'Personal Best',
self: true,
filters: ['all', 'month', 'day', 'hour']
}
]
});
p1 = document.createElement('p');
post = document.createElement('a');
post.href = 'javascript: void( 0 );';
text = document.createTextNode('Post My High Score');
post.appendChild(text);
posted = false;
post.onclick = function() {
if (!posted) {
leaderboard.post({
score: _this.score
}, function() {
return leaderboard.show({
html: "<a href='javascript: void( 0 );' onclick='facebook(" + _this.score + ");'>Post to Facebook</a> or <a href='javascript: void( 0 );' onclick='tweet(" + _this.score + ");'>Post to Twitter</a>"
});
});
return posted = true;
}
};
p1.appendChild(post);
p2 = document.createElement('p');
show = document.createElement('a');
show.href = 'javascript: void( 0 );';
text = document.createTextNode('View High Scores');
show.appendChild(text);
show.onclick = function() {
return leaderboard.show();
};
p2.appendChild(show);
p3 = document.createElement('p');
about = document.createElement('a');
about.href = 'javascript: void( 0 );';
text = document.createTextNode('About the Developer');
about.appendChild(text);
about.onclick = function() {
var ele, ui;
ele = document.getElementById('ui-content');
ele.innerHTML = "<p>My name is Austin Hallock, I'm a CS major at the University of Texas. I'm also co-founder of this site, <a href='http://clay.io' target='_BLANK'>Clay.io</a>, which is a hub for games that are playable in a browser <em>and</em> on your phone. I like making games :)</p><p>Twitter: <a href='http://twitter.com/austinhallock' target='_BLANK'>@austinhallock</a></p>";
ele.style.display = 'block';
ui = document.getElementById('ui');
ui.setAttribute('style', 'display: block !important');
return Clay.Stats.logStat({
name: 'aboutClick',
quantity: 1
});
};
p3.appendChild(about);
container.appendChild(p1);
container.appendChild(p2);
container.appendChild(p3);
document.getElementById('ui2').style.display = 'block';
if (this.score > 5000) {
(new Clay.Achievement({
id: 75
})).award();
}
if (this.score > 10000) {
(new Clay.Achievement({
id: 76
})).award();
}
if (this.score > 20000) {
(new Clay.Achievement({
id: 77
})).award();
}
if (this.score > 50000) {
(new Clay.Achievement({
id: 78
})).award();
}
if (this.score > 100000) {
(new Clay.Achievement({
id: 79
})).award();
}
if (this.score > 150000) {
return (new Clay.Achievement({
id: 80
})).award();
}
}
};
SlimeVolleyball.prototype.step = function(timestamp) {
this.next();
if (this.freezeGame) return this.draw();
this.world.step();
return this.draw();
};
SlimeVolleyball.prototype.buttonPressed = function(e) {
return Globals.Manager.popScene();
};
return SlimeVolleyball;
})(Scene); | }, true);
return canvas.addEventListener('touchstart', function() { | random_line_split |
text_layout_engine.rs | #![allow(dead_code)]
// XXX: should be no harfbuzz in the interface
use crate::node::NativeWord;
use crate::xetex_font_info::{GlyphBBox, XeTeXFontInst};
//use crate::xetex_font_manager::PlatformFontRef;
use crate::cmd::XetexExtCmd;
use crate::xetex_font_info::GlyphID;
use crate::xetex_layout_interface::FixedPoint;
use crate::xetex_layout_interface::XeTeXLayoutEngine;
use crate::xetex_scaledmath::Scaled;
use harfbuzz_sys::hb_tag_t;
#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash)]
enum TextDirection {
LTR,
RTL,
}
// Annoying XeTeXFontMgr singleton accessors
// pub unsafe fn getFullName(fontRef: PlatformFontRef) -> *const libc::c_char;
// pub unsafe fn getDesignSize(font: *mut XeTeXFontInst) -> f64;
// pub unsafe fn findFontByName(name: &CStr, var: Option<&mut String>, size: f64) -> PlatformFontRef;
// pub unsafe fn terminate_font_manager();
// pub unsafe fn destroy_font_manager();
// Internal to XeTeXLayoutEngine but could use improvement
// pub unsafe fn getGlyphs(engine: XeTeXLayoutEngine, glyphs: *mut u32);
// pub unsafe fn getGlyphAdvances(engine: XeTeXLayoutEngine, advances: *mut f32);
// pub unsafe fn getGlyphPositions(engine: XeTeXLayoutEngine, positions: *mut FloatPoint);
// engine : *font_layout_engine.offset((*node.offset(4)).b16.s2 as isize) as CFDictionaryRef;
pub(crate) struct LayoutRequest<'a> {
// ```text
// let txtLen = (*node.offset(4)).b16.s1 as libc::c_long;
// let txtPtr = node.offset(6) as *mut UniChar;
// slice::from_raw_parts(txtPtr, txtLen)
// ```
pub text: &'a [u16],
// node.offset(1).b32.s1
pub line_width: Scaled,
// let f = let mut f: libc::c_uint = (*node.offset(4)).b16.s2 as libc::c_uint;
// *font_letter_space.offset(f as usize)
pub letter_space_unit: Scaled,
/// Only used by AAT
pub justify: bool,
}
impl<'a> LayoutRequest<'a> {
/// Unsafety: obviously, dereferences raw node pointer. The lifetime is also pulled out of
/// thin air, so just keep it in scope, ok?
pub(crate) unsafe fn from_node(node: &'a NativeWord, justify: bool) -> LayoutRequest<'a> {
use crate::xetex_ini::FONT_LETTER_SPACE;
let text = node.text();
let line_width = node.width();
let f = node.font() as usize;
let letter_space_unit = FONT_LETTER_SPACE[f];
LayoutRequest {
text,
line_width,
letter_space_unit,
justify,
}
}
}
pub(crate) struct NodeLayout {
pub lsDelta: Option<Scaled>,
pub width: Scaled,
pub total_glyph_count: u16,
pub glyph_info: *mut FixedPoint,
}
impl NodeLayout {
pub(crate) unsafe fn write_node(&self, node: &mut NativeWord) {
let NodeLayout {
lsDelta,
width,
total_glyph_count,
glyph_info,
} = *self;
node.set_width(width + lsDelta.unwrap_or(Scaled::ZERO));
node.set_glyph_count(total_glyph_count);
node.set_glyph_info_ptr(glyph_info as *mut _);
}
}
/// Stuff that should be added as XeTeXFontInst methods
trait FontInstance {
unsafe fn countGlyphs(font: *mut XeTeXFontInst) -> u32;
unsafe fn getGlyphWidth(font: *mut XeTeXFontInst, gid: u32) -> f32;
unsafe fn setFontLayoutDir(font: *mut XeTeXFontInst, vertical: libc::c_int);
unsafe fn getIndLanguage(font: *mut XeTeXFontInst, script: hb_tag_t, index: u32) -> hb_tag_t;
unsafe fn countFeatures(font: *mut XeTeXFontInst, script: hb_tag_t, language: hb_tag_t) -> u32;
unsafe fn getIndFeature(
font: *mut XeTeXFontInst,
script: hb_tag_t,
language: hb_tag_t,
index: u32,
) -> hb_tag_t;
unsafe fn countScripts(font: *mut XeTeXFontInst) -> u32;
unsafe fn getIndScript(font: *mut XeTeXFontInst, index: u32) -> hb_tag_t;
unsafe fn countLanguages(font: *mut XeTeXFontInst, script: hb_tag_t) -> u32;
unsafe fn getSlant(font: *mut XeTeXFontInst) -> Scaled;
unsafe fn getFontTablePtr(font: *mut XeTeXFontInst, tableTag: u32) -> *mut libc::c_void;
// unsafe fn deleteFont(mut font: *mut XeTeXFontInst);
}
// Not quite layout engine things
// pub unsafe fn createFont(fontRef: PlatformFontRef, pointSize: Fixed) -> *mut XeTeXFontInst;
// pub unsafe fn createFontFromFile(
// filename: &CStr,
// index: libc::c_int,
// pointSize: Fixed,
// ) -> *mut XeTeXFontInst;
// // Misc static dictionary lookups/setters
// pub unsafe fn set_cp_code(fontNum: libc::c_int, code: libc::c_uint, side: libc::c_int, value: libc::c_int);
// pub unsafe fn get_cp_code(
// fontNum: libc::c_int,
// code: libc::c_uint,
// side: libc::c_int,
// ) -> libc::c_int;
/*pub struct GlyphBBoxCache {
// ...
}
impl GlyphBBoxCache {
/// getCachedGlyphBBox
pub unsafe fn get(fontID: u16, glyphID: u16) -> Option<GlyphBBox> {
unimplemented!()
}
pub unsafe fn store(fontID: u16, glyphID: u16, bbox: GlyphBBox) {
unimplemented!()
}
}*/
#[repr(u8)]
pub enum GlyphEdge {
Left = 1,
Top = 2,
Right = 3,
Bottom = 4,
}
impl GlyphEdge {
/// If a glyph is left or right
#[inline]
pub fn is_side(&self) -> bool {
match *self {
GlyphEdge::Left | GlyphEdge::Right => true,
_ => false,
}
}
#[inline]
pub fn pick_from(&self, options: &(f32, f32)) -> f32 {
match *self {
GlyphEdge::Left | GlyphEdge::Top => options.0,
GlyphEdge::Right | GlyphEdge::Bottom => options.1,
}
}
pub fn from_int(i: i32) -> Option<Self> {
Some(match i {
1 => GlyphEdge::Left,
2 => GlyphEdge::Top,
3 => GlyphEdge::Right,
4 => GlyphEdge::Bottom,
_ => return None,
})
}
}
#[enum_dispatch::enum_dispatch]
pub(crate) enum NativeFont {
#[cfg(target_os = "macos")]
Aat(crate::xetex_aatfont::AATLayoutEngine),
Otgr(XeTeXLayoutEngine),
}
impl NativeFont {
pub(crate) fn flag(&self) -> u32 {
match self {
#[cfg(target_os = "macos")]
Self::Aat(_) => 0xFFFF,
Self::Otgr(_) => 0xFFFE,
}
}
}
#[enum_dispatch::enum_dispatch(NativeFont)]
pub(crate) trait TextLayoutEngine {
/// The most important trait method. Lay out some text and return its size.
unsafe fn layout_text(&mut self, request: LayoutRequest) -> NodeLayout;
/// getFontFilename
fn font_filename(&self, index: &mut u32) -> String;
//unsafe fn print_font_name(&self, c: i32, arg1: i32, arg2: i32);
/// getFontInst
//fn font_instance(&self) -> &XeTeXFontInst;
// should implement Drop
// unsafe fn deleteLayoutEngine(mut engine: XeTeXLayoutEngine);
unsafe fn glyph_width(&self, gid: u32) -> f64;
// XXX: make a single struct for make_font_def to consume, of all the required values
unsafe fn get_font_metrics(&self) -> (Scaled, Scaled, Scaled, Scaled, Scaled);
/// ot_font_get, aat_font_get
unsafe fn poorly_named_getter(&self, what: XetexExtCmd) -> i32;
/// ot_font_get_1, aat_font_get_1
unsafe fn poorly_named_getter_1(&self, what: XetexExtCmd, param1: i32) -> i32;
/// ot_font_get_2, aat_font_get_2
unsafe fn poorly_named_getter_2(&self, what: XetexExtCmd, param1: i32, param2: i32) -> i32;
unsafe fn poorly_named_getter_3(
&self,
what: XetexExtCmd,
param1: i32,
param2: i32,
param3: i32,
) -> i32;
unsafe fn get_flags(&self, font_number: usize) -> u16;
/// getExtendFactor
fn extend_factor(&self) -> f64;
/// getPointSize
fn point_size(&self) -> f64;
/// getAscentAndDescent
fn ascent_and_descent(&self) -> (f32, f32);
/// getCapAndXHeight
fn cap_and_x_height(&self) -> (f32, f32);
/// getEmboldenFactor
fn embolden_factor(&self) -> f32;
/// as r,g,b,a bytes, in order (careful of endianness maybe at output phase)
fn rgb_value(&self) -> u32;
/// getSlantFactor
unsafe fn slant_factor(&self) -> f64;
/// getGlyphName
unsafe fn glyph_name(&self, gid: GlyphID) -> String;
/// getGlyphBounds (had out param)
unsafe fn glyph_bbox(&self, glyphID: u32) -> Option<GlyphBBox>;
unsafe fn get_glyph_width_from_engine(&self, glyphID: u32) -> f64;
/// getGlyphHeightDepth (had out params height, depth)
unsafe fn glyph_height_depth(&self, glyphID: u32) -> Option<(f32, f32)>;
/// getGlyphSidebearings (had out params lsb, rsb)
unsafe fn glyph_sidebearings(&self, glyphID: u32) -> Option<(f32, f32)>;
/// getGlyphItalCorr
unsafe fn glyph_ital_correction(&self, glyphID: u32) -> Option<f64>;
/// mapCharToGlyph
/// Should probably just use engine.font as this just passes on the call
/// This is used for 'fallback in case lacks an OS/2 table', and also for adding accents
/// (get_native_char_sidebearings).
/// Although the shaping engine should probably be doing the latter, not xetex0!
fn map_char_to_glyph(&self, chr: char) -> u32;
/// getFontCharRange
/// Another candidate for using XeTeXFontInst directly
fn font_char_range(&self, reqFirst: i32) -> i32;
/// mapGlyphToIndex
/// Should use engine.font directly
fn map_glyph_to_index(&self, glyph_name: &str) -> i32;
// Provided methods, override if using stuff
/// Default impl is { false }.
/// Only used directly with xetex0.
fn using_graphite(&self) -> bool {
false
}
/// Returns true if "user asked for Graphite line breaking and the font supports it" | false
}
/// Not sure what AAT should return, since this is only called with random casts to
/// XeTeXLayoutENgine in xetex0.
fn using_open_type(&self) -> bool {
false
}
unsafe fn is_open_type_math_font(&self) -> bool {
false
}
}
/*
trait GraphiteFontSomething {
unsafe fn countGraphiteFeatures(&self) -> u32;
unsafe fn getGraphiteFeatureCode(&self, index: u32) -> u32;
unsafe fn countGraphiteFeatureSettings(&self, featureID: u32) -> u32;
unsafe fn getGraphiteFeatureSettingCode(&self, featureID: u32, index: u32) -> u32;
unsafe fn getGraphiteFeatureDefaultSetting(&self, featureID: u32) -> u32;
unsafe fn getGraphiteFeatureLabel(&self, featureID: u32) -> *mut libc::c_char;
unsafe fn getGraphiteFeatureSettingLabel(
&self,
featureID: u32,
settingID: u32,
) -> *mut libc::c_char;
unsafe fn findGraphiteFeature(
&self,
s: *const libc::c_char,
e: *const libc::c_char,
f: *mut hb_tag_t,
v: *mut libc::c_int,
) -> bool;
unsafe fn findGraphiteFeatureNamed(
&self,
name: *const libc::c_char,
namelength: libc::c_int,
) -> libc::c_long;
unsafe fn findGraphiteFeatureSettingNamed(
&self,
id: u32,
name: *const libc::c_char,
namelength: libc::c_int,
) -> libc::c_long;
}
*/ | /// Only relevant if this engine actually uses graphite, hence default impl of { false }
unsafe fn initGraphiteBreaking(&mut self, _txt: &[u16]) -> bool { | random_line_split |
text_layout_engine.rs | #![allow(dead_code)]
// XXX: should be no harfbuzz in the interface
use crate::node::NativeWord;
use crate::xetex_font_info::{GlyphBBox, XeTeXFontInst};
//use crate::xetex_font_manager::PlatformFontRef;
use crate::cmd::XetexExtCmd;
use crate::xetex_font_info::GlyphID;
use crate::xetex_layout_interface::FixedPoint;
use crate::xetex_layout_interface::XeTeXLayoutEngine;
use crate::xetex_scaledmath::Scaled;
use harfbuzz_sys::hb_tag_t;
#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash)]
enum TextDirection {
LTR,
RTL,
}
// Annoying XeTeXFontMgr singleton accessors
// pub unsafe fn getFullName(fontRef: PlatformFontRef) -> *const libc::c_char;
// pub unsafe fn getDesignSize(font: *mut XeTeXFontInst) -> f64;
// pub unsafe fn findFontByName(name: &CStr, var: Option<&mut String>, size: f64) -> PlatformFontRef;
// pub unsafe fn terminate_font_manager();
// pub unsafe fn destroy_font_manager();
// Internal to XeTeXLayoutEngine but could use improvement
// pub unsafe fn getGlyphs(engine: XeTeXLayoutEngine, glyphs: *mut u32);
// pub unsafe fn getGlyphAdvances(engine: XeTeXLayoutEngine, advances: *mut f32);
// pub unsafe fn getGlyphPositions(engine: XeTeXLayoutEngine, positions: *mut FloatPoint);
// engine : *font_layout_engine.offset((*node.offset(4)).b16.s2 as isize) as CFDictionaryRef;
pub(crate) struct LayoutRequest<'a> {
// ```text
// let txtLen = (*node.offset(4)).b16.s1 as libc::c_long;
// let txtPtr = node.offset(6) as *mut UniChar;
// slice::from_raw_parts(txtPtr, txtLen)
// ```
pub text: &'a [u16],
// node.offset(1).b32.s1
pub line_width: Scaled,
// let f = let mut f: libc::c_uint = (*node.offset(4)).b16.s2 as libc::c_uint;
// *font_letter_space.offset(f as usize)
pub letter_space_unit: Scaled,
/// Only used by AAT
pub justify: bool,
}
impl<'a> LayoutRequest<'a> {
/// Unsafety: obviously, dereferences raw node pointer. The lifetime is also pulled out of
/// thin air, so just keep it in scope, ok?
pub(crate) unsafe fn from_node(node: &'a NativeWord, justify: bool) -> LayoutRequest<'a> {
use crate::xetex_ini::FONT_LETTER_SPACE;
let text = node.text();
let line_width = node.width();
let f = node.font() as usize;
let letter_space_unit = FONT_LETTER_SPACE[f];
LayoutRequest {
text,
line_width,
letter_space_unit,
justify,
}
}
}
pub(crate) struct NodeLayout {
pub lsDelta: Option<Scaled>,
pub width: Scaled,
pub total_glyph_count: u16,
pub glyph_info: *mut FixedPoint,
}
impl NodeLayout {
pub(crate) unsafe fn write_node(&self, node: &mut NativeWord) {
let NodeLayout {
lsDelta,
width,
total_glyph_count,
glyph_info,
} = *self;
node.set_width(width + lsDelta.unwrap_or(Scaled::ZERO));
node.set_glyph_count(total_glyph_count);
node.set_glyph_info_ptr(glyph_info as *mut _);
}
}
/// Stuff that should be added as XeTeXFontInst methods
trait FontInstance {
unsafe fn countGlyphs(font: *mut XeTeXFontInst) -> u32;
unsafe fn getGlyphWidth(font: *mut XeTeXFontInst, gid: u32) -> f32;
unsafe fn setFontLayoutDir(font: *mut XeTeXFontInst, vertical: libc::c_int);
unsafe fn getIndLanguage(font: *mut XeTeXFontInst, script: hb_tag_t, index: u32) -> hb_tag_t;
unsafe fn countFeatures(font: *mut XeTeXFontInst, script: hb_tag_t, language: hb_tag_t) -> u32;
unsafe fn getIndFeature(
font: *mut XeTeXFontInst,
script: hb_tag_t,
language: hb_tag_t,
index: u32,
) -> hb_tag_t;
unsafe fn countScripts(font: *mut XeTeXFontInst) -> u32;
unsafe fn getIndScript(font: *mut XeTeXFontInst, index: u32) -> hb_tag_t;
unsafe fn countLanguages(font: *mut XeTeXFontInst, script: hb_tag_t) -> u32;
unsafe fn getSlant(font: *mut XeTeXFontInst) -> Scaled;
unsafe fn getFontTablePtr(font: *mut XeTeXFontInst, tableTag: u32) -> *mut libc::c_void;
// unsafe fn deleteFont(mut font: *mut XeTeXFontInst);
}
// Not quite layout engine things
// pub unsafe fn createFont(fontRef: PlatformFontRef, pointSize: Fixed) -> *mut XeTeXFontInst;
// pub unsafe fn createFontFromFile(
// filename: &CStr,
// index: libc::c_int,
// pointSize: Fixed,
// ) -> *mut XeTeXFontInst;
// // Misc static dictionary lookups/setters
// pub unsafe fn set_cp_code(fontNum: libc::c_int, code: libc::c_uint, side: libc::c_int, value: libc::c_int);
// pub unsafe fn get_cp_code(
// fontNum: libc::c_int,
// code: libc::c_uint,
// side: libc::c_int,
// ) -> libc::c_int;
/*pub struct GlyphBBoxCache {
// ...
}
impl GlyphBBoxCache {
/// getCachedGlyphBBox
pub unsafe fn get(fontID: u16, glyphID: u16) -> Option<GlyphBBox> {
unimplemented!()
}
pub unsafe fn store(fontID: u16, glyphID: u16, bbox: GlyphBBox) {
unimplemented!()
}
}*/
#[repr(u8)]
pub enum GlyphEdge {
Left = 1,
Top = 2,
Right = 3,
Bottom = 4,
}
impl GlyphEdge {
/// If a glyph is left or right
#[inline]
pub fn is_side(&self) -> bool {
match *self {
GlyphEdge::Left | GlyphEdge::Right => true,
_ => false,
}
}
#[inline]
pub fn pick_from(&self, options: &(f32, f32)) -> f32 |
pub fn from_int(i: i32) -> Option<Self> {
Some(match i {
1 => GlyphEdge::Left,
2 => GlyphEdge::Top,
3 => GlyphEdge::Right,
4 => GlyphEdge::Bottom,
_ => return None,
})
}
}
#[enum_dispatch::enum_dispatch]
pub(crate) enum NativeFont {
#[cfg(target_os = "macos")]
Aat(crate::xetex_aatfont::AATLayoutEngine),
Otgr(XeTeXLayoutEngine),
}
impl NativeFont {
pub(crate) fn flag(&self) -> u32 {
match self {
#[cfg(target_os = "macos")]
Self::Aat(_) => 0xFFFF,
Self::Otgr(_) => 0xFFFE,
}
}
}
#[enum_dispatch::enum_dispatch(NativeFont)]
pub(crate) trait TextLayoutEngine {
/// The most important trait method. Lay out some text and return its size.
unsafe fn layout_text(&mut self, request: LayoutRequest) -> NodeLayout;
/// getFontFilename
fn font_filename(&self, index: &mut u32) -> String;
//unsafe fn print_font_name(&self, c: i32, arg1: i32, arg2: i32);
/// getFontInst
//fn font_instance(&self) -> &XeTeXFontInst;
// should implement Drop
// unsafe fn deleteLayoutEngine(mut engine: XeTeXLayoutEngine);
unsafe fn glyph_width(&self, gid: u32) -> f64;
// XXX: make a single struct for make_font_def to consume, of all the required values
unsafe fn get_font_metrics(&self) -> (Scaled, Scaled, Scaled, Scaled, Scaled);
/// ot_font_get, aat_font_get
unsafe fn poorly_named_getter(&self, what: XetexExtCmd) -> i32;
/// ot_font_get_1, aat_font_get_1
unsafe fn poorly_named_getter_1(&self, what: XetexExtCmd, param1: i32) -> i32;
/// ot_font_get_2, aat_font_get_2
unsafe fn poorly_named_getter_2(&self, what: XetexExtCmd, param1: i32, param2: i32) -> i32;
unsafe fn poorly_named_getter_3(
&self,
what: XetexExtCmd,
param1: i32,
param2: i32,
param3: i32,
) -> i32;
unsafe fn get_flags(&self, font_number: usize) -> u16;
/// getExtendFactor
fn extend_factor(&self) -> f64;
/// getPointSize
fn point_size(&self) -> f64;
/// getAscentAndDescent
fn ascent_and_descent(&self) -> (f32, f32);
/// getCapAndXHeight
fn cap_and_x_height(&self) -> (f32, f32);
/// getEmboldenFactor
fn embolden_factor(&self) -> f32;
/// as r,g,b,a bytes, in order (careful of endianness maybe at output phase)
fn rgb_value(&self) -> u32;
/// getSlantFactor
unsafe fn slant_factor(&self) -> f64;
/// getGlyphName
unsafe fn glyph_name(&self, gid: GlyphID) -> String;
/// getGlyphBounds (had out param)
unsafe fn glyph_bbox(&self, glyphID: u32) -> Option<GlyphBBox>;
unsafe fn get_glyph_width_from_engine(&self, glyphID: u32) -> f64;
/// getGlyphHeightDepth (had out params height, depth)
unsafe fn glyph_height_depth(&self, glyphID: u32) -> Option<(f32, f32)>;
/// getGlyphSidebearings (had out params lsb, rsb)
unsafe fn glyph_sidebearings(&self, glyphID: u32) -> Option<(f32, f32)>;
/// getGlyphItalCorr
unsafe fn glyph_ital_correction(&self, glyphID: u32) -> Option<f64>;
/// mapCharToGlyph
/// Should probably just use engine.font as this just passes on the call
/// This is used for 'fallback in case lacks an OS/2 table', and also for adding accents
/// (get_native_char_sidebearings).
/// Although the shaping engine should probably be doing the latter, not xetex0!
fn map_char_to_glyph(&self, chr: char) -> u32;
/// getFontCharRange
/// Another candidate for using XeTeXFontInst directly
fn font_char_range(&self, reqFirst: i32) -> i32;
/// mapGlyphToIndex
/// Should use engine.font directly
fn map_glyph_to_index(&self, glyph_name: &str) -> i32;
// Provided methods, override if using stuff
/// Default impl is { false }.
/// Only used directly with xetex0.
fn using_graphite(&self) -> bool {
false
}
/// Returns true if "user asked for Graphite line breaking and the font supports it"
/// Only relevant if this engine actually uses graphite, hence default impl of { false }
unsafe fn initGraphiteBreaking(&mut self, _txt: &[u16]) -> bool {
false
}
/// Not sure what AAT should return, since this is only called with random casts to
/// XeTeXLayoutENgine in xetex0.
fn using_open_type(&self) -> bool {
false
}
unsafe fn is_open_type_math_font(&self) -> bool {
false
}
}
/*
trait GraphiteFontSomething {
unsafe fn countGraphiteFeatures(&self) -> u32;
unsafe fn getGraphiteFeatureCode(&self, index: u32) -> u32;
unsafe fn countGraphiteFeatureSettings(&self, featureID: u32) -> u32;
unsafe fn getGraphiteFeatureSettingCode(&self, featureID: u32, index: u32) -> u32;
unsafe fn getGraphiteFeatureDefaultSetting(&self, featureID: u32) -> u32;
unsafe fn getGraphiteFeatureLabel(&self, featureID: u32) -> *mut libc::c_char;
unsafe fn getGraphiteFeatureSettingLabel(
&self,
featureID: u32,
settingID: u32,
) -> *mut libc::c_char;
unsafe fn findGraphiteFeature(
&self,
s: *const libc::c_char,
e: *const libc::c_char,
f: *mut hb_tag_t,
v: *mut libc::c_int,
) -> bool;
unsafe fn findGraphiteFeatureNamed(
&self,
name: *const libc::c_char,
namelength: libc::c_int,
) -> libc::c_long;
unsafe fn findGraphiteFeatureSettingNamed(
&self,
id: u32,
name: *const libc::c_char,
namelength: libc::c_int,
) -> libc::c_long;
}
*/
| {
match *self {
GlyphEdge::Left | GlyphEdge::Top => options.0,
GlyphEdge::Right | GlyphEdge::Bottom => options.1,
}
} | identifier_body |
text_layout_engine.rs | #![allow(dead_code)]
// XXX: should be no harfbuzz in the interface
use crate::node::NativeWord;
use crate::xetex_font_info::{GlyphBBox, XeTeXFontInst};
//use crate::xetex_font_manager::PlatformFontRef;
use crate::cmd::XetexExtCmd;
use crate::xetex_font_info::GlyphID;
use crate::xetex_layout_interface::FixedPoint;
use crate::xetex_layout_interface::XeTeXLayoutEngine;
use crate::xetex_scaledmath::Scaled;
use harfbuzz_sys::hb_tag_t;
#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash)]
enum TextDirection {
LTR,
RTL,
}
// Annoying XeTeXFontMgr singleton accessors
// pub unsafe fn getFullName(fontRef: PlatformFontRef) -> *const libc::c_char;
// pub unsafe fn getDesignSize(font: *mut XeTeXFontInst) -> f64;
// pub unsafe fn findFontByName(name: &CStr, var: Option<&mut String>, size: f64) -> PlatformFontRef;
// pub unsafe fn terminate_font_manager();
// pub unsafe fn destroy_font_manager();
// Internal to XeTeXLayoutEngine but could use improvement
// pub unsafe fn getGlyphs(engine: XeTeXLayoutEngine, glyphs: *mut u32);
// pub unsafe fn getGlyphAdvances(engine: XeTeXLayoutEngine, advances: *mut f32);
// pub unsafe fn getGlyphPositions(engine: XeTeXLayoutEngine, positions: *mut FloatPoint);
// engine : *font_layout_engine.offset((*node.offset(4)).b16.s2 as isize) as CFDictionaryRef;
pub(crate) struct LayoutRequest<'a> {
// ```text
// let txtLen = (*node.offset(4)).b16.s1 as libc::c_long;
// let txtPtr = node.offset(6) as *mut UniChar;
// slice::from_raw_parts(txtPtr, txtLen)
// ```
pub text: &'a [u16],
// node.offset(1).b32.s1
pub line_width: Scaled,
// let f = let mut f: libc::c_uint = (*node.offset(4)).b16.s2 as libc::c_uint;
// *font_letter_space.offset(f as usize)
pub letter_space_unit: Scaled,
/// Only used by AAT
pub justify: bool,
}
impl<'a> LayoutRequest<'a> {
/// Unsafety: obviously, dereferences raw node pointer. The lifetime is also pulled out of
/// thin air, so just keep it in scope, ok?
pub(crate) unsafe fn | (node: &'a NativeWord, justify: bool) -> LayoutRequest<'a> {
use crate::xetex_ini::FONT_LETTER_SPACE;
let text = node.text();
let line_width = node.width();
let f = node.font() as usize;
let letter_space_unit = FONT_LETTER_SPACE[f];
LayoutRequest {
text,
line_width,
letter_space_unit,
justify,
}
}
}
pub(crate) struct NodeLayout {
pub lsDelta: Option<Scaled>,
pub width: Scaled,
pub total_glyph_count: u16,
pub glyph_info: *mut FixedPoint,
}
impl NodeLayout {
pub(crate) unsafe fn write_node(&self, node: &mut NativeWord) {
let NodeLayout {
lsDelta,
width,
total_glyph_count,
glyph_info,
} = *self;
node.set_width(width + lsDelta.unwrap_or(Scaled::ZERO));
node.set_glyph_count(total_glyph_count);
node.set_glyph_info_ptr(glyph_info as *mut _);
}
}
/// Stuff that should be added as XeTeXFontInst methods
trait FontInstance {
unsafe fn countGlyphs(font: *mut XeTeXFontInst) -> u32;
unsafe fn getGlyphWidth(font: *mut XeTeXFontInst, gid: u32) -> f32;
unsafe fn setFontLayoutDir(font: *mut XeTeXFontInst, vertical: libc::c_int);
unsafe fn getIndLanguage(font: *mut XeTeXFontInst, script: hb_tag_t, index: u32) -> hb_tag_t;
unsafe fn countFeatures(font: *mut XeTeXFontInst, script: hb_tag_t, language: hb_tag_t) -> u32;
unsafe fn getIndFeature(
font: *mut XeTeXFontInst,
script: hb_tag_t,
language: hb_tag_t,
index: u32,
) -> hb_tag_t;
unsafe fn countScripts(font: *mut XeTeXFontInst) -> u32;
unsafe fn getIndScript(font: *mut XeTeXFontInst, index: u32) -> hb_tag_t;
unsafe fn countLanguages(font: *mut XeTeXFontInst, script: hb_tag_t) -> u32;
unsafe fn getSlant(font: *mut XeTeXFontInst) -> Scaled;
unsafe fn getFontTablePtr(font: *mut XeTeXFontInst, tableTag: u32) -> *mut libc::c_void;
// unsafe fn deleteFont(mut font: *mut XeTeXFontInst);
}
// Not quite layout engine things
// pub unsafe fn createFont(fontRef: PlatformFontRef, pointSize: Fixed) -> *mut XeTeXFontInst;
// pub unsafe fn createFontFromFile(
// filename: &CStr,
// index: libc::c_int,
// pointSize: Fixed,
// ) -> *mut XeTeXFontInst;
// // Misc static dictionary lookups/setters
// pub unsafe fn set_cp_code(fontNum: libc::c_int, code: libc::c_uint, side: libc::c_int, value: libc::c_int);
// pub unsafe fn get_cp_code(
// fontNum: libc::c_int,
// code: libc::c_uint,
// side: libc::c_int,
// ) -> libc::c_int;
/*pub struct GlyphBBoxCache {
// ...
}
impl GlyphBBoxCache {
/// getCachedGlyphBBox
pub unsafe fn get(fontID: u16, glyphID: u16) -> Option<GlyphBBox> {
unimplemented!()
}
pub unsafe fn store(fontID: u16, glyphID: u16, bbox: GlyphBBox) {
unimplemented!()
}
}*/
#[repr(u8)]
pub enum GlyphEdge {
Left = 1,
Top = 2,
Right = 3,
Bottom = 4,
}
impl GlyphEdge {
/// If a glyph is left or right
#[inline]
pub fn is_side(&self) -> bool {
match *self {
GlyphEdge::Left | GlyphEdge::Right => true,
_ => false,
}
}
#[inline]
pub fn pick_from(&self, options: &(f32, f32)) -> f32 {
match *self {
GlyphEdge::Left | GlyphEdge::Top => options.0,
GlyphEdge::Right | GlyphEdge::Bottom => options.1,
}
}
pub fn from_int(i: i32) -> Option<Self> {
Some(match i {
1 => GlyphEdge::Left,
2 => GlyphEdge::Top,
3 => GlyphEdge::Right,
4 => GlyphEdge::Bottom,
_ => return None,
})
}
}
#[enum_dispatch::enum_dispatch]
pub(crate) enum NativeFont {
#[cfg(target_os = "macos")]
Aat(crate::xetex_aatfont::AATLayoutEngine),
Otgr(XeTeXLayoutEngine),
}
impl NativeFont {
pub(crate) fn flag(&self) -> u32 {
match self {
#[cfg(target_os = "macos")]
Self::Aat(_) => 0xFFFF,
Self::Otgr(_) => 0xFFFE,
}
}
}
#[enum_dispatch::enum_dispatch(NativeFont)]
pub(crate) trait TextLayoutEngine {
/// The most important trait method. Lay out some text and return its size.
unsafe fn layout_text(&mut self, request: LayoutRequest) -> NodeLayout;
/// getFontFilename
fn font_filename(&self, index: &mut u32) -> String;
//unsafe fn print_font_name(&self, c: i32, arg1: i32, arg2: i32);
/// getFontInst
//fn font_instance(&self) -> &XeTeXFontInst;
// should implement Drop
// unsafe fn deleteLayoutEngine(mut engine: XeTeXLayoutEngine);
unsafe fn glyph_width(&self, gid: u32) -> f64;
// XXX: make a single struct for make_font_def to consume, of all the required values
unsafe fn get_font_metrics(&self) -> (Scaled, Scaled, Scaled, Scaled, Scaled);
/// ot_font_get, aat_font_get
unsafe fn poorly_named_getter(&self, what: XetexExtCmd) -> i32;
/// ot_font_get_1, aat_font_get_1
unsafe fn poorly_named_getter_1(&self, what: XetexExtCmd, param1: i32) -> i32;
/// ot_font_get_2, aat_font_get_2
unsafe fn poorly_named_getter_2(&self, what: XetexExtCmd, param1: i32, param2: i32) -> i32;
unsafe fn poorly_named_getter_3(
&self,
what: XetexExtCmd,
param1: i32,
param2: i32,
param3: i32,
) -> i32;
unsafe fn get_flags(&self, font_number: usize) -> u16;
/// getExtendFactor
fn extend_factor(&self) -> f64;
/// getPointSize
fn point_size(&self) -> f64;
/// getAscentAndDescent
fn ascent_and_descent(&self) -> (f32, f32);
/// getCapAndXHeight
fn cap_and_x_height(&self) -> (f32, f32);
/// getEmboldenFactor
fn embolden_factor(&self) -> f32;
/// as r,g,b,a bytes, in order (careful of endianness maybe at output phase)
fn rgb_value(&self) -> u32;
/// getSlantFactor
unsafe fn slant_factor(&self) -> f64;
/// getGlyphName
unsafe fn glyph_name(&self, gid: GlyphID) -> String;
/// getGlyphBounds (had out param)
unsafe fn glyph_bbox(&self, glyphID: u32) -> Option<GlyphBBox>;
unsafe fn get_glyph_width_from_engine(&self, glyphID: u32) -> f64;
/// getGlyphHeightDepth (had out params height, depth)
unsafe fn glyph_height_depth(&self, glyphID: u32) -> Option<(f32, f32)>;
/// getGlyphSidebearings (had out params lsb, rsb)
unsafe fn glyph_sidebearings(&self, glyphID: u32) -> Option<(f32, f32)>;
/// getGlyphItalCorr
unsafe fn glyph_ital_correction(&self, glyphID: u32) -> Option<f64>;
/// mapCharToGlyph
/// Should probably just use engine.font as this just passes on the call
/// This is used for 'fallback in case lacks an OS/2 table', and also for adding accents
/// (get_native_char_sidebearings).
/// Although the shaping engine should probably be doing the latter, not xetex0!
fn map_char_to_glyph(&self, chr: char) -> u32;
/// getFontCharRange
/// Another candidate for using XeTeXFontInst directly
fn font_char_range(&self, reqFirst: i32) -> i32;
/// mapGlyphToIndex
/// Should use engine.font directly
fn map_glyph_to_index(&self, glyph_name: &str) -> i32;
// Provided methods, override if using stuff
/// Default impl is { false }.
/// Only used directly with xetex0.
fn using_graphite(&self) -> bool {
false
}
/// Returns true if "user asked for Graphite line breaking and the font supports it"
/// Only relevant if this engine actually uses graphite, hence default impl of { false }
unsafe fn initGraphiteBreaking(&mut self, _txt: &[u16]) -> bool {
false
}
/// Not sure what AAT should return, since this is only called with random casts to
/// XeTeXLayoutENgine in xetex0.
fn using_open_type(&self) -> bool {
false
}
unsafe fn is_open_type_math_font(&self) -> bool {
false
}
}
/*
trait GraphiteFontSomething {
unsafe fn countGraphiteFeatures(&self) -> u32;
unsafe fn getGraphiteFeatureCode(&self, index: u32) -> u32;
unsafe fn countGraphiteFeatureSettings(&self, featureID: u32) -> u32;
unsafe fn getGraphiteFeatureSettingCode(&self, featureID: u32, index: u32) -> u32;
unsafe fn getGraphiteFeatureDefaultSetting(&self, featureID: u32) -> u32;
unsafe fn getGraphiteFeatureLabel(&self, featureID: u32) -> *mut libc::c_char;
unsafe fn getGraphiteFeatureSettingLabel(
&self,
featureID: u32,
settingID: u32,
) -> *mut libc::c_char;
unsafe fn findGraphiteFeature(
&self,
s: *const libc::c_char,
e: *const libc::c_char,
f: *mut hb_tag_t,
v: *mut libc::c_int,
) -> bool;
unsafe fn findGraphiteFeatureNamed(
&self,
name: *const libc::c_char,
namelength: libc::c_int,
) -> libc::c_long;
unsafe fn findGraphiteFeatureSettingNamed(
&self,
id: u32,
name: *const libc::c_char,
namelength: libc::c_int,
) -> libc::c_long;
}
*/
| from_node | identifier_name |
bar_chart_from_csv.py | # bar chart from a CSV
# Written in Python 2.7
# RCK
# 1-10-12
import matplotlib
matplotlib.use('Agg')
import gzip
import numpy as np
import pylab as pl
from scipy import stats
import scipy
import scikits.bootstrap as bootstrap
import math
from optparse import OptionParser
# Set up options
usage = """usage: %prog [options] outfile infile1_group1 [infile2_group1 infile3_group2 infile4_group2]
Permitted types for outfile are png, pdf, ps, eps, and svg
"""
parser = OptionParser(usage)
parser.add_option("-d", "--debug_messages", action = "store_true", dest = "debug_messages",
default = False, help = "print debug messages to stdout")
parser.add_option("-v", "--verbose", action = "store_true", dest = "verbose",
default = False, help = "print verbose messages to stdout")
parser.add_option("-x", "--xlabel", dest="x_label", type="string",
help="X-axis Label")
parser.add_option("-y", "--ylabel", dest="y_label", type="string",
help="Y-axis Label")
parser.add_option("-t", "--title", dest="title", type="string",
help="Graph Title")
parser.add_option("--ylim_max", dest="ylim_max", type="float",
help="Set the max ylim")
parser.add_option("--has_header", dest="has_header", action="store_true", default = False, help = "Contains header line, so ignore it")
parser.add_option("-s", "--separator", dest="separator", type="string", help="Separator")
parser.add_option("--ignorenan", action="store_true",dest="ignorenan", default=False, help = "If a line includes something that isn't a number, ignore that line.")
### grouping options
parser.add_option("--groups", dest="groups", type="int", help="The number of groups (X-ticks)") ## bars in the same group go on top of each other
parser.add_option("--xticks", dest="group_labels", type="string",
help="X-axis Column Label(s) (ticks)")
parser.add_option("--legend", dest="legend", type="string",
help="Labels to go in the legend, by colors") ## the individuals within a group
parser.add_option("--columns", dest="columns", type="string",
help="If in a multi-column file, select the columns you want to work.")
parser.add_option("--xsize", dest="xsize", type="int",
help="size in X dimension in px")
parser.add_option("--ysize", dest="ysize", type="int",
help="size in Y dimension in px")
### display options
parser.add_option("--stack", dest="stack", action = "store_true", help="Stacked Bar Chart") ## bars in the same group go on top of each other
parser.add_option("--pair", dest="pair", action = "store_true", help="Paired Bar Chart") ## bars in the same group go next to each other
parser.add_option("--error", dest="error", action = "store_true", default=False, help="Display Error Bars") ## Display the error bars.
parser.add_option("--show", dest="show", action="store_true", default = False, help = "Show the thing to be able to edit the image.")
## fetch the args
(options, args) = parser.parse_args()
## parameter errors
if len(args) < 2:
parser.error("incorrect number of arguments")
## labels to use for the columns
group_labels = []
if options.group_labels:
group_labels = options.group_labels.split(",")
legend_labels = []
if options.legend:
legend_labels = options.legend.split(",")
columns = []
if options.columns:
columns = [ (int(val) - 1) for val in options.columns.split(",") ]
if options.stack and options.pair: ## mutually exclusive
parser.error("--stack and --pair are mutually exclusive. Pick one")
if options.groups:
if not (options.stack or options.pair):
options.stack = True ## default
## output file name
outfilename = args[0]
## input filenames
inputfilenames = args[1:]
if options.groups:
# if len(inputfilenames) % options.groups:
# parser.error("Inputfile count must be evenly divisible by number of groups. That is, there must be the same number of items in each group.")
if options.group_labels and len(group_labels) != options.groups:
parser.error("The number of x-tick labels must match the number of groups.")
# if options.legend and len(legend_labels) != ( len(inputfilenames) / options.groups):
# parser.error("The number of legend labels must match the number of items in each group")
if not options.separator:
options.separator="," ## default
## read the data from the files
data = []
for inputfilename in inputfilenames:
file_data = []
if inputfilename[-3:] == ".gz":
fd = gzip.open(inputfilename)
else:
fd = open(inputfilename)
line_ct = 0
datums = []
for line in fd:
line = line.strip() ## strip off the end of line crap
if len(line) == 0 or line[0] == '#': ## if the line is blank or a comment
continue
if line_ct == 0 and options.has_header: ## the first line is a header line. skip it.
line_ct += 1
continue
line = line.split( options.separator )
try:
converted_line = [ float(val) for val in line ]
except ValueError:
if options.ignorenan:
continue ## move along
else: ## we want to FUCKING DIE
raise
if len(converted_line) > 1: ## multi-column! Yow!
member_ct = len(converted_line)
member_indexes = range(0, member_ct)
if options.columns:
member_ct = len(columns)
member_indexes = columns
for i in range(0, member_ct):
if len(datums) <= i:
datums.append( [] ) ## add another column.
datums[i].append( converted_line[ member_indexes[i] ] )
else:
file_data.append( converted_line[0] ) ## just do it as a single column
line_ct += 1
fd.close()
if options.debug_messages:
print "RAW INPUT"
print file_data
if len(datums) == 0:
data.append( file_data )
else:
data.extend( datums )
## BOOT STRAP HELPER - mean error
def Quantile(data, q, precision=1.0):
"""
Returns the q'th percentile of the distribution given in the argument
'data'. Uses the 'precision' parameter to control the noise level.
"""
N, bins = np.histogram(data, bins=precision*np.sqrt(len(data)))
norm_cumul = 1.0*N.cumsum() / len(data)
for i in range(0, len(norm_cumul)):
|
def bootstrap_error____old( data ):
x = np.array((data))
X = [] ## estimates
mean = np.mean(x)
for xx in xrange(1000): ## do this 1000 times
X.append( np.mean( x[np.random.randint(len(x),size=len(x))] ) )
conf = 0.95
plower = (1-conf)/2.0
pupper = 1-plower
lower_ci, upper_ci = (Quantile(X, plower), Quantile(X, pupper))
diff_upper = upper_ci - mean
diff_lower = mean - lower_ci
return max( diff_upper, diff_lower )
def bootstrap_error( data, n_samples=None ):
x = np.array(data)
meanx = np.mean(x) #if debug:
try:
if (n_samples):
CIs = bootstrap.ci(data, scipy.mean, n_samples=n_samples)
else:
CIs = bootstrap.ci(data, scipy.mean) #, n_samples=1000)
err_size = max( (meanx - CIs[0]), (CIs[1] - meanx) )
return err_size
except (ValueError):
CIs = None
X = [] ## estimates
stdx = np.std(x)
for xx in xrange(1000): ## do this 1000 times
X.append( np.mean( x[np.random.randint(len(x),size=len(x))] ) )
mean_X = np.mean(X)
std_X = np.std(X)
## re-sample means are not guaranteed to be quite right.
## Conf 0.95, loc=sample mean, scale = (np.std(X, ddof=1)/np.sqrt(len(X)))
conf_int = stats.norm.interval(0.95, loc=mean_X, scale=stats.sem(X))
#toperr = (mean_X - conf_int[0])
#boterr = (conf_int[1] - mean_X)
err_size = max( mean_X - conf_int[0], conf_int[1] - mean_X )
if (np.isnan(err_size)):
err_size = 0
return err_size #conf_int #conf int is more accurate, but bar chart doesn't support it.
## split the files into groups, and calculate the means and bootstrap errors
group_count = len(data) ## by default, there are as many groups as there are files
member_count = 1 ## and by default, there is only one member in each group.
if options.groups:
group_count = options.groups ## ooop, now there are this many groups
member_count = len(data) / group_count
if options.debug_messages:
print "GROUP COUNT %s" % group_count
print "MEMBER COUNT %s" % member_count
means = []
errors = []
data_index = 0
for i in range(0, group_count):
means.append( [] )
errors.append( [] )
for j in range(0, member_count): # tick 'em off (this is inefficient, but whatever)
means[-1].append( np.mean( data[data_index] ) )
if options.error:
errors[-1].append( bootstrap_error( data[data_index] ) )
data_index += 1 ## NEXT
if options.debug_messages:
print means
print errors
# print data_index
class Colors:
Black = (0.0, 0.0, 0.0, 1.0)
DarkGray = (0.65, 0.65, 0.65, 1.0)
Gray = (0.75, 0.75, 0.75, 1.0)
LightGray = (0.85, 0.85, 0.85, 1.0)
VeryLightGray = (0.9, 0.9, 0.9, 1.0)
White = (1.0, 1.0, 1.0, 1.0)
Transparent = (0, 0, 0, 0)
Purple = (0.55, 0.0, 0.55, 1.0)
LightPurple = (0.8, 0.7, 0.8, 1.0)
Blue = (0.20, 0.49, 0.95, 1.0)
LightBlue = (0.6, 0.7, 0.95, 1.0)
DarkBlue = (0.1, 0.3, 0.7, 1.0)
BlueGreen = (0.0, 1.0, 1.0, 1.0)
LightBlueGreen = (0.8, 1.0, 0.8, 1.0)
Green = (0.0, 0.7, 0.0, 1.0)
LightGreen = (0.8, 1.0, 0.8, 1.0)
Yellow = (0.9, 0.9, 0.0, 1.0)
Orange = (0.93, 0.67, 0.13, 1.0)
OrangeRed = (1.0, 0.7, 0.0, 1.0)
LightOrangeRed = (0.9, 0.7, 0.6, 1.0)
DarkOrangeRed = (0.5, 0.3, 0.2, 1.0)
Red = (0.95, 0, 0.0, 1.0)
LightPink = (0.8, 0.7, 0.7, 1.0)
DarkPink = (0.86, 0.62, 0.65, 1.0)
TransparentGray = (0.75, 0.75, 0.75, 0.5)
Default = (0.0, 0.0, 0.0, 1.0)
color_sets = [ Colors.Purple,
Colors.Gray,
Colors.Orange,
Colors.BlueGreen,
Colors.Yellow,
Colors.DarkPink,
Colors.LightGreen,
Colors.DarkOrangeRed,
Colors.LightPurple,
Colors.DarkGray,
Colors.Blue,
Colors.Red,
Colors.LightOrangeRed,
Colors.LightBlue,
Colors.VeryLightGray] #max seven items per group
artists = []
if (options.xsize and options.ysize):
my_dpi = 200
fig = pl.figure(figsize=(options.xsize/my_dpi, options.ysize/my_dpi), dpi=my_dpi)
else:
fig = pl.figure()
ax1 = fig.add_subplot(111)
indexes = np.arange(group_count)
total_width = 0.75
if not options.pair: ## stack it up (same case as no-stack with single-member groups)
width = total_width
bottoms = [ 0 for i in range(0, group_count) ] ## fill the bottoms with zeroes
for item_index in range(0, member_count): ## start at the zeroth member (bottommost) and stack from there.
if options.debug_messages:
print "SET!"
mean_set = [ group[item_index] for group in means ] ## pull it out
if options.error:
be_set = [ group[item_index] for group in errors ] ## pull it out
artists.append( ax1.bar( indexes, mean_set, width, color=color_sets[ item_index ], yerr=be_set, bottom=bottoms ) )
else:
artists.append( ax1.bar( indexes, mean_set, width, color=color_sets[ item_index ], bottom=bottoms ) )
if options.stack: ## we're stacking
for i in range(0, len(bottoms)): ## update the bottoms
bottoms[i] += mean_set[i]
elif options.pair: ## really an else here.
width = (total_width / member_count) * 0.5 ## divide this up appropriately
for item_index in range(0, member_count): ## start at the zeroth member (bottommost) and stack from there.
mean_set = [ group[item_index] for group in means ] ## pull it out
if options.error:
be_set = [ group[item_index] for group in errors ] ## pull it out
artists.append( ax1.bar( width+indexes+(width*item_index), mean_set, width, color=color_sets[ item_index ], yerr=be_set ) )
else:
artists.append( ax1.bar( width+indexes+(width*item_index), mean_set, width, color=color_sets[ item_index ] ) )
if options.x_label:
ax1.set_xlabel( options.x_label )
if options.y_label:
ax1.set_ylabel( options.y_label )
if options.title:
pl.title( options.title )
#print ax1.get_ylim()
if options.ylim_max:
pl.ylim(0,options.ylim_max)
else:
pl.ylim(0, ax1.get_ylim()[1])
#else
# pl.ylim(0,
## set the xticks
if len(group_labels) == 0: ## none defined
trunc_names = [ val.split('_')[0] for val in inputfilenames ]
for i in range(0, len(trunc_names), member_count):
if options.debug_messages:
print "G LABEL INDEX %s " % i
group_labels.append( trunc_names[i] )
if options.debug_messages:
print "GROUP LABELS"
print group_labels
pl.xticks(width/2+indexes+total_width/2., group_labels )
## set the legend
def proxy_artist( color ):
p = pl.Rectangle((0,0), 1,1, fc=color)
return p
if options.legend and len(artists) > 0 and len(legend_labels) > 0:
if options.debug_messages:
print
print "ARTISTS"
print artists
print "LABELS"
print legend_labels
proxies = []
for i in range(0, member_count):
proxies.append( proxy_artist( color_sets[i] ) )
if options.stack:
proxies.reverse()
legend_labels.reverse()
#loc='upper center'
pl.legend( proxies, legend_labels, loc=2, bbox_to_anchor=(1.01, 1), borderaxespad=0.)#, bbox_to_anchor=(1.01, 1), )
leg = pl.gca().get_legend()
ltext = leg.get_texts()
pl.setp( ltext, fontsize='small')
l,b,w,h = pl.axes().get_position().bounds
#pl.axes().set_position([0.1,b,w*.78,h])
if options.show:
pl.show()
pl.savefig(outfilename, bbox_inches='tight')
| if norm_cumul[i] > q:
return bins[i] | conditional_block |
bar_chart_from_csv.py | # bar chart from a CSV
# Written in Python 2.7
# RCK
# 1-10-12
import matplotlib
matplotlib.use('Agg')
import gzip
import numpy as np
import pylab as pl
from scipy import stats
import scipy
import scikits.bootstrap as bootstrap
import math
from optparse import OptionParser
# Set up options
usage = """usage: %prog [options] outfile infile1_group1 [infile2_group1 infile3_group2 infile4_group2]
Permitted types for outfile are png, pdf, ps, eps, and svg
"""
parser = OptionParser(usage)
parser.add_option("-d", "--debug_messages", action = "store_true", dest = "debug_messages",
default = False, help = "print debug messages to stdout")
parser.add_option("-v", "--verbose", action = "store_true", dest = "verbose",
default = False, help = "print verbose messages to stdout")
parser.add_option("-x", "--xlabel", dest="x_label", type="string",
help="X-axis Label")
parser.add_option("-y", "--ylabel", dest="y_label", type="string",
help="Y-axis Label")
parser.add_option("-t", "--title", dest="title", type="string",
help="Graph Title")
parser.add_option("--ylim_max", dest="ylim_max", type="float",
help="Set the max ylim")
parser.add_option("--has_header", dest="has_header", action="store_true", default = False, help = "Contains header line, so ignore it")
parser.add_option("-s", "--separator", dest="separator", type="string", help="Separator")
parser.add_option("--ignorenan", action="store_true",dest="ignorenan", default=False, help = "If a line includes something that isn't a number, ignore that line.")
### grouping options
parser.add_option("--groups", dest="groups", type="int", help="The number of groups (X-ticks)") ## bars in the same group go on top of each other
parser.add_option("--xticks", dest="group_labels", type="string",
help="X-axis Column Label(s) (ticks)")
parser.add_option("--legend", dest="legend", type="string",
help="Labels to go in the legend, by colors") ## the individuals within a group
parser.add_option("--columns", dest="columns", type="string",
help="If in a multi-column file, select the columns you want to work.")
parser.add_option("--xsize", dest="xsize", type="int",
help="size in X dimension in px")
parser.add_option("--ysize", dest="ysize", type="int",
help="size in Y dimension in px")
### display options
parser.add_option("--stack", dest="stack", action = "store_true", help="Stacked Bar Chart") ## bars in the same group go on top of each other
parser.add_option("--pair", dest="pair", action = "store_true", help="Paired Bar Chart") ## bars in the same group go next to each other
parser.add_option("--error", dest="error", action = "store_true", default=False, help="Display Error Bars") ## Display the error bars.
parser.add_option("--show", dest="show", action="store_true", default = False, help = "Show the thing to be able to edit the image.")
## fetch the args
(options, args) = parser.parse_args()
## parameter errors
if len(args) < 2:
parser.error("incorrect number of arguments")
## labels to use for the columns
group_labels = []
if options.group_labels:
group_labels = options.group_labels.split(",")
legend_labels = []
if options.legend:
legend_labels = options.legend.split(",")
columns = []
if options.columns:
columns = [ (int(val) - 1) for val in options.columns.split(",") ]
if options.stack and options.pair: ## mutually exclusive
parser.error("--stack and --pair are mutually exclusive. Pick one")
if options.groups:
if not (options.stack or options.pair):
options.stack = True ## default
## output file name
outfilename = args[0]
## input filenames
inputfilenames = args[1:]
if options.groups:
# if len(inputfilenames) % options.groups:
# parser.error("Inputfile count must be evenly divisible by number of groups. That is, there must be the same number of items in each group.")
if options.group_labels and len(group_labels) != options.groups:
parser.error("The number of x-tick labels must match the number of groups.")
# if options.legend and len(legend_labels) != ( len(inputfilenames) / options.groups):
# parser.error("The number of legend labels must match the number of items in each group")
if not options.separator:
options.separator="," ## default
## read the data from the files
data = []
for inputfilename in inputfilenames:
file_data = []
if inputfilename[-3:] == ".gz":
fd = gzip.open(inputfilename)
else:
fd = open(inputfilename)
line_ct = 0
datums = []
for line in fd:
line = line.strip() ## strip off the end of line crap
if len(line) == 0 or line[0] == '#': ## if the line is blank or a comment
continue
if line_ct == 0 and options.has_header: ## the first line is a header line. skip it.
line_ct += 1
continue
line = line.split( options.separator )
try:
converted_line = [ float(val) for val in line ]
except ValueError:
if options.ignorenan:
continue ## move along
else: ## we want to FUCKING DIE
raise
if len(converted_line) > 1: ## multi-column! Yow!
member_ct = len(converted_line)
member_indexes = range(0, member_ct)
if options.columns:
member_ct = len(columns)
member_indexes = columns
for i in range(0, member_ct):
if len(datums) <= i:
datums.append( [] ) ## add another column.
datums[i].append( converted_line[ member_indexes[i] ] )
else:
file_data.append( converted_line[0] ) ## just do it as a single column
line_ct += 1
fd.close()
if options.debug_messages:
print "RAW INPUT"
print file_data
if len(datums) == 0:
data.append( file_data )
else:
data.extend( datums )
## BOOT STRAP HELPER - mean error
def Quantile(data, q, precision=1.0):
"""
Returns the q'th percentile of the distribution given in the argument
'data'. Uses the 'precision' parameter to control the noise level.
"""
N, bins = np.histogram(data, bins=precision*np.sqrt(len(data)))
norm_cumul = 1.0*N.cumsum() / len(data)
for i in range(0, len(norm_cumul)):
if norm_cumul[i] > q:
return bins[i]
def bootstrap_error____old( data ):
x = np.array((data))
X = [] ## estimates
mean = np.mean(x)
for xx in xrange(1000): ## do this 1000 times
X.append( np.mean( x[np.random.randint(len(x),size=len(x))] ) )
conf = 0.95
plower = (1-conf)/2.0
pupper = 1-plower
lower_ci, upper_ci = (Quantile(X, plower), Quantile(X, pupper))
diff_upper = upper_ci - mean
diff_lower = mean - lower_ci
return max( diff_upper, diff_lower )
def bootstrap_error( data, n_samples=None ):
x = np.array(data)
meanx = np.mean(x) #if debug:
try:
if (n_samples):
CIs = bootstrap.ci(data, scipy.mean, n_samples=n_samples)
else:
CIs = bootstrap.ci(data, scipy.mean) #, n_samples=1000)
err_size = max( (meanx - CIs[0]), (CIs[1] - meanx) )
return err_size
except (ValueError):
CIs = None
X = [] ## estimates
stdx = np.std(x)
for xx in xrange(1000): ## do this 1000 times
X.append( np.mean( x[np.random.randint(len(x),size=len(x))] ) )
mean_X = np.mean(X)
std_X = np.std(X)
## re-sample means are not guaranteed to be quite right.
## Conf 0.95, loc=sample mean, scale = (np.std(X, ddof=1)/np.sqrt(len(X)))
conf_int = stats.norm.interval(0.95, loc=mean_X, scale=stats.sem(X))
#toperr = (mean_X - conf_int[0])
#boterr = (conf_int[1] - mean_X)
err_size = max( mean_X - conf_int[0], conf_int[1] - mean_X )
if (np.isnan(err_size)):
err_size = 0
return err_size #conf_int #conf int is more accurate, but bar chart doesn't support it.
## split the files into groups, and calculate the means and bootstrap errors
group_count = len(data) ## by default, there are as many groups as there are files
member_count = 1 ## and by default, there is only one member in each group.
if options.groups:
group_count = options.groups ## ooop, now there are this many groups
member_count = len(data) / group_count
if options.debug_messages:
print "GROUP COUNT %s" % group_count
print "MEMBER COUNT %s" % member_count
means = []
errors = []
data_index = 0
for i in range(0, group_count):
means.append( [] )
errors.append( [] )
for j in range(0, member_count): # tick 'em off (this is inefficient, but whatever)
means[-1].append( np.mean( data[data_index] ) )
if options.error:
errors[-1].append( bootstrap_error( data[data_index] ) )
data_index += 1 ## NEXT
if options.debug_messages:
print means
print errors
# print data_index
class | :
Black = (0.0, 0.0, 0.0, 1.0)
DarkGray = (0.65, 0.65, 0.65, 1.0)
Gray = (0.75, 0.75, 0.75, 1.0)
LightGray = (0.85, 0.85, 0.85, 1.0)
VeryLightGray = (0.9, 0.9, 0.9, 1.0)
White = (1.0, 1.0, 1.0, 1.0)
Transparent = (0, 0, 0, 0)
Purple = (0.55, 0.0, 0.55, 1.0)
LightPurple = (0.8, 0.7, 0.8, 1.0)
Blue = (0.20, 0.49, 0.95, 1.0)
LightBlue = (0.6, 0.7, 0.95, 1.0)
DarkBlue = (0.1, 0.3, 0.7, 1.0)
BlueGreen = (0.0, 1.0, 1.0, 1.0)
LightBlueGreen = (0.8, 1.0, 0.8, 1.0)
Green = (0.0, 0.7, 0.0, 1.0)
LightGreen = (0.8, 1.0, 0.8, 1.0)
Yellow = (0.9, 0.9, 0.0, 1.0)
Orange = (0.93, 0.67, 0.13, 1.0)
OrangeRed = (1.0, 0.7, 0.0, 1.0)
LightOrangeRed = (0.9, 0.7, 0.6, 1.0)
DarkOrangeRed = (0.5, 0.3, 0.2, 1.0)
Red = (0.95, 0, 0.0, 1.0)
LightPink = (0.8, 0.7, 0.7, 1.0)
DarkPink = (0.86, 0.62, 0.65, 1.0)
TransparentGray = (0.75, 0.75, 0.75, 0.5)
Default = (0.0, 0.0, 0.0, 1.0)
color_sets = [ Colors.Purple,
Colors.Gray,
Colors.Orange,
Colors.BlueGreen,
Colors.Yellow,
Colors.DarkPink,
Colors.LightGreen,
Colors.DarkOrangeRed,
Colors.LightPurple,
Colors.DarkGray,
Colors.Blue,
Colors.Red,
Colors.LightOrangeRed,
Colors.LightBlue,
Colors.VeryLightGray] #max seven items per group
artists = []
if (options.xsize and options.ysize):
my_dpi = 200
fig = pl.figure(figsize=(options.xsize/my_dpi, options.ysize/my_dpi), dpi=my_dpi)
else:
fig = pl.figure()
ax1 = fig.add_subplot(111)
indexes = np.arange(group_count)
total_width = 0.75
if not options.pair: ## stack it up (same case as no-stack with single-member groups)
width = total_width
bottoms = [ 0 for i in range(0, group_count) ] ## fill the bottoms with zeroes
for item_index in range(0, member_count): ## start at the zeroth member (bottommost) and stack from there.
if options.debug_messages:
print "SET!"
mean_set = [ group[item_index] for group in means ] ## pull it out
if options.error:
be_set = [ group[item_index] for group in errors ] ## pull it out
artists.append( ax1.bar( indexes, mean_set, width, color=color_sets[ item_index ], yerr=be_set, bottom=bottoms ) )
else:
artists.append( ax1.bar( indexes, mean_set, width, color=color_sets[ item_index ], bottom=bottoms ) )
if options.stack: ## we're stacking
for i in range(0, len(bottoms)): ## update the bottoms
bottoms[i] += mean_set[i]
elif options.pair: ## really an else here.
width = (total_width / member_count) * 0.5 ## divide this up appropriately
for item_index in range(0, member_count): ## start at the zeroth member (bottommost) and stack from there.
mean_set = [ group[item_index] for group in means ] ## pull it out
if options.error:
be_set = [ group[item_index] for group in errors ] ## pull it out
artists.append( ax1.bar( width+indexes+(width*item_index), mean_set, width, color=color_sets[ item_index ], yerr=be_set ) )
else:
artists.append( ax1.bar( width+indexes+(width*item_index), mean_set, width, color=color_sets[ item_index ] ) )
if options.x_label:
ax1.set_xlabel( options.x_label )
if options.y_label:
ax1.set_ylabel( options.y_label )
if options.title:
pl.title( options.title )
#print ax1.get_ylim()
if options.ylim_max:
pl.ylim(0,options.ylim_max)
else:
pl.ylim(0, ax1.get_ylim()[1])
#else
# pl.ylim(0,
## set the xticks
if len(group_labels) == 0: ## none defined
trunc_names = [ val.split('_')[0] for val in inputfilenames ]
for i in range(0, len(trunc_names), member_count):
if options.debug_messages:
print "G LABEL INDEX %s " % i
group_labels.append( trunc_names[i] )
if options.debug_messages:
print "GROUP LABELS"
print group_labels
pl.xticks(width/2+indexes+total_width/2., group_labels )
## set the legend
def proxy_artist( color ):
p = pl.Rectangle((0,0), 1,1, fc=color)
return p
if options.legend and len(artists) > 0 and len(legend_labels) > 0:
if options.debug_messages:
print
print "ARTISTS"
print artists
print "LABELS"
print legend_labels
proxies = []
for i in range(0, member_count):
proxies.append( proxy_artist( color_sets[i] ) )
if options.stack:
proxies.reverse()
legend_labels.reverse()
#loc='upper center'
pl.legend( proxies, legend_labels, loc=2, bbox_to_anchor=(1.01, 1), borderaxespad=0.)#, bbox_to_anchor=(1.01, 1), )
leg = pl.gca().get_legend()
ltext = leg.get_texts()
pl.setp( ltext, fontsize='small')
l,b,w,h = pl.axes().get_position().bounds
#pl.axes().set_position([0.1,b,w*.78,h])
if options.show:
pl.show()
pl.savefig(outfilename, bbox_inches='tight')
| Colors | identifier_name |
bar_chart_from_csv.py | # bar chart from a CSV
# Written in Python 2.7
# RCK
# 1-10-12
import matplotlib
matplotlib.use('Agg')
import gzip
import numpy as np
import pylab as pl
from scipy import stats
import scipy
import scikits.bootstrap as bootstrap
import math
from optparse import OptionParser
# Set up options
usage = """usage: %prog [options] outfile infile1_group1 [infile2_group1 infile3_group2 infile4_group2]
Permitted types for outfile are png, pdf, ps, eps, and svg
"""
parser = OptionParser(usage)
parser.add_option("-d", "--debug_messages", action = "store_true", dest = "debug_messages",
default = False, help = "print debug messages to stdout")
parser.add_option("-v", "--verbose", action = "store_true", dest = "verbose",
default = False, help = "print verbose messages to stdout")
parser.add_option("-x", "--xlabel", dest="x_label", type="string",
help="X-axis Label")
parser.add_option("-y", "--ylabel", dest="y_label", type="string",
help="Y-axis Label")
parser.add_option("-t", "--title", dest="title", type="string",
help="Graph Title")
parser.add_option("--ylim_max", dest="ylim_max", type="float",
help="Set the max ylim")
parser.add_option("--has_header", dest="has_header", action="store_true", default = False, help = "Contains header line, so ignore it")
parser.add_option("-s", "--separator", dest="separator", type="string", help="Separator")
parser.add_option("--ignorenan", action="store_true",dest="ignorenan", default=False, help = "If a line includes something that isn't a number, ignore that line.")
### grouping options
parser.add_option("--groups", dest="groups", type="int", help="The number of groups (X-ticks)") ## bars in the same group go on top of each other
parser.add_option("--xticks", dest="group_labels", type="string",
help="X-axis Column Label(s) (ticks)")
parser.add_option("--legend", dest="legend", type="string",
help="Labels to go in the legend, by colors") ## the individuals within a group
parser.add_option("--columns", dest="columns", type="string",
help="If in a multi-column file, select the columns you want to work.")
parser.add_option("--xsize", dest="xsize", type="int",
help="size in X dimension in px")
parser.add_option("--ysize", dest="ysize", type="int",
help="size in Y dimension in px")
### display options
parser.add_option("--stack", dest="stack", action = "store_true", help="Stacked Bar Chart") ## bars in the same group go on top of each other
parser.add_option("--pair", dest="pair", action = "store_true", help="Paired Bar Chart") ## bars in the same group go next to each other
parser.add_option("--error", dest="error", action = "store_true", default=False, help="Display Error Bars") ## Display the error bars.
parser.add_option("--show", dest="show", action="store_true", default = False, help = "Show the thing to be able to edit the image.")
## fetch the args
(options, args) = parser.parse_args()
## parameter errors
if len(args) < 2:
parser.error("incorrect number of arguments")
## labels to use for the columns
group_labels = []
if options.group_labels:
group_labels = options.group_labels.split(",")
legend_labels = []
if options.legend:
legend_labels = options.legend.split(",")
columns = []
if options.columns:
columns = [ (int(val) - 1) for val in options.columns.split(",") ]
if options.stack and options.pair: ## mutually exclusive
parser.error("--stack and --pair are mutually exclusive. Pick one")
if options.groups:
if not (options.stack or options.pair):
options.stack = True ## default
## output file name
outfilename = args[0]
## input filenames
inputfilenames = args[1:]
if options.groups:
# if len(inputfilenames) % options.groups:
# parser.error("Inputfile count must be evenly divisible by number of groups. That is, there must be the same number of items in each group.")
if options.group_labels and len(group_labels) != options.groups:
parser.error("The number of x-tick labels must match the number of groups.")
# if options.legend and len(legend_labels) != ( len(inputfilenames) / options.groups):
# parser.error("The number of legend labels must match the number of items in each group")
if not options.separator:
options.separator="," ## default
## read the data from the files
data = []
for inputfilename in inputfilenames:
file_data = []
if inputfilename[-3:] == ".gz":
fd = gzip.open(inputfilename)
else:
fd = open(inputfilename)
line_ct = 0
datums = []
for line in fd:
line = line.strip() ## strip off the end of line crap
if len(line) == 0 or line[0] == '#': ## if the line is blank or a comment
continue
if line_ct == 0 and options.has_header: ## the first line is a header line. skip it.
line_ct += 1
continue
line = line.split( options.separator )
try:
converted_line = [ float(val) for val in line ]
except ValueError:
if options.ignorenan:
continue ## move along
else: ## we want to FUCKING DIE
raise
if len(converted_line) > 1: ## multi-column! Yow!
member_ct = len(converted_line)
member_indexes = range(0, member_ct)
if options.columns:
member_ct = len(columns)
member_indexes = columns
for i in range(0, member_ct):
if len(datums) <= i:
datums.append( [] ) ## add another column.
datums[i].append( converted_line[ member_indexes[i] ] )
else:
file_data.append( converted_line[0] ) ## just do it as a single column
line_ct += 1
fd.close()
if options.debug_messages:
print "RAW INPUT"
print file_data
if len(datums) == 0:
data.append( file_data )
else:
data.extend( datums )
## BOOT STRAP HELPER - mean error
def Quantile(data, q, precision=1.0):
"""
Returns the q'th percentile of the distribution given in the argument
'data'. Uses the 'precision' parameter to control the noise level.
"""
N, bins = np.histogram(data, bins=precision*np.sqrt(len(data)))
norm_cumul = 1.0*N.cumsum() / len(data)
for i in range(0, len(norm_cumul)):
if norm_cumul[i] > q:
return bins[i]
def bootstrap_error____old( data ):
x = np.array((data))
X = [] ## estimates
mean = np.mean(x)
for xx in xrange(1000): ## do this 1000 times
X.append( np.mean( x[np.random.randint(len(x),size=len(x))] ) )
conf = 0.95
plower = (1-conf)/2.0
pupper = 1-plower
lower_ci, upper_ci = (Quantile(X, plower), Quantile(X, pupper))
diff_upper = upper_ci - mean
diff_lower = mean - lower_ci
return max( diff_upper, diff_lower )
def bootstrap_error( data, n_samples=None ):
x = np.array(data)
meanx = np.mean(x) #if debug:
try:
if (n_samples):
CIs = bootstrap.ci(data, scipy.mean, n_samples=n_samples)
else:
CIs = bootstrap.ci(data, scipy.mean) #, n_samples=1000)
err_size = max( (meanx - CIs[0]), (CIs[1] - meanx) )
return err_size
except (ValueError):
CIs = None
X = [] ## estimates
stdx = np.std(x) |
mean_X = np.mean(X)
std_X = np.std(X)
## re-sample means are not guaranteed to be quite right.
## Conf 0.95, loc=sample mean, scale = (np.std(X, ddof=1)/np.sqrt(len(X)))
conf_int = stats.norm.interval(0.95, loc=mean_X, scale=stats.sem(X))
#toperr = (mean_X - conf_int[0])
#boterr = (conf_int[1] - mean_X)
err_size = max( mean_X - conf_int[0], conf_int[1] - mean_X )
if (np.isnan(err_size)):
err_size = 0
return err_size #conf_int #conf int is more accurate, but bar chart doesn't support it.
## split the files into groups, and calculate the means and bootstrap errors
group_count = len(data) ## by default, there are as many groups as there are files
member_count = 1 ## and by default, there is only one member in each group.
if options.groups:
group_count = options.groups ## ooop, now there are this many groups
member_count = len(data) / group_count
if options.debug_messages:
print "GROUP COUNT %s" % group_count
print "MEMBER COUNT %s" % member_count
means = []
errors = []
data_index = 0
for i in range(0, group_count):
means.append( [] )
errors.append( [] )
for j in range(0, member_count): # tick 'em off (this is inefficient, but whatever)
means[-1].append( np.mean( data[data_index] ) )
if options.error:
errors[-1].append( bootstrap_error( data[data_index] ) )
data_index += 1 ## NEXT
if options.debug_messages:
print means
print errors
# print data_index
class Colors:
Black = (0.0, 0.0, 0.0, 1.0)
DarkGray = (0.65, 0.65, 0.65, 1.0)
Gray = (0.75, 0.75, 0.75, 1.0)
LightGray = (0.85, 0.85, 0.85, 1.0)
VeryLightGray = (0.9, 0.9, 0.9, 1.0)
White = (1.0, 1.0, 1.0, 1.0)
Transparent = (0, 0, 0, 0)
Purple = (0.55, 0.0, 0.55, 1.0)
LightPurple = (0.8, 0.7, 0.8, 1.0)
Blue = (0.20, 0.49, 0.95, 1.0)
LightBlue = (0.6, 0.7, 0.95, 1.0)
DarkBlue = (0.1, 0.3, 0.7, 1.0)
BlueGreen = (0.0, 1.0, 1.0, 1.0)
LightBlueGreen = (0.8, 1.0, 0.8, 1.0)
Green = (0.0, 0.7, 0.0, 1.0)
LightGreen = (0.8, 1.0, 0.8, 1.0)
Yellow = (0.9, 0.9, 0.0, 1.0)
Orange = (0.93, 0.67, 0.13, 1.0)
OrangeRed = (1.0, 0.7, 0.0, 1.0)
LightOrangeRed = (0.9, 0.7, 0.6, 1.0)
DarkOrangeRed = (0.5, 0.3, 0.2, 1.0)
Red = (0.95, 0, 0.0, 1.0)
LightPink = (0.8, 0.7, 0.7, 1.0)
DarkPink = (0.86, 0.62, 0.65, 1.0)
TransparentGray = (0.75, 0.75, 0.75, 0.5)
Default = (0.0, 0.0, 0.0, 1.0)
color_sets = [ Colors.Purple,
Colors.Gray,
Colors.Orange,
Colors.BlueGreen,
Colors.Yellow,
Colors.DarkPink,
Colors.LightGreen,
Colors.DarkOrangeRed,
Colors.LightPurple,
Colors.DarkGray,
Colors.Blue,
Colors.Red,
Colors.LightOrangeRed,
Colors.LightBlue,
Colors.VeryLightGray] #max seven items per group
artists = []
if (options.xsize and options.ysize):
my_dpi = 200
fig = pl.figure(figsize=(options.xsize/my_dpi, options.ysize/my_dpi), dpi=my_dpi)
else:
fig = pl.figure()
ax1 = fig.add_subplot(111)
indexes = np.arange(group_count)
total_width = 0.75
if not options.pair: ## stack it up (same case as no-stack with single-member groups)
width = total_width
bottoms = [ 0 for i in range(0, group_count) ] ## fill the bottoms with zeroes
for item_index in range(0, member_count): ## start at the zeroth member (bottommost) and stack from there.
if options.debug_messages:
print "SET!"
mean_set = [ group[item_index] for group in means ] ## pull it out
if options.error:
be_set = [ group[item_index] for group in errors ] ## pull it out
artists.append( ax1.bar( indexes, mean_set, width, color=color_sets[ item_index ], yerr=be_set, bottom=bottoms ) )
else:
artists.append( ax1.bar( indexes, mean_set, width, color=color_sets[ item_index ], bottom=bottoms ) )
if options.stack: ## we're stacking
for i in range(0, len(bottoms)): ## update the bottoms
bottoms[i] += mean_set[i]
elif options.pair: ## really an else here.
width = (total_width / member_count) * 0.5 ## divide this up appropriately
for item_index in range(0, member_count): ## start at the zeroth member (bottommost) and stack from there.
mean_set = [ group[item_index] for group in means ] ## pull it out
if options.error:
be_set = [ group[item_index] for group in errors ] ## pull it out
artists.append( ax1.bar( width+indexes+(width*item_index), mean_set, width, color=color_sets[ item_index ], yerr=be_set ) )
else:
artists.append( ax1.bar( width+indexes+(width*item_index), mean_set, width, color=color_sets[ item_index ] ) )
if options.x_label:
ax1.set_xlabel( options.x_label )
if options.y_label:
ax1.set_ylabel( options.y_label )
if options.title:
pl.title( options.title )
#print ax1.get_ylim()
if options.ylim_max:
pl.ylim(0,options.ylim_max)
else:
pl.ylim(0, ax1.get_ylim()[1])
#else
# pl.ylim(0,
## set the xticks
if len(group_labels) == 0: ## none defined
trunc_names = [ val.split('_')[0] for val in inputfilenames ]
for i in range(0, len(trunc_names), member_count):
if options.debug_messages:
print "G LABEL INDEX %s " % i
group_labels.append( trunc_names[i] )
if options.debug_messages:
print "GROUP LABELS"
print group_labels
pl.xticks(width/2+indexes+total_width/2., group_labels )
## set the legend
def proxy_artist( color ):
p = pl.Rectangle((0,0), 1,1, fc=color)
return p
if options.legend and len(artists) > 0 and len(legend_labels) > 0:
if options.debug_messages:
print
print "ARTISTS"
print artists
print "LABELS"
print legend_labels
proxies = []
for i in range(0, member_count):
proxies.append( proxy_artist( color_sets[i] ) )
if options.stack:
proxies.reverse()
legend_labels.reverse()
#loc='upper center'
pl.legend( proxies, legend_labels, loc=2, bbox_to_anchor=(1.01, 1), borderaxespad=0.)#, bbox_to_anchor=(1.01, 1), )
leg = pl.gca().get_legend()
ltext = leg.get_texts()
pl.setp( ltext, fontsize='small')
l,b,w,h = pl.axes().get_position().bounds
#pl.axes().set_position([0.1,b,w*.78,h])
if options.show:
pl.show()
pl.savefig(outfilename, bbox_inches='tight') | for xx in xrange(1000): ## do this 1000 times
X.append( np.mean( x[np.random.randint(len(x),size=len(x))] ) ) | random_line_split |
bar_chart_from_csv.py | # bar chart from a CSV
# Written in Python 2.7
# RCK
# 1-10-12
import matplotlib
matplotlib.use('Agg')
import gzip
import numpy as np
import pylab as pl
from scipy import stats
import scipy
import scikits.bootstrap as bootstrap
import math
from optparse import OptionParser
# Set up options
usage = """usage: %prog [options] outfile infile1_group1 [infile2_group1 infile3_group2 infile4_group2]
Permitted types for outfile are png, pdf, ps, eps, and svg
"""
parser = OptionParser(usage)
parser.add_option("-d", "--debug_messages", action = "store_true", dest = "debug_messages",
default = False, help = "print debug messages to stdout")
parser.add_option("-v", "--verbose", action = "store_true", dest = "verbose",
default = False, help = "print verbose messages to stdout")
parser.add_option("-x", "--xlabel", dest="x_label", type="string",
help="X-axis Label")
parser.add_option("-y", "--ylabel", dest="y_label", type="string",
help="Y-axis Label")
parser.add_option("-t", "--title", dest="title", type="string",
help="Graph Title")
parser.add_option("--ylim_max", dest="ylim_max", type="float",
help="Set the max ylim")
parser.add_option("--has_header", dest="has_header", action="store_true", default = False, help = "Contains header line, so ignore it")
parser.add_option("-s", "--separator", dest="separator", type="string", help="Separator")
parser.add_option("--ignorenan", action="store_true",dest="ignorenan", default=False, help = "If a line includes something that isn't a number, ignore that line.")
### grouping options
parser.add_option("--groups", dest="groups", type="int", help="The number of groups (X-ticks)") ## bars in the same group go on top of each other
parser.add_option("--xticks", dest="group_labels", type="string",
help="X-axis Column Label(s) (ticks)")
parser.add_option("--legend", dest="legend", type="string",
help="Labels to go in the legend, by colors") ## the individuals within a group
parser.add_option("--columns", dest="columns", type="string",
help="If in a multi-column file, select the columns you want to work.")
parser.add_option("--xsize", dest="xsize", type="int",
help="size in X dimension in px")
parser.add_option("--ysize", dest="ysize", type="int",
help="size in Y dimension in px")
### display options
parser.add_option("--stack", dest="stack", action = "store_true", help="Stacked Bar Chart") ## bars in the same group go on top of each other
parser.add_option("--pair", dest="pair", action = "store_true", help="Paired Bar Chart") ## bars in the same group go next to each other
parser.add_option("--error", dest="error", action = "store_true", default=False, help="Display Error Bars") ## Display the error bars.
parser.add_option("--show", dest="show", action="store_true", default = False, help = "Show the thing to be able to edit the image.")
## fetch the args
(options, args) = parser.parse_args()
## parameter errors
if len(args) < 2:
parser.error("incorrect number of arguments")
## labels to use for the columns
group_labels = []
if options.group_labels:
group_labels = options.group_labels.split(",")
legend_labels = []
if options.legend:
legend_labels = options.legend.split(",")
columns = []
if options.columns:
columns = [ (int(val) - 1) for val in options.columns.split(",") ]
if options.stack and options.pair: ## mutually exclusive
parser.error("--stack and --pair are mutually exclusive. Pick one")
if options.groups:
if not (options.stack or options.pair):
options.stack = True ## default
## output file name
outfilename = args[0]
## input filenames
inputfilenames = args[1:]
if options.groups:
# if len(inputfilenames) % options.groups:
# parser.error("Inputfile count must be evenly divisible by number of groups. That is, there must be the same number of items in each group.")
if options.group_labels and len(group_labels) != options.groups:
parser.error("The number of x-tick labels must match the number of groups.")
# if options.legend and len(legend_labels) != ( len(inputfilenames) / options.groups):
# parser.error("The number of legend labels must match the number of items in each group")
if not options.separator:
options.separator="," ## default
## read the data from the files
data = []
for inputfilename in inputfilenames:
file_data = []
if inputfilename[-3:] == ".gz":
fd = gzip.open(inputfilename)
else:
fd = open(inputfilename)
line_ct = 0
datums = []
for line in fd:
line = line.strip() ## strip off the end of line crap
if len(line) == 0 or line[0] == '#': ## if the line is blank or a comment
continue
if line_ct == 0 and options.has_header: ## the first line is a header line. skip it.
line_ct += 1
continue
line = line.split( options.separator )
try:
converted_line = [ float(val) for val in line ]
except ValueError:
if options.ignorenan:
continue ## move along
else: ## we want to FUCKING DIE
raise
if len(converted_line) > 1: ## multi-column! Yow!
member_ct = len(converted_line)
member_indexes = range(0, member_ct)
if options.columns:
member_ct = len(columns)
member_indexes = columns
for i in range(0, member_ct):
if len(datums) <= i:
datums.append( [] ) ## add another column.
datums[i].append( converted_line[ member_indexes[i] ] )
else:
file_data.append( converted_line[0] ) ## just do it as a single column
line_ct += 1
fd.close()
if options.debug_messages:
print "RAW INPUT"
print file_data
if len(datums) == 0:
data.append( file_data )
else:
data.extend( datums )
## BOOT STRAP HELPER - mean error
def Quantile(data, q, precision=1.0):
"""
Returns the q'th percentile of the distribution given in the argument
'data'. Uses the 'precision' parameter to control the noise level.
"""
N, bins = np.histogram(data, bins=precision*np.sqrt(len(data)))
norm_cumul = 1.0*N.cumsum() / len(data)
for i in range(0, len(norm_cumul)):
if norm_cumul[i] > q:
return bins[i]
def bootstrap_error____old( data ):
x = np.array((data))
X = [] ## estimates
mean = np.mean(x)
for xx in xrange(1000): ## do this 1000 times
X.append( np.mean( x[np.random.randint(len(x),size=len(x))] ) )
conf = 0.95
plower = (1-conf)/2.0
pupper = 1-plower
lower_ci, upper_ci = (Quantile(X, plower), Quantile(X, pupper))
diff_upper = upper_ci - mean
diff_lower = mean - lower_ci
return max( diff_upper, diff_lower )
def bootstrap_error( data, n_samples=None ):
x = np.array(data)
meanx = np.mean(x) #if debug:
try:
if (n_samples):
CIs = bootstrap.ci(data, scipy.mean, n_samples=n_samples)
else:
CIs = bootstrap.ci(data, scipy.mean) #, n_samples=1000)
err_size = max( (meanx - CIs[0]), (CIs[1] - meanx) )
return err_size
except (ValueError):
CIs = None
X = [] ## estimates
stdx = np.std(x)
for xx in xrange(1000): ## do this 1000 times
X.append( np.mean( x[np.random.randint(len(x),size=len(x))] ) )
mean_X = np.mean(X)
std_X = np.std(X)
## re-sample means are not guaranteed to be quite right.
## Conf 0.95, loc=sample mean, scale = (np.std(X, ddof=1)/np.sqrt(len(X)))
conf_int = stats.norm.interval(0.95, loc=mean_X, scale=stats.sem(X))
#toperr = (mean_X - conf_int[0])
#boterr = (conf_int[1] - mean_X)
err_size = max( mean_X - conf_int[0], conf_int[1] - mean_X )
if (np.isnan(err_size)):
err_size = 0
return err_size #conf_int #conf int is more accurate, but bar chart doesn't support it.
## split the files into groups, and calculate the means and bootstrap errors
group_count = len(data) ## by default, there are as many groups as there are files
member_count = 1 ## and by default, there is only one member in each group.
if options.groups:
group_count = options.groups ## ooop, now there are this many groups
member_count = len(data) / group_count
if options.debug_messages:
print "GROUP COUNT %s" % group_count
print "MEMBER COUNT %s" % member_count
means = []
errors = []
data_index = 0
for i in range(0, group_count):
means.append( [] )
errors.append( [] )
for j in range(0, member_count): # tick 'em off (this is inefficient, but whatever)
means[-1].append( np.mean( data[data_index] ) )
if options.error:
errors[-1].append( bootstrap_error( data[data_index] ) )
data_index += 1 ## NEXT
if options.debug_messages:
print means
print errors
# print data_index
class Colors:
Black = (0.0, 0.0, 0.0, 1.0)
DarkGray = (0.65, 0.65, 0.65, 1.0)
Gray = (0.75, 0.75, 0.75, 1.0)
LightGray = (0.85, 0.85, 0.85, 1.0)
VeryLightGray = (0.9, 0.9, 0.9, 1.0)
White = (1.0, 1.0, 1.0, 1.0)
Transparent = (0, 0, 0, 0)
Purple = (0.55, 0.0, 0.55, 1.0)
LightPurple = (0.8, 0.7, 0.8, 1.0)
Blue = (0.20, 0.49, 0.95, 1.0)
LightBlue = (0.6, 0.7, 0.95, 1.0)
DarkBlue = (0.1, 0.3, 0.7, 1.0)
BlueGreen = (0.0, 1.0, 1.0, 1.0)
LightBlueGreen = (0.8, 1.0, 0.8, 1.0)
Green = (0.0, 0.7, 0.0, 1.0)
LightGreen = (0.8, 1.0, 0.8, 1.0)
Yellow = (0.9, 0.9, 0.0, 1.0)
Orange = (0.93, 0.67, 0.13, 1.0)
OrangeRed = (1.0, 0.7, 0.0, 1.0)
LightOrangeRed = (0.9, 0.7, 0.6, 1.0)
DarkOrangeRed = (0.5, 0.3, 0.2, 1.0)
Red = (0.95, 0, 0.0, 1.0)
LightPink = (0.8, 0.7, 0.7, 1.0)
DarkPink = (0.86, 0.62, 0.65, 1.0)
TransparentGray = (0.75, 0.75, 0.75, 0.5)
Default = (0.0, 0.0, 0.0, 1.0)
color_sets = [ Colors.Purple,
Colors.Gray,
Colors.Orange,
Colors.BlueGreen,
Colors.Yellow,
Colors.DarkPink,
Colors.LightGreen,
Colors.DarkOrangeRed,
Colors.LightPurple,
Colors.DarkGray,
Colors.Blue,
Colors.Red,
Colors.LightOrangeRed,
Colors.LightBlue,
Colors.VeryLightGray] #max seven items per group
artists = []
if (options.xsize and options.ysize):
my_dpi = 200
fig = pl.figure(figsize=(options.xsize/my_dpi, options.ysize/my_dpi), dpi=my_dpi)
else:
fig = pl.figure()
ax1 = fig.add_subplot(111)
indexes = np.arange(group_count)
total_width = 0.75
if not options.pair: ## stack it up (same case as no-stack with single-member groups)
width = total_width
bottoms = [ 0 for i in range(0, group_count) ] ## fill the bottoms with zeroes
for item_index in range(0, member_count): ## start at the zeroth member (bottommost) and stack from there.
if options.debug_messages:
print "SET!"
mean_set = [ group[item_index] for group in means ] ## pull it out
if options.error:
be_set = [ group[item_index] for group in errors ] ## pull it out
artists.append( ax1.bar( indexes, mean_set, width, color=color_sets[ item_index ], yerr=be_set, bottom=bottoms ) )
else:
artists.append( ax1.bar( indexes, mean_set, width, color=color_sets[ item_index ], bottom=bottoms ) )
if options.stack: ## we're stacking
for i in range(0, len(bottoms)): ## update the bottoms
bottoms[i] += mean_set[i]
elif options.pair: ## really an else here.
width = (total_width / member_count) * 0.5 ## divide this up appropriately
for item_index in range(0, member_count): ## start at the zeroth member (bottommost) and stack from there.
mean_set = [ group[item_index] for group in means ] ## pull it out
if options.error:
be_set = [ group[item_index] for group in errors ] ## pull it out
artists.append( ax1.bar( width+indexes+(width*item_index), mean_set, width, color=color_sets[ item_index ], yerr=be_set ) )
else:
artists.append( ax1.bar( width+indexes+(width*item_index), mean_set, width, color=color_sets[ item_index ] ) )
if options.x_label:
ax1.set_xlabel( options.x_label )
if options.y_label:
ax1.set_ylabel( options.y_label )
if options.title:
pl.title( options.title )
#print ax1.get_ylim()
if options.ylim_max:
pl.ylim(0,options.ylim_max)
else:
pl.ylim(0, ax1.get_ylim()[1])
#else
# pl.ylim(0,
## set the xticks
if len(group_labels) == 0: ## none defined
trunc_names = [ val.split('_')[0] for val in inputfilenames ]
for i in range(0, len(trunc_names), member_count):
if options.debug_messages:
print "G LABEL INDEX %s " % i
group_labels.append( trunc_names[i] )
if options.debug_messages:
print "GROUP LABELS"
print group_labels
pl.xticks(width/2+indexes+total_width/2., group_labels )
## set the legend
def proxy_artist( color ):
|
if options.legend and len(artists) > 0 and len(legend_labels) > 0:
if options.debug_messages:
print
print "ARTISTS"
print artists
print "LABELS"
print legend_labels
proxies = []
for i in range(0, member_count):
proxies.append( proxy_artist( color_sets[i] ) )
if options.stack:
proxies.reverse()
legend_labels.reverse()
#loc='upper center'
pl.legend( proxies, legend_labels, loc=2, bbox_to_anchor=(1.01, 1), borderaxespad=0.)#, bbox_to_anchor=(1.01, 1), )
leg = pl.gca().get_legend()
ltext = leg.get_texts()
pl.setp( ltext, fontsize='small')
l,b,w,h = pl.axes().get_position().bounds
#pl.axes().set_position([0.1,b,w*.78,h])
if options.show:
pl.show()
pl.savefig(outfilename, bbox_inches='tight')
| p = pl.Rectangle((0,0), 1,1, fc=color)
return p | identifier_body |
server.rs | use crate::api::pos_grpc_service::PosGrpcService;
use crate::{DEFAULT_BITS_PER_INDEX, DEFAULT_INDEXES_PER_CYCLE, DEFAULT_SALT};
use anyhow::{bail, Result};
use pos_api::api::job::JobStatus;
use pos_api::api::pos_data_service_server::PosDataServiceServer;
use pos_api::api::{
AbortJobRequest, AddJobRequest, Config, Job, JobStatusStreamResponse, Provider,
};
use pos_compute::{get_providers, PosComputeProvider, COMPUTE_API_CLASS_CPU};
use std::collections::HashMap;
use tokio::sync::mpsc;
use tokio::sync::mpsc::Sender;
use tokio_stream::wrappers::ReceiverStream;
use tonic::transport::Server;
use tonic::Status;
use xactor::*;
/// PosServer is a Spacemesh proof of space data generator service.
/// The service manages a pool of compute providers (gpus) and schedules
/// client-submitted jobs to use these providers to create pos data and to report job
/// progress and errors to clients.
/// todo: support aborting an in-progress job
pub(crate) struct PosServer {
providers: Vec<PosComputeProvider>, // gpu compute providers
pending_jobs: Vec<Job>, // pending
pub(crate) jobs: HashMap<u64, Job>, // in progress
pub(crate) config: Config, // compute config
pub(crate) providers_pool: Vec<u32>, // idle providers
job_status_subscribers: HashMap<u64, Sender<Result<JobStatusStreamResponse, Status>>>,
}
#[async_trait::async_trait]
impl Actor for PosServer {
async fn started(&mut self, _ctx: &mut Context<Self>) -> Result<()> {
info!("PosServer system service starting...");
Ok(())
}
async fn stopped(&mut self, _ctx: &mut Context<Self>) {
info!("PosServer system service stopped");
}
}
impl Service for PosServer {}
impl Default for PosServer {
fn default() -> Self {
PosServer {
providers: vec![],
pending_jobs: vec![],
jobs: Default::default(),
config: Config {
data_dir: "./".to_string(),
indexes_per_compute_cycle: DEFAULT_INDEXES_PER_CYCLE,
bits_per_index: DEFAULT_BITS_PER_INDEX,
salt: hex::decode(DEFAULT_SALT).unwrap(),
n: 512,
r: 1,
p: 1,
},
providers_pool: vec![],
job_status_subscribers: HashMap::default(),
}
}
}
#[message(result = "Result<()>")]
pub(crate) struct Init {
/// server base config - must be set when initializing
pub(crate) use_cpu_provider: bool,
}
/// Init the service
#[async_trait::async_trait]
impl Handler<Init> for PosServer {
async fn handle(&mut self, _ctx: &mut Context<Self>, msg: Init) -> Result<()> {
for p in get_providers() {
if !msg.use_cpu_provider && p.compute_api == COMPUTE_API_CLASS_CPU {
info!(
"skipping cpu provider id: {}, model: {}, compute_api: {}",
p.id,
p.model,
pos_api::api_extensions::get_provider_class_string(p.compute_api)
);
continue;
}
if msg.use_cpu_provider && p.compute_api != COMPUTE_API_CLASS_CPU {
info!("Skipping non-cpu provider. {}: {}", p.id, p.model);
continue;
}
info!(
"Adding to pool provider id: {}, model: {}, compute_api: {}",
p.id,
p.model,
pos_api::api_extensions::get_provider_class_string(p.compute_api)
);
self.providers_pool.push(p.id);
self.providers.push(p);
}
if self.providers.is_empty() {
bail!("no compatible compute providers are available on the system.")
}
Ok(())
}
}
#[message(result = "Result<Vec<Provider>>")]
pub(crate) struct GetAllProviders;
// Returns all system providers
#[async_trait::async_trait]
impl Handler<GetAllProviders> for PosServer {
async fn handle(
&mut self,
_ctx: &mut Context<Self>,
_msg: GetAllProviders,
) -> Result<Vec<Provider>> {
let mut res = vec![];
for p in self.providers.iter() {
res.push(Provider {
id: p.id,
model: p.model.clone(),
class: p.compute_api as i32,
})
}
Ok(res)
}
}
#[message(result = "Result<Vec<Job>>")]
pub(crate) struct GetAllJobs;
// Returns job with current status for job id
#[async_trait::async_trait]
impl Handler<GetAllJobs> for PosServer {
async fn handle(&mut self, _ctx: &mut Context<Self>, _msg: GetAllJobs) -> Result<Vec<Job>> {
let mut res: Vec<Job> = self.jobs.values().cloned().collect();
for job in self.pending_jobs.iter() {
res.push(job.clone())
}
Ok(res)
}
}
#[message(result = "Result<Option<Job>>")]
pub(crate) struct GetJob(pub(crate) u64);
// Returns job with current status for job id
#[async_trait::async_trait]
impl Handler<GetJob> for PosServer {
async fn handle(&mut self, _ctx: &mut Context<Self>, msg: GetJob) -> Result<Option<Job>> {
if let Some(job) = self.jobs.get(&msg.0) {
Ok(Some(job.clone()))
} else if let Some(job) = self.pending_jobs.iter().find(|&j| j.id == msg.0) {
Ok(Some(job.clone()))
} else {
Ok(None)
}
}
}
#[message(result = "Result<()>")]
pub(crate) struct UpdateJobStatus(pub(crate) Job);
// Update job status - should only be called from a task which is processing the job
#[async_trait::async_trait]
impl Handler<UpdateJobStatus> for PosServer {
async fn handle(&mut self, _ctx: &mut Context<Self>, msg: UpdateJobStatus) -> Result<()> {
let updated_job = msg.0;
if let Some(_) = self.jobs.get(&updated_job.id) {
// job is running or stopped
if updated_job.status != JobStatus::Started as i32 {
info!(
"job {} finished. Releasing gpu {} pool",
updated_job.id, updated_job.compute_provider_id
);
// Job stopped or completed - release provider id of job to pool
self.providers_pool.push(updated_job.compute_provider_id);
// pick a pending job any start it
if let Some(new_job) = self.pending_jobs.pop() {
info!("starting queued job {}", new_job.id);
self.start_task(&new_job).await?;
} else {
info!("no queued jobs");
}
}
// update job data
self.jobs.insert(updated_job.id, updated_job.clone());
} else if let Some(idx) = self
.pending_jobs
.iter()
.position(|j| j.id == updated_job.id)
{
self.pending_jobs.remove(idx);
self.pending_jobs.insert(idx, updated_job.clone());
} else {
error!("unrecognized job")
}
// update all job status subscribers
for sub in self.job_status_subscribers.clone().iter() {
let res = sub
.1
.send(Ok(JobStatusStreamResponse {
job: Some(updated_job.clone()),
}))
.await;
match res {
Ok(()) => info!("sent updated job status to subscriber"),
Err(e) => {
error!(
"failed to send updated job status to subscriber. deleting it: {}",
e
);
self.job_status_subscribers.remove(sub.0);
}
}
}
Ok(())
}
}
#[message(result = "Result<Job>")]
pub(crate) struct AddJob(pub(crate) AddJobRequest);
#[async_trait::async_trait] | impl Handler<AddJob> for PosServer {
async fn handle(&mut self, _ctx: &mut Context<Self>, msg: AddJob) -> Result<Job> {
let data = msg.0;
let job = Job {
id: rand::random(),
bits_written: 0,
size_bits: data.post_size_bits,
started: 0,
submitted: datetime::Instant::now().seconds() as u64,
stopped: 0,
status: JobStatus::Queued as i32,
last_error: None,
friendly_name: data.friendly_name,
client_id: data.client_id,
compute_provider_id: u32::MAX,
pow_difficulty: data.pow_difficulty,
pow_solution_index: u64::MAX,
};
if let Err(e) = job.validate(
self.config.indexes_per_compute_cycle,
self.config.bits_per_index,
) {
error!("job can't be added - validation failed: {}, {}", job, e);
return Err(e);
}
if self.providers_pool.is_empty() {
// all providers busy with in-progress jobs - queue the job
self.pending_jobs.push(job.clone());
info!("all providers are busy - queueing job {}...", job.id);
return Ok(job);
}
let res_job = self.start_task(&job).await?;
Ok(res_job)
}
}
#[message(result = "Result<(Config)>")]
pub(crate) struct GetConfig;
/// Get the current pos compute config
#[async_trait::async_trait]
impl Handler<GetConfig> for PosServer {
async fn handle(&mut self, _ctx: &mut Context<Self>, _msg: GetConfig) -> Result<Config> {
Ok(self.config.clone())
}
}
#[message(result = "Result<()>")]
pub(crate) struct AbortJob(pub(crate) AbortJobRequest);
/// Set the pos compute config
#[async_trait::async_trait]
impl Handler<AbortJob> for PosServer {
async fn handle(&mut self, _ctx: &mut Context<Self>, msg: AbortJob) -> Result<()> {
let req = msg.0;
if let Some(_job) = self.jobs.get(&req.id) {
// todo: abort on-going job - need to do this via sending a message the blocking task
if req.delete_data {
// todo: attempt to delete all job files in store (best effort)
}
}
if req.delete_job {
// remove job
if let Some(idx) = self.pending_jobs.iter().position(|j| j.id == req.id) {
self.pending_jobs.remove(idx);
}
self.jobs.remove(&req.id);
}
Ok(())
}
}
#[message(result = "Result<()>")]
pub(crate) struct SetConfig(pub(crate) Config);
/// Set the pos compute config
#[async_trait::async_trait]
impl Handler<SetConfig> for PosServer {
async fn handle(&mut self, _ctx: &mut Context<Self>, msg: SetConfig) -> Result<()> {
self.config = msg.0;
Ok(())
}
}
/////////////////////////////////////////////
#[message(result = "Result<ReceiverStream<Result<JobStatusStreamResponse, Status>>>")]
pub(crate) struct SubscribeToJobStatuses {}
#[async_trait::async_trait]
impl Handler<SubscribeToJobStatuses> for PosServer {
async fn handle(
&mut self,
_ctx: &mut Context<Self>,
_msg: SubscribeToJobStatuses,
) -> Result<ReceiverStream<Result<JobStatusStreamResponse, Status>>> {
// create channel for streaming job statuses
let (tx, rx) = mpsc::channel(32);
// store the sender indexed by a new unique id
self.job_status_subscribers.insert(rand::random(), tx);
// return the receiver
Ok(ReceiverStream::new(rx))
}
}
/////////////////////////////////////////////
#[message(result = "Result<()>")]
pub(crate) struct StartGrpcService {
pub(crate) port: u32,
pub(crate) host: String,
}
#[async_trait::async_trait]
impl Handler<StartGrpcService> for PosServer {
async fn handle(&mut self, _ctx: &mut Context<Self>, msg: StartGrpcService) -> Result<()> {
let addr = format!("{}:{}", msg.host, msg.port).parse().unwrap();
info!("starting grpc service on: {}...", addr);
// todo: add a grpc health service
tokio::task::spawn(async move {
let res = Server::builder()
.add_service(PosDataServiceServer::new(PosGrpcService::default()))
.serve(addr)
.await;
if res.is_err() {
panic!("grpc server stopped due to error: {:?}", res.err().unwrap());
} else {
info!("grpc server stopped");
}
});
Ok(())
}
} | random_line_split | |
server.rs | use crate::api::pos_grpc_service::PosGrpcService;
use crate::{DEFAULT_BITS_PER_INDEX, DEFAULT_INDEXES_PER_CYCLE, DEFAULT_SALT};
use anyhow::{bail, Result};
use pos_api::api::job::JobStatus;
use pos_api::api::pos_data_service_server::PosDataServiceServer;
use pos_api::api::{
AbortJobRequest, AddJobRequest, Config, Job, JobStatusStreamResponse, Provider,
};
use pos_compute::{get_providers, PosComputeProvider, COMPUTE_API_CLASS_CPU};
use std::collections::HashMap;
use tokio::sync::mpsc;
use tokio::sync::mpsc::Sender;
use tokio_stream::wrappers::ReceiverStream;
use tonic::transport::Server;
use tonic::Status;
use xactor::*;
/// PosServer is a Spacemesh proof of space data generator service.
/// The service manages a pool of compute providers (gpus) and schedules
/// client-submitted jobs to use these providers to create pos data and to report job
/// progress and errors to clients.
/// todo: support aborting an in-progress job
pub(crate) struct PosServer {
providers: Vec<PosComputeProvider>, // gpu compute providers
pending_jobs: Vec<Job>, // pending
pub(crate) jobs: HashMap<u64, Job>, // in progress
pub(crate) config: Config, // compute config
pub(crate) providers_pool: Vec<u32>, // idle providers
job_status_subscribers: HashMap<u64, Sender<Result<JobStatusStreamResponse, Status>>>,
}
#[async_trait::async_trait]
impl Actor for PosServer {
async fn started(&mut self, _ctx: &mut Context<Self>) -> Result<()> {
info!("PosServer system service starting...");
Ok(())
}
async fn stopped(&mut self, _ctx: &mut Context<Self>) {
info!("PosServer system service stopped");
}
}
impl Service for PosServer {}
impl Default for PosServer {
fn default() -> Self {
PosServer {
providers: vec![],
pending_jobs: vec![],
jobs: Default::default(),
config: Config {
data_dir: "./".to_string(),
indexes_per_compute_cycle: DEFAULT_INDEXES_PER_CYCLE,
bits_per_index: DEFAULT_BITS_PER_INDEX,
salt: hex::decode(DEFAULT_SALT).unwrap(),
n: 512,
r: 1,
p: 1,
},
providers_pool: vec![],
job_status_subscribers: HashMap::default(),
}
}
}
#[message(result = "Result<()>")]
pub(crate) struct Init {
/// server base config - must be set when initializing
pub(crate) use_cpu_provider: bool,
}
/// Init the service
#[async_trait::async_trait]
impl Handler<Init> for PosServer {
async fn handle(&mut self, _ctx: &mut Context<Self>, msg: Init) -> Result<()> {
for p in get_providers() {
if !msg.use_cpu_provider && p.compute_api == COMPUTE_API_CLASS_CPU {
info!(
"skipping cpu provider id: {}, model: {}, compute_api: {}",
p.id,
p.model,
pos_api::api_extensions::get_provider_class_string(p.compute_api)
);
continue;
}
if msg.use_cpu_provider && p.compute_api != COMPUTE_API_CLASS_CPU {
info!("Skipping non-cpu provider. {}: {}", p.id, p.model);
continue;
}
info!(
"Adding to pool provider id: {}, model: {}, compute_api: {}",
p.id,
p.model,
pos_api::api_extensions::get_provider_class_string(p.compute_api)
);
self.providers_pool.push(p.id);
self.providers.push(p);
}
if self.providers.is_empty() {
bail!("no compatible compute providers are available on the system.")
}
Ok(())
}
}
#[message(result = "Result<Vec<Provider>>")]
pub(crate) struct GetAllProviders;
// Returns all system providers
#[async_trait::async_trait]
impl Handler<GetAllProviders> for PosServer {
async fn handle(
&mut self,
_ctx: &mut Context<Self>,
_msg: GetAllProviders,
) -> Result<Vec<Provider>> {
let mut res = vec![];
for p in self.providers.iter() {
res.push(Provider {
id: p.id,
model: p.model.clone(),
class: p.compute_api as i32,
})
}
Ok(res)
}
}
#[message(result = "Result<Vec<Job>>")]
pub(crate) struct GetAllJobs;
// Returns job with current status for job id
#[async_trait::async_trait]
impl Handler<GetAllJobs> for PosServer {
async fn handle(&mut self, _ctx: &mut Context<Self>, _msg: GetAllJobs) -> Result<Vec<Job>> {
let mut res: Vec<Job> = self.jobs.values().cloned().collect();
for job in self.pending_jobs.iter() {
res.push(job.clone())
}
Ok(res)
}
}
#[message(result = "Result<Option<Job>>")]
pub(crate) struct GetJob(pub(crate) u64);
// Returns job with current status for job id
#[async_trait::async_trait]
impl Handler<GetJob> for PosServer {
async fn handle(&mut self, _ctx: &mut Context<Self>, msg: GetJob) -> Result<Option<Job>> {
if let Some(job) = self.jobs.get(&msg.0) {
Ok(Some(job.clone()))
} else if let Some(job) = self.pending_jobs.iter().find(|&j| j.id == msg.0) {
Ok(Some(job.clone()))
} else {
Ok(None)
}
}
}
#[message(result = "Result<()>")]
pub(crate) struct UpdateJobStatus(pub(crate) Job);
// Update job status - should only be called from a task which is processing the job
#[async_trait::async_trait]
impl Handler<UpdateJobStatus> for PosServer {
async fn handle(&mut self, _ctx: &mut Context<Self>, msg: UpdateJobStatus) -> Result<()> {
let updated_job = msg.0;
if let Some(_) = self.jobs.get(&updated_job.id) {
// job is running or stopped
if updated_job.status != JobStatus::Started as i32 {
info!(
"job {} finished. Releasing gpu {} pool",
updated_job.id, updated_job.compute_provider_id
);
// Job stopped or completed - release provider id of job to pool
self.providers_pool.push(updated_job.compute_provider_id);
// pick a pending job any start it
if let Some(new_job) = self.pending_jobs.pop() {
info!("starting queued job {}", new_job.id);
self.start_task(&new_job).await?;
} else {
info!("no queued jobs");
}
}
// update job data
self.jobs.insert(updated_job.id, updated_job.clone());
} else if let Some(idx) = self
.pending_jobs
.iter()
.position(|j| j.id == updated_job.id)
{
self.pending_jobs.remove(idx);
self.pending_jobs.insert(idx, updated_job.clone());
} else {
error!("unrecognized job")
}
// update all job status subscribers
for sub in self.job_status_subscribers.clone().iter() {
let res = sub
.1
.send(Ok(JobStatusStreamResponse {
job: Some(updated_job.clone()),
}))
.await;
match res {
Ok(()) => info!("sent updated job status to subscriber"),
Err(e) => {
error!(
"failed to send updated job status to subscriber. deleting it: {}",
e
);
self.job_status_subscribers.remove(sub.0);
}
}
}
Ok(())
}
}
#[message(result = "Result<Job>")]
pub(crate) struct AddJob(pub(crate) AddJobRequest);
#[async_trait::async_trait]
impl Handler<AddJob> for PosServer {
async fn handle(&mut self, _ctx: &mut Context<Self>, msg: AddJob) -> Result<Job> {
let data = msg.0;
let job = Job {
id: rand::random(),
bits_written: 0,
size_bits: data.post_size_bits,
started: 0,
submitted: datetime::Instant::now().seconds() as u64,
stopped: 0,
status: JobStatus::Queued as i32,
last_error: None,
friendly_name: data.friendly_name,
client_id: data.client_id,
compute_provider_id: u32::MAX,
pow_difficulty: data.pow_difficulty,
pow_solution_index: u64::MAX,
};
if let Err(e) = job.validate(
self.config.indexes_per_compute_cycle,
self.config.bits_per_index,
) {
error!("job can't be added - validation failed: {}, {}", job, e);
return Err(e);
}
if self.providers_pool.is_empty() {
// all providers busy with in-progress jobs - queue the job
self.pending_jobs.push(job.clone());
info!("all providers are busy - queueing job {}...", job.id);
return Ok(job);
}
let res_job = self.start_task(&job).await?;
Ok(res_job)
}
}
#[message(result = "Result<(Config)>")]
pub(crate) struct GetConfig;
/// Get the current pos compute config
#[async_trait::async_trait]
impl Handler<GetConfig> for PosServer {
async fn handle(&mut self, _ctx: &mut Context<Self>, _msg: GetConfig) -> Result<Config> {
Ok(self.config.clone())
}
}
#[message(result = "Result<()>")]
pub(crate) struct AbortJob(pub(crate) AbortJobRequest);
/// Set the pos compute config
#[async_trait::async_trait]
impl Handler<AbortJob> for PosServer {
async fn handle(&mut self, _ctx: &mut Context<Self>, msg: AbortJob) -> Result<()> {
let req = msg.0;
if let Some(_job) = self.jobs.get(&req.id) {
// todo: abort on-going job - need to do this via sending a message the blocking task
if req.delete_data {
// todo: attempt to delete all job files in store (best effort)
}
}
if req.delete_job {
// remove job
if let Some(idx) = self.pending_jobs.iter().position(|j| j.id == req.id) |
self.jobs.remove(&req.id);
}
Ok(())
}
}
#[message(result = "Result<()>")]
pub(crate) struct SetConfig(pub(crate) Config);
/// Set the pos compute config
#[async_trait::async_trait]
impl Handler<SetConfig> for PosServer {
async fn handle(&mut self, _ctx: &mut Context<Self>, msg: SetConfig) -> Result<()> {
self.config = msg.0;
Ok(())
}
}
/////////////////////////////////////////////
#[message(result = "Result<ReceiverStream<Result<JobStatusStreamResponse, Status>>>")]
pub(crate) struct SubscribeToJobStatuses {}
#[async_trait::async_trait]
impl Handler<SubscribeToJobStatuses> for PosServer {
async fn handle(
&mut self,
_ctx: &mut Context<Self>,
_msg: SubscribeToJobStatuses,
) -> Result<ReceiverStream<Result<JobStatusStreamResponse, Status>>> {
// create channel for streaming job statuses
let (tx, rx) = mpsc::channel(32);
// store the sender indexed by a new unique id
self.job_status_subscribers.insert(rand::random(), tx);
// return the receiver
Ok(ReceiverStream::new(rx))
}
}
/////////////////////////////////////////////
#[message(result = "Result<()>")]
pub(crate) struct StartGrpcService {
pub(crate) port: u32,
pub(crate) host: String,
}
#[async_trait::async_trait]
impl Handler<StartGrpcService> for PosServer {
async fn handle(&mut self, _ctx: &mut Context<Self>, msg: StartGrpcService) -> Result<()> {
let addr = format!("{}:{}", msg.host, msg.port).parse().unwrap();
info!("starting grpc service on: {}...", addr);
// todo: add a grpc health service
tokio::task::spawn(async move {
let res = Server::builder()
.add_service(PosDataServiceServer::new(PosGrpcService::default()))
.serve(addr)
.await;
if res.is_err() {
panic!("grpc server stopped due to error: {:?}", res.err().unwrap());
} else {
info!("grpc server stopped");
}
});
Ok(())
}
}
| {
self.pending_jobs.remove(idx);
} | conditional_block |
server.rs | use crate::api::pos_grpc_service::PosGrpcService;
use crate::{DEFAULT_BITS_PER_INDEX, DEFAULT_INDEXES_PER_CYCLE, DEFAULT_SALT};
use anyhow::{bail, Result};
use pos_api::api::job::JobStatus;
use pos_api::api::pos_data_service_server::PosDataServiceServer;
use pos_api::api::{
AbortJobRequest, AddJobRequest, Config, Job, JobStatusStreamResponse, Provider,
};
use pos_compute::{get_providers, PosComputeProvider, COMPUTE_API_CLASS_CPU};
use std::collections::HashMap;
use tokio::sync::mpsc;
use tokio::sync::mpsc::Sender;
use tokio_stream::wrappers::ReceiverStream;
use tonic::transport::Server;
use tonic::Status;
use xactor::*;
/// PosServer is a Spacemesh proof of space data generator service.
/// The service manages a pool of compute providers (gpus) and schedules
/// client-submitted jobs to use these providers to create pos data and to report job
/// progress and errors to clients.
/// todo: support aborting an in-progress job
pub(crate) struct PosServer {
providers: Vec<PosComputeProvider>, // gpu compute providers
pending_jobs: Vec<Job>, // pending
pub(crate) jobs: HashMap<u64, Job>, // in progress
pub(crate) config: Config, // compute config
pub(crate) providers_pool: Vec<u32>, // idle providers
job_status_subscribers: HashMap<u64, Sender<Result<JobStatusStreamResponse, Status>>>,
}
#[async_trait::async_trait]
impl Actor for PosServer {
async fn started(&mut self, _ctx: &mut Context<Self>) -> Result<()> {
info!("PosServer system service starting...");
Ok(())
}
async fn | (&mut self, _ctx: &mut Context<Self>) {
info!("PosServer system service stopped");
}
}
impl Service for PosServer {}
impl Default for PosServer {
fn default() -> Self {
PosServer {
providers: vec![],
pending_jobs: vec![],
jobs: Default::default(),
config: Config {
data_dir: "./".to_string(),
indexes_per_compute_cycle: DEFAULT_INDEXES_PER_CYCLE,
bits_per_index: DEFAULT_BITS_PER_INDEX,
salt: hex::decode(DEFAULT_SALT).unwrap(),
n: 512,
r: 1,
p: 1,
},
providers_pool: vec![],
job_status_subscribers: HashMap::default(),
}
}
}
#[message(result = "Result<()>")]
pub(crate) struct Init {
/// server base config - must be set when initializing
pub(crate) use_cpu_provider: bool,
}
/// Init the service
#[async_trait::async_trait]
impl Handler<Init> for PosServer {
async fn handle(&mut self, _ctx: &mut Context<Self>, msg: Init) -> Result<()> {
for p in get_providers() {
if !msg.use_cpu_provider && p.compute_api == COMPUTE_API_CLASS_CPU {
info!(
"skipping cpu provider id: {}, model: {}, compute_api: {}",
p.id,
p.model,
pos_api::api_extensions::get_provider_class_string(p.compute_api)
);
continue;
}
if msg.use_cpu_provider && p.compute_api != COMPUTE_API_CLASS_CPU {
info!("Skipping non-cpu provider. {}: {}", p.id, p.model);
continue;
}
info!(
"Adding to pool provider id: {}, model: {}, compute_api: {}",
p.id,
p.model,
pos_api::api_extensions::get_provider_class_string(p.compute_api)
);
self.providers_pool.push(p.id);
self.providers.push(p);
}
if self.providers.is_empty() {
bail!("no compatible compute providers are available on the system.")
}
Ok(())
}
}
#[message(result = "Result<Vec<Provider>>")]
pub(crate) struct GetAllProviders;
// Returns all system providers
#[async_trait::async_trait]
impl Handler<GetAllProviders> for PosServer {
async fn handle(
&mut self,
_ctx: &mut Context<Self>,
_msg: GetAllProviders,
) -> Result<Vec<Provider>> {
let mut res = vec![];
for p in self.providers.iter() {
res.push(Provider {
id: p.id,
model: p.model.clone(),
class: p.compute_api as i32,
})
}
Ok(res)
}
}
#[message(result = "Result<Vec<Job>>")]
pub(crate) struct GetAllJobs;
// Returns job with current status for job id
#[async_trait::async_trait]
impl Handler<GetAllJobs> for PosServer {
async fn handle(&mut self, _ctx: &mut Context<Self>, _msg: GetAllJobs) -> Result<Vec<Job>> {
let mut res: Vec<Job> = self.jobs.values().cloned().collect();
for job in self.pending_jobs.iter() {
res.push(job.clone())
}
Ok(res)
}
}
#[message(result = "Result<Option<Job>>")]
pub(crate) struct GetJob(pub(crate) u64);
// Returns job with current status for job id
#[async_trait::async_trait]
impl Handler<GetJob> for PosServer {
async fn handle(&mut self, _ctx: &mut Context<Self>, msg: GetJob) -> Result<Option<Job>> {
if let Some(job) = self.jobs.get(&msg.0) {
Ok(Some(job.clone()))
} else if let Some(job) = self.pending_jobs.iter().find(|&j| j.id == msg.0) {
Ok(Some(job.clone()))
} else {
Ok(None)
}
}
}
#[message(result = "Result<()>")]
pub(crate) struct UpdateJobStatus(pub(crate) Job);
// Update job status - should only be called from a task which is processing the job
#[async_trait::async_trait]
impl Handler<UpdateJobStatus> for PosServer {
async fn handle(&mut self, _ctx: &mut Context<Self>, msg: UpdateJobStatus) -> Result<()> {
let updated_job = msg.0;
if let Some(_) = self.jobs.get(&updated_job.id) {
// job is running or stopped
if updated_job.status != JobStatus::Started as i32 {
info!(
"job {} finished. Releasing gpu {} pool",
updated_job.id, updated_job.compute_provider_id
);
// Job stopped or completed - release provider id of job to pool
self.providers_pool.push(updated_job.compute_provider_id);
// pick a pending job any start it
if let Some(new_job) = self.pending_jobs.pop() {
info!("starting queued job {}", new_job.id);
self.start_task(&new_job).await?;
} else {
info!("no queued jobs");
}
}
// update job data
self.jobs.insert(updated_job.id, updated_job.clone());
} else if let Some(idx) = self
.pending_jobs
.iter()
.position(|j| j.id == updated_job.id)
{
self.pending_jobs.remove(idx);
self.pending_jobs.insert(idx, updated_job.clone());
} else {
error!("unrecognized job")
}
// update all job status subscribers
for sub in self.job_status_subscribers.clone().iter() {
let res = sub
.1
.send(Ok(JobStatusStreamResponse {
job: Some(updated_job.clone()),
}))
.await;
match res {
Ok(()) => info!("sent updated job status to subscriber"),
Err(e) => {
error!(
"failed to send updated job status to subscriber. deleting it: {}",
e
);
self.job_status_subscribers.remove(sub.0);
}
}
}
Ok(())
}
}
#[message(result = "Result<Job>")]
pub(crate) struct AddJob(pub(crate) AddJobRequest);
#[async_trait::async_trait]
impl Handler<AddJob> for PosServer {
async fn handle(&mut self, _ctx: &mut Context<Self>, msg: AddJob) -> Result<Job> {
let data = msg.0;
let job = Job {
id: rand::random(),
bits_written: 0,
size_bits: data.post_size_bits,
started: 0,
submitted: datetime::Instant::now().seconds() as u64,
stopped: 0,
status: JobStatus::Queued as i32,
last_error: None,
friendly_name: data.friendly_name,
client_id: data.client_id,
compute_provider_id: u32::MAX,
pow_difficulty: data.pow_difficulty,
pow_solution_index: u64::MAX,
};
if let Err(e) = job.validate(
self.config.indexes_per_compute_cycle,
self.config.bits_per_index,
) {
error!("job can't be added - validation failed: {}, {}", job, e);
return Err(e);
}
if self.providers_pool.is_empty() {
// all providers busy with in-progress jobs - queue the job
self.pending_jobs.push(job.clone());
info!("all providers are busy - queueing job {}...", job.id);
return Ok(job);
}
let res_job = self.start_task(&job).await?;
Ok(res_job)
}
}
#[message(result = "Result<(Config)>")]
pub(crate) struct GetConfig;
/// Get the current pos compute config
#[async_trait::async_trait]
impl Handler<GetConfig> for PosServer {
async fn handle(&mut self, _ctx: &mut Context<Self>, _msg: GetConfig) -> Result<Config> {
Ok(self.config.clone())
}
}
#[message(result = "Result<()>")]
pub(crate) struct AbortJob(pub(crate) AbortJobRequest);
/// Set the pos compute config
#[async_trait::async_trait]
impl Handler<AbortJob> for PosServer {
async fn handle(&mut self, _ctx: &mut Context<Self>, msg: AbortJob) -> Result<()> {
let req = msg.0;
if let Some(_job) = self.jobs.get(&req.id) {
// todo: abort on-going job - need to do this via sending a message the blocking task
if req.delete_data {
// todo: attempt to delete all job files in store (best effort)
}
}
if req.delete_job {
// remove job
if let Some(idx) = self.pending_jobs.iter().position(|j| j.id == req.id) {
self.pending_jobs.remove(idx);
}
self.jobs.remove(&req.id);
}
Ok(())
}
}
#[message(result = "Result<()>")]
pub(crate) struct SetConfig(pub(crate) Config);
/// Set the pos compute config
#[async_trait::async_trait]
impl Handler<SetConfig> for PosServer {
async fn handle(&mut self, _ctx: &mut Context<Self>, msg: SetConfig) -> Result<()> {
self.config = msg.0;
Ok(())
}
}
/////////////////////////////////////////////
#[message(result = "Result<ReceiverStream<Result<JobStatusStreamResponse, Status>>>")]
pub(crate) struct SubscribeToJobStatuses {}
#[async_trait::async_trait]
impl Handler<SubscribeToJobStatuses> for PosServer {
async fn handle(
&mut self,
_ctx: &mut Context<Self>,
_msg: SubscribeToJobStatuses,
) -> Result<ReceiverStream<Result<JobStatusStreamResponse, Status>>> {
// create channel for streaming job statuses
let (tx, rx) = mpsc::channel(32);
// store the sender indexed by a new unique id
self.job_status_subscribers.insert(rand::random(), tx);
// return the receiver
Ok(ReceiverStream::new(rx))
}
}
/////////////////////////////////////////////
#[message(result = "Result<()>")]
pub(crate) struct StartGrpcService {
pub(crate) port: u32,
pub(crate) host: String,
}
#[async_trait::async_trait]
impl Handler<StartGrpcService> for PosServer {
async fn handle(&mut self, _ctx: &mut Context<Self>, msg: StartGrpcService) -> Result<()> {
let addr = format!("{}:{}", msg.host, msg.port).parse().unwrap();
info!("starting grpc service on: {}...", addr);
// todo: add a grpc health service
tokio::task::spawn(async move {
let res = Server::builder()
.add_service(PosDataServiceServer::new(PosGrpcService::default()))
.serve(addr)
.await;
if res.is_err() {
panic!("grpc server stopped due to error: {:?}", res.err().unwrap());
} else {
info!("grpc server stopped");
}
});
Ok(())
}
}
| stopped | identifier_name |
server.rs | use crate::api::pos_grpc_service::PosGrpcService;
use crate::{DEFAULT_BITS_PER_INDEX, DEFAULT_INDEXES_PER_CYCLE, DEFAULT_SALT};
use anyhow::{bail, Result};
use pos_api::api::job::JobStatus;
use pos_api::api::pos_data_service_server::PosDataServiceServer;
use pos_api::api::{
AbortJobRequest, AddJobRequest, Config, Job, JobStatusStreamResponse, Provider,
};
use pos_compute::{get_providers, PosComputeProvider, COMPUTE_API_CLASS_CPU};
use std::collections::HashMap;
use tokio::sync::mpsc;
use tokio::sync::mpsc::Sender;
use tokio_stream::wrappers::ReceiverStream;
use tonic::transport::Server;
use tonic::Status;
use xactor::*;
/// PosServer is a Spacemesh proof of space data generator service.
/// The service manages a pool of compute providers (gpus) and schedules
/// client-submitted jobs to use these providers to create pos data and to report job
/// progress and errors to clients.
/// todo: support aborting an in-progress job
pub(crate) struct PosServer {
providers: Vec<PosComputeProvider>, // gpu compute providers
pending_jobs: Vec<Job>, // pending
pub(crate) jobs: HashMap<u64, Job>, // in progress
pub(crate) config: Config, // compute config
pub(crate) providers_pool: Vec<u32>, // idle providers
job_status_subscribers: HashMap<u64, Sender<Result<JobStatusStreamResponse, Status>>>,
}
#[async_trait::async_trait]
impl Actor for PosServer {
async fn started(&mut self, _ctx: &mut Context<Self>) -> Result<()> {
info!("PosServer system service starting...");
Ok(())
}
async fn stopped(&mut self, _ctx: &mut Context<Self>) {
info!("PosServer system service stopped");
}
}
impl Service for PosServer {}
impl Default for PosServer {
fn default() -> Self {
PosServer {
providers: vec![],
pending_jobs: vec![],
jobs: Default::default(),
config: Config {
data_dir: "./".to_string(),
indexes_per_compute_cycle: DEFAULT_INDEXES_PER_CYCLE,
bits_per_index: DEFAULT_BITS_PER_INDEX,
salt: hex::decode(DEFAULT_SALT).unwrap(),
n: 512,
r: 1,
p: 1,
},
providers_pool: vec![],
job_status_subscribers: HashMap::default(),
}
}
}
#[message(result = "Result<()>")]
pub(crate) struct Init {
/// server base config - must be set when initializing
pub(crate) use_cpu_provider: bool,
}
/// Init the service
#[async_trait::async_trait]
impl Handler<Init> for PosServer {
async fn handle(&mut self, _ctx: &mut Context<Self>, msg: Init) -> Result<()> {
for p in get_providers() {
if !msg.use_cpu_provider && p.compute_api == COMPUTE_API_CLASS_CPU {
info!(
"skipping cpu provider id: {}, model: {}, compute_api: {}",
p.id,
p.model,
pos_api::api_extensions::get_provider_class_string(p.compute_api)
);
continue;
}
if msg.use_cpu_provider && p.compute_api != COMPUTE_API_CLASS_CPU {
info!("Skipping non-cpu provider. {}: {}", p.id, p.model);
continue;
}
info!(
"Adding to pool provider id: {}, model: {}, compute_api: {}",
p.id,
p.model,
pos_api::api_extensions::get_provider_class_string(p.compute_api)
);
self.providers_pool.push(p.id);
self.providers.push(p);
}
if self.providers.is_empty() {
bail!("no compatible compute providers are available on the system.")
}
Ok(())
}
}
#[message(result = "Result<Vec<Provider>>")]
pub(crate) struct GetAllProviders;
// Returns all system providers
#[async_trait::async_trait]
impl Handler<GetAllProviders> for PosServer {
async fn handle(
&mut self,
_ctx: &mut Context<Self>,
_msg: GetAllProviders,
) -> Result<Vec<Provider>> {
let mut res = vec![];
for p in self.providers.iter() {
res.push(Provider {
id: p.id,
model: p.model.clone(),
class: p.compute_api as i32,
})
}
Ok(res)
}
}
#[message(result = "Result<Vec<Job>>")]
pub(crate) struct GetAllJobs;
// Returns job with current status for job id
#[async_trait::async_trait]
impl Handler<GetAllJobs> for PosServer {
async fn handle(&mut self, _ctx: &mut Context<Self>, _msg: GetAllJobs) -> Result<Vec<Job>> {
let mut res: Vec<Job> = self.jobs.values().cloned().collect();
for job in self.pending_jobs.iter() {
res.push(job.clone())
}
Ok(res)
}
}
#[message(result = "Result<Option<Job>>")]
pub(crate) struct GetJob(pub(crate) u64);
// Returns job with current status for job id
#[async_trait::async_trait]
impl Handler<GetJob> for PosServer {
async fn handle(&mut self, _ctx: &mut Context<Self>, msg: GetJob) -> Result<Option<Job>> {
if let Some(job) = self.jobs.get(&msg.0) {
Ok(Some(job.clone()))
} else if let Some(job) = self.pending_jobs.iter().find(|&j| j.id == msg.0) {
Ok(Some(job.clone()))
} else {
Ok(None)
}
}
}
#[message(result = "Result<()>")]
pub(crate) struct UpdateJobStatus(pub(crate) Job);
// Update job status - should only be called from a task which is processing the job
#[async_trait::async_trait]
impl Handler<UpdateJobStatus> for PosServer {
async fn handle(&mut self, _ctx: &mut Context<Self>, msg: UpdateJobStatus) -> Result<()> {
let updated_job = msg.0;
if let Some(_) = self.jobs.get(&updated_job.id) {
// job is running or stopped
if updated_job.status != JobStatus::Started as i32 {
info!(
"job {} finished. Releasing gpu {} pool",
updated_job.id, updated_job.compute_provider_id
);
// Job stopped or completed - release provider id of job to pool
self.providers_pool.push(updated_job.compute_provider_id);
// pick a pending job any start it
if let Some(new_job) = self.pending_jobs.pop() {
info!("starting queued job {}", new_job.id);
self.start_task(&new_job).await?;
} else {
info!("no queued jobs");
}
}
// update job data
self.jobs.insert(updated_job.id, updated_job.clone());
} else if let Some(idx) = self
.pending_jobs
.iter()
.position(|j| j.id == updated_job.id)
{
self.pending_jobs.remove(idx);
self.pending_jobs.insert(idx, updated_job.clone());
} else {
error!("unrecognized job")
}
// update all job status subscribers
for sub in self.job_status_subscribers.clone().iter() {
let res = sub
.1
.send(Ok(JobStatusStreamResponse {
job: Some(updated_job.clone()),
}))
.await;
match res {
Ok(()) => info!("sent updated job status to subscriber"),
Err(e) => {
error!(
"failed to send updated job status to subscriber. deleting it: {}",
e
);
self.job_status_subscribers.remove(sub.0);
}
}
}
Ok(())
}
}
#[message(result = "Result<Job>")]
pub(crate) struct AddJob(pub(crate) AddJobRequest);
#[async_trait::async_trait]
impl Handler<AddJob> for PosServer {
async fn handle(&mut self, _ctx: &mut Context<Self>, msg: AddJob) -> Result<Job> {
let data = msg.0;
let job = Job {
id: rand::random(),
bits_written: 0,
size_bits: data.post_size_bits,
started: 0,
submitted: datetime::Instant::now().seconds() as u64,
stopped: 0,
status: JobStatus::Queued as i32,
last_error: None,
friendly_name: data.friendly_name,
client_id: data.client_id,
compute_provider_id: u32::MAX,
pow_difficulty: data.pow_difficulty,
pow_solution_index: u64::MAX,
};
if let Err(e) = job.validate(
self.config.indexes_per_compute_cycle,
self.config.bits_per_index,
) {
error!("job can't be added - validation failed: {}, {}", job, e);
return Err(e);
}
if self.providers_pool.is_empty() {
// all providers busy with in-progress jobs - queue the job
self.pending_jobs.push(job.clone());
info!("all providers are busy - queueing job {}...", job.id);
return Ok(job);
}
let res_job = self.start_task(&job).await?;
Ok(res_job)
}
}
#[message(result = "Result<(Config)>")]
pub(crate) struct GetConfig;
/// Get the current pos compute config
#[async_trait::async_trait]
impl Handler<GetConfig> for PosServer {
async fn handle(&mut self, _ctx: &mut Context<Self>, _msg: GetConfig) -> Result<Config> {
Ok(self.config.clone())
}
}
#[message(result = "Result<()>")]
pub(crate) struct AbortJob(pub(crate) AbortJobRequest);
/// Set the pos compute config
#[async_trait::async_trait]
impl Handler<AbortJob> for PosServer {
async fn handle(&mut self, _ctx: &mut Context<Self>, msg: AbortJob) -> Result<()> {
let req = msg.0;
if let Some(_job) = self.jobs.get(&req.id) {
// todo: abort on-going job - need to do this via sending a message the blocking task
if req.delete_data {
// todo: attempt to delete all job files in store (best effort)
}
}
if req.delete_job {
// remove job
if let Some(idx) = self.pending_jobs.iter().position(|j| j.id == req.id) {
self.pending_jobs.remove(idx);
}
self.jobs.remove(&req.id);
}
Ok(())
}
}
#[message(result = "Result<()>")]
pub(crate) struct SetConfig(pub(crate) Config);
/// Set the pos compute config
#[async_trait::async_trait]
impl Handler<SetConfig> for PosServer {
async fn handle(&mut self, _ctx: &mut Context<Self>, msg: SetConfig) -> Result<()> {
self.config = msg.0;
Ok(())
}
}
/////////////////////////////////////////////
#[message(result = "Result<ReceiverStream<Result<JobStatusStreamResponse, Status>>>")]
pub(crate) struct SubscribeToJobStatuses {}
#[async_trait::async_trait]
impl Handler<SubscribeToJobStatuses> for PosServer {
async fn handle(
&mut self,
_ctx: &mut Context<Self>,
_msg: SubscribeToJobStatuses,
) -> Result<ReceiverStream<Result<JobStatusStreamResponse, Status>>> |
}
/////////////////////////////////////////////
#[message(result = "Result<()>")]
pub(crate) struct StartGrpcService {
pub(crate) port: u32,
pub(crate) host: String,
}
#[async_trait::async_trait]
impl Handler<StartGrpcService> for PosServer {
async fn handle(&mut self, _ctx: &mut Context<Self>, msg: StartGrpcService) -> Result<()> {
let addr = format!("{}:{}", msg.host, msg.port).parse().unwrap();
info!("starting grpc service on: {}...", addr);
// todo: add a grpc health service
tokio::task::spawn(async move {
let res = Server::builder()
.add_service(PosDataServiceServer::new(PosGrpcService::default()))
.serve(addr)
.await;
if res.is_err() {
panic!("grpc server stopped due to error: {:?}", res.err().unwrap());
} else {
info!("grpc server stopped");
}
});
Ok(())
}
}
| {
// create channel for streaming job statuses
let (tx, rx) = mpsc::channel(32);
// store the sender indexed by a new unique id
self.job_status_subscribers.insert(rand::random(), tx);
// return the receiver
Ok(ReceiverStream::new(rx))
} | identifier_body |
mod.rs | use self::constants::*;
use super::*;
mod creature;
mod rock;
pub use self::creature::*;
pub use self::rock::*;
use std::cell::{Ref, RefMut};
#[cfg(multithreading)]
type ReferenceCounter = std::sync::Arc;
#[cfg(not(multithreading))]
type ReferenceCounter<A> = std::rc::Rc<A>;
#[cfg(multithreading)]
type MutPoint = std::sync::RwLock;
#[cfg(not(multithreading))]
type MutPoint<A> = std::cell::RefCell<A>;
const COLLISION_FORCE: f64 = 0.01;
const PIECES: usize = 20;
const AGE_FACTOR: f64 = 1.0;
const MATURE_AGE: f64 = 0.01;
/// Higher-Level SoftBody
///
/// This is a wrapper struct providing some useful functions.
///
/// TODO: come up with a better name.
pub struct HLSoftBody<B = Brain>(ReferenceCounter<MutPoint<SoftBody<B>>>);
impl<B> From<SoftBody<B>> for HLSoftBody<B> {
fn from(sb: SoftBody<B>) -> HLSoftBody<B> {
HLSoftBody(ReferenceCounter::new(MutPoint::new(sb)))
}
}
impl<B> Clone for HLSoftBody<B> {
fn clone(&self) -> Self {
HLSoftBody(ReferenceCounter::clone(&self.0))
}
}
impl<B> PartialEq<HLSoftBody<B>> for HLSoftBody<B> {
fn eq(&self, rhs: &HLSoftBody<B>) -> bool {
ReferenceCounter::ptr_eq(&self.0, &rhs.0)
}
}
impl<B> HLSoftBody<B> {
/// Wrapper function
#[cfg(multithreading)]
pub fn borrow(&self) -> RwLockReadGuard<SoftBody<B>> {
return self.0.read().unwrap();
}
#[cfg(not(multithreading))]
pub fn borrow(&self) -> Ref<SoftBody<B>> {
return self.0.borrow();
}
/// Wrapper function
#[cfg(multithreading)]
pub fn borrow_mut(&self) -> RwLockWriteGuard<SoftBody<B>> {
return self.0.write().unwrap();
}
#[cfg(not(multithreading))]
pub fn borrow_mut(&self) -> RefMut<SoftBody<B>> {
return self.0.borrow_mut();
}
/// Returns a boolean indicating whether this `HLSoftBody` is currently borrowed, useful for debugging.
#[cfg(multithreading)]
pub fn can_borrow_mut(&self) -> bool {
return self.0.try_write().is_ok();
}
#[cfg(not(multithreading))]
pub fn can_borrow_mut(&self) -> bool {
return self.0.try_borrow_mut().is_ok();
}
/// Consume this thing and return the value it holds
#[cfg(multithreading)]
pub fn into_inner(self) -> SoftBody<B> {
self.0.into_inner().unwrap()
}
#[cfg(not(multithreading))]
pub fn into_inner(self) -> SoftBody<B> {
use std::rc::Rc;
match Rc::try_unwrap(self.0) {
Ok(n) => n.into_inner(),
Err(_e) => panic!("Could not unwrap Rc."),
}
}
/// Calls the same function on all types and updates `SoftBodiesInPositions` by calling `set_sbip`.
pub fn apply_motions(
&self,
time_step: f64,
board_size: BoardSize,
terrain: &Terrain,
sbip: &mut SoftBodiesInPositions<B>,
) {
use std::ops::DerefMut;
self.borrow_mut()
.deref_mut()
.apply_motions(time_step, terrain, board_size);
self.set_sbip(sbip, board_size);
}
/// Updates `SoftBodiesInPositions` and updates itself by calling `update_sbip_variables()`.
pub fn set_sbip(&self, sbip: &mut SoftBodiesInPositions<B>, board_size: BoardSize) {
// TODO: Look for optimizations here by cleaning and filling sbip more intelligently.
let mut self_borrow = self.borrow_mut();
self_borrow.update_sbip_variables(board_size);
if self_borrow.moved_between_tiles() {
for x in self_borrow.previous_x_range() {
for y in self_borrow.previous_y_range() {
// Prevents deleting tiles we are currently in.
if !self_borrow.is_in_tile(x, y) {
sbip.remove_soft_body_at(x, y, self.clone());
}
}
}
for x in self_borrow.current_x_range() {
for y in self_borrow.current_y_range() {
// Prevents duplicate entries.
if !self_borrow.was_in_tile(x, y) {
sbip.add_soft_body_at(x, y, self.clone());
}
}
}
}
}
/// Completely removes this `HLSoftBody` from `sbip`.
///
/// NOTE: `HLSoftBody` is added again when `set_sbip` is called.
pub fn remove_from_sbip(&mut self, sbip: &mut SoftBodiesInPositions<B>) {
for x in self.borrow().current_x_range() {
for y in self.borrow().current_y_range() {
sbip.remove_soft_body_at(x, y, self.clone());
}
}
}
/// Checks for collision and adjusts velocity if that's the case.
///
/// TODO: clean up the many uses of `borrow()`
pub fn collide(&self, sbip: &SoftBodiesInPositions<B>) {
let mut self_br = self.borrow_mut();
let mut colliders = self_br.get_colliders(sbip);
// Remove self, if you don't do this then the program will crash because you're borrowing self twice.
colliders.remove_softbody(self.clone());
let self_px = self_br.get_px();
let self_py = self_br.get_py();
let self_radius = self_br.get_radius();
let self_mass = self_br.get_mass();
for collider_rc in colliders {
let collider = collider_rc.borrow();
let (collider_px, collider_py) = (collider.get_px(), collider.get_py());
let distance = distance(self_px, self_py, collider_px, collider_py);
let combined_radius = self_radius + collider.get_radius();
if distance < combined_radius {
let force = combined_radius * COLLISION_FORCE;
let add_vx = (self_px - collider_px) / distance * force / self_mass;
let add_vy = (self_py - collider_py) / distance * force / self_mass;
// This is where self is needed to be borrowed mutably.
self_br.add_vx(add_vx);
self_br.add_vy(add_vy);
}
}
}
/// This function requires a reference to a `Board`.
/// This is usually impossible so you'll have to turn to `unsafe`.
pub fn return_to_earth(
&mut self,
time: f64,
board_size: BoardSize,
terrain: &mut Terrain,
climate: &Climate,
sbip: &mut SoftBodiesInPositions<B>,
) {
// To keep the borrowchecker happy.
{
let self_deref = self.borrow_mut();
for _i in 0..PIECES {
let tile_pos = self_deref.get_random_covered_tile(board_size);
terrain.add_food_or_nothing_at(tile_pos, self_deref.get_energy() / PIECES as f64);
terrain.update_at(tile_pos, time, climate);
}
}
self.remove_from_sbip(sbip);
}
}
| impl<B: Intentions> HLSoftBody<B> {
fn wants_primary_birth(&self, time: f64) -> bool {
let temp = self.borrow();
temp.get_energy() > SAFE_SIZE
&& temp.brain.wants_birth() > 0.0
&& temp.get_age(time) > MATURE_AGE
}
}
impl<B: NeuralNet + Intentions + RecombinationInfinite> HLSoftBody<B> {
/// Returns a new creature if there's a birth, otherwise returns `None`
// TODO: cleanup
pub fn try_reproduce(
&mut self,
time: f64,
sbip: &mut SoftBodiesInPositions<B>,
board_size: BoardSize,
) -> Option<HLSoftBody<B>> {
if self.wants_primary_birth(time) {
let self_px = self.borrow().get_px();
let self_py = self.borrow().get_py();
let self_radius = self.borrow().get_radius();
let mut colliders = self.borrow().get_colliders(sbip);
// Remove self
colliders.remove_softbody(self.clone());
let mut parents: Vec<HLSoftBody<B>> = colliders
.into_iter()
.filter(|rc_soft| {
let c = rc_soft.borrow();
let dist = distance(self_px, self_py, c.get_px(), c.get_py());
let combined_radius = self_radius * FIGHT_RANGE + c.get_radius();
c.brain.wants_help_birth() > -1.0 // must be a willing creature
&& dist < combined_radius // must be close enough
// TODO: find out if this addition to the Processing code works
// && c.get_age(time) >= MATURE_AGE // creature must be old enough
// && c.base.get_energy() > SAFE_SIZE
})
.collect();
parents.push(self.clone());
let available_energy = parents
.iter()
.fold(0.0, |acc, c| acc + c.borrow().get_baby_energy());
if available_energy > BABY_SIZE {
let energy = BABY_SIZE;
// Giving birth costs energy
parents.iter_mut().for_each(|c| {
let mut c = c.borrow_mut();
let energy_to_lose = energy * (c.get_baby_energy() / available_energy);
c.lose_energy(energy_to_lose);
});
let sb = HLSoftBody::from(Creature::new_baby(parents, energy, time));
sb.set_sbip(sbip, board_size);
sb.set_sbip(sbip, board_size);
// Hooray! Return the little baby!
Some(sb)
} else {
// There isn't enough energy available
None
}
} else {
// This creature can't give birth because of age, energy or because it doesn't want to.
return None;
}
}
}
pub type SoftBody<B = Brain> = Creature<B>;
// Here are all the functions only applicable to `Creature`s.
impl<B> SoftBody<B> {
/// Performs the energy requirement to keep living.
pub fn metabolize(&mut self, time_step: f64, time: f64) {
// TODO: fix ugly code.
let age = AGE_FACTOR * (time - self.get_birth_time());
let creature = self;
let energy_to_lose = creature.get_energy() * METABOLISM_ENERGY * age * time_step;
creature.lose_energy(energy_to_lose);
// Creature should die if it doesn't have enough energy, this is done by `Board`.
}
} | random_line_split | |
mod.rs | use self::constants::*;
use super::*;
mod creature;
mod rock;
pub use self::creature::*;
pub use self::rock::*;
use std::cell::{Ref, RefMut};
#[cfg(multithreading)]
type ReferenceCounter = std::sync::Arc;
#[cfg(not(multithreading))]
type ReferenceCounter<A> = std::rc::Rc<A>;
#[cfg(multithreading)]
type MutPoint = std::sync::RwLock;
#[cfg(not(multithreading))]
type MutPoint<A> = std::cell::RefCell<A>;
const COLLISION_FORCE: f64 = 0.01;
const PIECES: usize = 20;
const AGE_FACTOR: f64 = 1.0;
const MATURE_AGE: f64 = 0.01;
/// Higher-Level SoftBody
///
/// This is a wrapper struct providing some useful functions.
///
/// TODO: come up with a better name.
pub struct HLSoftBody<B = Brain>(ReferenceCounter<MutPoint<SoftBody<B>>>);
impl<B> From<SoftBody<B>> for HLSoftBody<B> {
fn from(sb: SoftBody<B>) -> HLSoftBody<B> |
}
impl<B> Clone for HLSoftBody<B> {
fn clone(&self) -> Self {
HLSoftBody(ReferenceCounter::clone(&self.0))
}
}
impl<B> PartialEq<HLSoftBody<B>> for HLSoftBody<B> {
fn eq(&self, rhs: &HLSoftBody<B>) -> bool {
ReferenceCounter::ptr_eq(&self.0, &rhs.0)
}
}
impl<B> HLSoftBody<B> {
/// Wrapper function
#[cfg(multithreading)]
pub fn borrow(&self) -> RwLockReadGuard<SoftBody<B>> {
return self.0.read().unwrap();
}
#[cfg(not(multithreading))]
pub fn borrow(&self) -> Ref<SoftBody<B>> {
return self.0.borrow();
}
/// Wrapper function
#[cfg(multithreading)]
pub fn borrow_mut(&self) -> RwLockWriteGuard<SoftBody<B>> {
return self.0.write().unwrap();
}
#[cfg(not(multithreading))]
pub fn borrow_mut(&self) -> RefMut<SoftBody<B>> {
return self.0.borrow_mut();
}
/// Returns a boolean indicating whether this `HLSoftBody` is currently borrowed, useful for debugging.
#[cfg(multithreading)]
pub fn can_borrow_mut(&self) -> bool {
return self.0.try_write().is_ok();
}
#[cfg(not(multithreading))]
pub fn can_borrow_mut(&self) -> bool {
return self.0.try_borrow_mut().is_ok();
}
/// Consume this thing and return the value it holds
#[cfg(multithreading)]
pub fn into_inner(self) -> SoftBody<B> {
self.0.into_inner().unwrap()
}
#[cfg(not(multithreading))]
pub fn into_inner(self) -> SoftBody<B> {
use std::rc::Rc;
match Rc::try_unwrap(self.0) {
Ok(n) => n.into_inner(),
Err(_e) => panic!("Could not unwrap Rc."),
}
}
/// Calls the same function on all types and updates `SoftBodiesInPositions` by calling `set_sbip`.
pub fn apply_motions(
&self,
time_step: f64,
board_size: BoardSize,
terrain: &Terrain,
sbip: &mut SoftBodiesInPositions<B>,
) {
use std::ops::DerefMut;
self.borrow_mut()
.deref_mut()
.apply_motions(time_step, terrain, board_size);
self.set_sbip(sbip, board_size);
}
/// Updates `SoftBodiesInPositions` and updates itself by calling `update_sbip_variables()`.
pub fn set_sbip(&self, sbip: &mut SoftBodiesInPositions<B>, board_size: BoardSize) {
// TODO: Look for optimizations here by cleaning and filling sbip more intelligently.
let mut self_borrow = self.borrow_mut();
self_borrow.update_sbip_variables(board_size);
if self_borrow.moved_between_tiles() {
for x in self_borrow.previous_x_range() {
for y in self_borrow.previous_y_range() {
// Prevents deleting tiles we are currently in.
if !self_borrow.is_in_tile(x, y) {
sbip.remove_soft_body_at(x, y, self.clone());
}
}
}
for x in self_borrow.current_x_range() {
for y in self_borrow.current_y_range() {
// Prevents duplicate entries.
if !self_borrow.was_in_tile(x, y) {
sbip.add_soft_body_at(x, y, self.clone());
}
}
}
}
}
/// Completely removes this `HLSoftBody` from `sbip`.
///
/// NOTE: `HLSoftBody` is added again when `set_sbip` is called.
pub fn remove_from_sbip(&mut self, sbip: &mut SoftBodiesInPositions<B>) {
for x in self.borrow().current_x_range() {
for y in self.borrow().current_y_range() {
sbip.remove_soft_body_at(x, y, self.clone());
}
}
}
/// Checks for collision and adjusts velocity if that's the case.
///
/// TODO: clean up the many uses of `borrow()`
pub fn collide(&self, sbip: &SoftBodiesInPositions<B>) {
let mut self_br = self.borrow_mut();
let mut colliders = self_br.get_colliders(sbip);
// Remove self, if you don't do this then the program will crash because you're borrowing self twice.
colliders.remove_softbody(self.clone());
let self_px = self_br.get_px();
let self_py = self_br.get_py();
let self_radius = self_br.get_radius();
let self_mass = self_br.get_mass();
for collider_rc in colliders {
let collider = collider_rc.borrow();
let (collider_px, collider_py) = (collider.get_px(), collider.get_py());
let distance = distance(self_px, self_py, collider_px, collider_py);
let combined_radius = self_radius + collider.get_radius();
if distance < combined_radius {
let force = combined_radius * COLLISION_FORCE;
let add_vx = (self_px - collider_px) / distance * force / self_mass;
let add_vy = (self_py - collider_py) / distance * force / self_mass;
// This is where self is needed to be borrowed mutably.
self_br.add_vx(add_vx);
self_br.add_vy(add_vy);
}
}
}
/// This function requires a reference to a `Board`.
/// This is usually impossible so you'll have to turn to `unsafe`.
pub fn return_to_earth(
&mut self,
time: f64,
board_size: BoardSize,
terrain: &mut Terrain,
climate: &Climate,
sbip: &mut SoftBodiesInPositions<B>,
) {
// To keep the borrowchecker happy.
{
let self_deref = self.borrow_mut();
for _i in 0..PIECES {
let tile_pos = self_deref.get_random_covered_tile(board_size);
terrain.add_food_or_nothing_at(tile_pos, self_deref.get_energy() / PIECES as f64);
terrain.update_at(tile_pos, time, climate);
}
}
self.remove_from_sbip(sbip);
}
}
impl<B: Intentions> HLSoftBody<B> {
fn wants_primary_birth(&self, time: f64) -> bool {
let temp = self.borrow();
temp.get_energy() > SAFE_SIZE
&& temp.brain.wants_birth() > 0.0
&& temp.get_age(time) > MATURE_AGE
}
}
impl<B: NeuralNet + Intentions + RecombinationInfinite> HLSoftBody<B> {
/// Returns a new creature if there's a birth, otherwise returns `None`
// TODO: cleanup
pub fn try_reproduce(
&mut self,
time: f64,
sbip: &mut SoftBodiesInPositions<B>,
board_size: BoardSize,
) -> Option<HLSoftBody<B>> {
if self.wants_primary_birth(time) {
let self_px = self.borrow().get_px();
let self_py = self.borrow().get_py();
let self_radius = self.borrow().get_radius();
let mut colliders = self.borrow().get_colliders(sbip);
// Remove self
colliders.remove_softbody(self.clone());
let mut parents: Vec<HLSoftBody<B>> = colliders
.into_iter()
.filter(|rc_soft| {
let c = rc_soft.borrow();
let dist = distance(self_px, self_py, c.get_px(), c.get_py());
let combined_radius = self_radius * FIGHT_RANGE + c.get_radius();
c.brain.wants_help_birth() > -1.0 // must be a willing creature
&& dist < combined_radius // must be close enough
// TODO: find out if this addition to the Processing code works
// && c.get_age(time) >= MATURE_AGE // creature must be old enough
// && c.base.get_energy() > SAFE_SIZE
})
.collect();
parents.push(self.clone());
let available_energy = parents
.iter()
.fold(0.0, |acc, c| acc + c.borrow().get_baby_energy());
if available_energy > BABY_SIZE {
let energy = BABY_SIZE;
// Giving birth costs energy
parents.iter_mut().for_each(|c| {
let mut c = c.borrow_mut();
let energy_to_lose = energy * (c.get_baby_energy() / available_energy);
c.lose_energy(energy_to_lose);
});
let sb = HLSoftBody::from(Creature::new_baby(parents, energy, time));
sb.set_sbip(sbip, board_size);
sb.set_sbip(sbip, board_size);
// Hooray! Return the little baby!
Some(sb)
} else {
// There isn't enough energy available
None
}
} else {
// This creature can't give birth because of age, energy or because it doesn't want to.
return None;
}
}
}
pub type SoftBody<B = Brain> = Creature<B>;
// Here are all the functions only applicable to `Creature`s.
impl<B> SoftBody<B> {
/// Performs the energy requirement to keep living.
pub fn metabolize(&mut self, time_step: f64, time: f64) {
// TODO: fix ugly code.
let age = AGE_FACTOR * (time - self.get_birth_time());
let creature = self;
let energy_to_lose = creature.get_energy() * METABOLISM_ENERGY * age * time_step;
creature.lose_energy(energy_to_lose);
// Creature should die if it doesn't have enough energy, this is done by `Board`.
}
}
| {
HLSoftBody(ReferenceCounter::new(MutPoint::new(sb)))
} | identifier_body |
mod.rs | use self::constants::*;
use super::*;
mod creature;
mod rock;
pub use self::creature::*;
pub use self::rock::*;
use std::cell::{Ref, RefMut};
#[cfg(multithreading)]
type ReferenceCounter = std::sync::Arc;
#[cfg(not(multithreading))]
type ReferenceCounter<A> = std::rc::Rc<A>;
#[cfg(multithreading)]
type MutPoint = std::sync::RwLock;
#[cfg(not(multithreading))]
type MutPoint<A> = std::cell::RefCell<A>;
const COLLISION_FORCE: f64 = 0.01;
const PIECES: usize = 20;
const AGE_FACTOR: f64 = 1.0;
const MATURE_AGE: f64 = 0.01;
/// Higher-Level SoftBody
///
/// This is a wrapper struct providing some useful functions.
///
/// TODO: come up with a better name.
pub struct HLSoftBody<B = Brain>(ReferenceCounter<MutPoint<SoftBody<B>>>);
impl<B> From<SoftBody<B>> for HLSoftBody<B> {
fn from(sb: SoftBody<B>) -> HLSoftBody<B> {
HLSoftBody(ReferenceCounter::new(MutPoint::new(sb)))
}
}
impl<B> Clone for HLSoftBody<B> {
fn clone(&self) -> Self {
HLSoftBody(ReferenceCounter::clone(&self.0))
}
}
impl<B> PartialEq<HLSoftBody<B>> for HLSoftBody<B> {
fn eq(&self, rhs: &HLSoftBody<B>) -> bool {
ReferenceCounter::ptr_eq(&self.0, &rhs.0)
}
}
impl<B> HLSoftBody<B> {
/// Wrapper function
#[cfg(multithreading)]
pub fn borrow(&self) -> RwLockReadGuard<SoftBody<B>> {
return self.0.read().unwrap();
}
#[cfg(not(multithreading))]
pub fn borrow(&self) -> Ref<SoftBody<B>> {
return self.0.borrow();
}
/// Wrapper function
#[cfg(multithreading)]
pub fn borrow_mut(&self) -> RwLockWriteGuard<SoftBody<B>> {
return self.0.write().unwrap();
}
#[cfg(not(multithreading))]
pub fn borrow_mut(&self) -> RefMut<SoftBody<B>> {
return self.0.borrow_mut();
}
/// Returns a boolean indicating whether this `HLSoftBody` is currently borrowed, useful for debugging.
#[cfg(multithreading)]
pub fn can_borrow_mut(&self) -> bool {
return self.0.try_write().is_ok();
}
#[cfg(not(multithreading))]
pub fn can_borrow_mut(&self) -> bool {
return self.0.try_borrow_mut().is_ok();
}
/// Consume this thing and return the value it holds
#[cfg(multithreading)]
pub fn into_inner(self) -> SoftBody<B> {
self.0.into_inner().unwrap()
}
#[cfg(not(multithreading))]
pub fn into_inner(self) -> SoftBody<B> {
use std::rc::Rc;
match Rc::try_unwrap(self.0) {
Ok(n) => n.into_inner(),
Err(_e) => panic!("Could not unwrap Rc."),
}
}
/// Calls the same function on all types and updates `SoftBodiesInPositions` by calling `set_sbip`.
pub fn apply_motions(
&self,
time_step: f64,
board_size: BoardSize,
terrain: &Terrain,
sbip: &mut SoftBodiesInPositions<B>,
) {
use std::ops::DerefMut;
self.borrow_mut()
.deref_mut()
.apply_motions(time_step, terrain, board_size);
self.set_sbip(sbip, board_size);
}
/// Updates `SoftBodiesInPositions` and updates itself by calling `update_sbip_variables()`.
pub fn set_sbip(&self, sbip: &mut SoftBodiesInPositions<B>, board_size: BoardSize) {
// TODO: Look for optimizations here by cleaning and filling sbip more intelligently.
let mut self_borrow = self.borrow_mut();
self_borrow.update_sbip_variables(board_size);
if self_borrow.moved_between_tiles() {
for x in self_borrow.previous_x_range() {
for y in self_borrow.previous_y_range() {
// Prevents deleting tiles we are currently in.
if !self_borrow.is_in_tile(x, y) {
sbip.remove_soft_body_at(x, y, self.clone());
}
}
}
for x in self_borrow.current_x_range() {
for y in self_borrow.current_y_range() {
// Prevents duplicate entries.
if !self_borrow.was_in_tile(x, y) {
sbip.add_soft_body_at(x, y, self.clone());
}
}
}
}
}
/// Completely removes this `HLSoftBody` from `sbip`.
///
/// NOTE: `HLSoftBody` is added again when `set_sbip` is called.
pub fn remove_from_sbip(&mut self, sbip: &mut SoftBodiesInPositions<B>) {
for x in self.borrow().current_x_range() {
for y in self.borrow().current_y_range() {
sbip.remove_soft_body_at(x, y, self.clone());
}
}
}
/// Checks for collision and adjusts velocity if that's the case.
///
/// TODO: clean up the many uses of `borrow()`
pub fn collide(&self, sbip: &SoftBodiesInPositions<B>) {
let mut self_br = self.borrow_mut();
let mut colliders = self_br.get_colliders(sbip);
// Remove self, if you don't do this then the program will crash because you're borrowing self twice.
colliders.remove_softbody(self.clone());
let self_px = self_br.get_px();
let self_py = self_br.get_py();
let self_radius = self_br.get_radius();
let self_mass = self_br.get_mass();
for collider_rc in colliders {
let collider = collider_rc.borrow();
let (collider_px, collider_py) = (collider.get_px(), collider.get_py());
let distance = distance(self_px, self_py, collider_px, collider_py);
let combined_radius = self_radius + collider.get_radius();
if distance < combined_radius |
}
}
/// This function requires a reference to a `Board`.
/// This is usually impossible so you'll have to turn to `unsafe`.
pub fn return_to_earth(
&mut self,
time: f64,
board_size: BoardSize,
terrain: &mut Terrain,
climate: &Climate,
sbip: &mut SoftBodiesInPositions<B>,
) {
// To keep the borrowchecker happy.
{
let self_deref = self.borrow_mut();
for _i in 0..PIECES {
let tile_pos = self_deref.get_random_covered_tile(board_size);
terrain.add_food_or_nothing_at(tile_pos, self_deref.get_energy() / PIECES as f64);
terrain.update_at(tile_pos, time, climate);
}
}
self.remove_from_sbip(sbip);
}
}
impl<B: Intentions> HLSoftBody<B> {
fn wants_primary_birth(&self, time: f64) -> bool {
let temp = self.borrow();
temp.get_energy() > SAFE_SIZE
&& temp.brain.wants_birth() > 0.0
&& temp.get_age(time) > MATURE_AGE
}
}
impl<B: NeuralNet + Intentions + RecombinationInfinite> HLSoftBody<B> {
/// Returns a new creature if there's a birth, otherwise returns `None`
// TODO: cleanup
pub fn try_reproduce(
&mut self,
time: f64,
sbip: &mut SoftBodiesInPositions<B>,
board_size: BoardSize,
) -> Option<HLSoftBody<B>> {
if self.wants_primary_birth(time) {
let self_px = self.borrow().get_px();
let self_py = self.borrow().get_py();
let self_radius = self.borrow().get_radius();
let mut colliders = self.borrow().get_colliders(sbip);
// Remove self
colliders.remove_softbody(self.clone());
let mut parents: Vec<HLSoftBody<B>> = colliders
.into_iter()
.filter(|rc_soft| {
let c = rc_soft.borrow();
let dist = distance(self_px, self_py, c.get_px(), c.get_py());
let combined_radius = self_radius * FIGHT_RANGE + c.get_radius();
c.brain.wants_help_birth() > -1.0 // must be a willing creature
&& dist < combined_radius // must be close enough
// TODO: find out if this addition to the Processing code works
// && c.get_age(time) >= MATURE_AGE // creature must be old enough
// && c.base.get_energy() > SAFE_SIZE
})
.collect();
parents.push(self.clone());
let available_energy = parents
.iter()
.fold(0.0, |acc, c| acc + c.borrow().get_baby_energy());
if available_energy > BABY_SIZE {
let energy = BABY_SIZE;
// Giving birth costs energy
parents.iter_mut().for_each(|c| {
let mut c = c.borrow_mut();
let energy_to_lose = energy * (c.get_baby_energy() / available_energy);
c.lose_energy(energy_to_lose);
});
let sb = HLSoftBody::from(Creature::new_baby(parents, energy, time));
sb.set_sbip(sbip, board_size);
sb.set_sbip(sbip, board_size);
// Hooray! Return the little baby!
Some(sb)
} else {
// There isn't enough energy available
None
}
} else {
// This creature can't give birth because of age, energy or because it doesn't want to.
return None;
}
}
}
pub type SoftBody<B = Brain> = Creature<B>;
// Here are all the functions only applicable to `Creature`s.
impl<B> SoftBody<B> {
/// Performs the energy requirement to keep living.
pub fn metabolize(&mut self, time_step: f64, time: f64) {
// TODO: fix ugly code.
let age = AGE_FACTOR * (time - self.get_birth_time());
let creature = self;
let energy_to_lose = creature.get_energy() * METABOLISM_ENERGY * age * time_step;
creature.lose_energy(energy_to_lose);
// Creature should die if it doesn't have enough energy, this is done by `Board`.
}
}
| {
let force = combined_radius * COLLISION_FORCE;
let add_vx = (self_px - collider_px) / distance * force / self_mass;
let add_vy = (self_py - collider_py) / distance * force / self_mass;
// This is where self is needed to be borrowed mutably.
self_br.add_vx(add_vx);
self_br.add_vy(add_vy);
} | conditional_block |
mod.rs | use self::constants::*;
use super::*;
mod creature;
mod rock;
pub use self::creature::*;
pub use self::rock::*;
use std::cell::{Ref, RefMut};
#[cfg(multithreading)]
type ReferenceCounter = std::sync::Arc;
#[cfg(not(multithreading))]
type ReferenceCounter<A> = std::rc::Rc<A>;
#[cfg(multithreading)]
type MutPoint = std::sync::RwLock;
#[cfg(not(multithreading))]
type MutPoint<A> = std::cell::RefCell<A>;
const COLLISION_FORCE: f64 = 0.01;
const PIECES: usize = 20;
const AGE_FACTOR: f64 = 1.0;
const MATURE_AGE: f64 = 0.01;
/// Higher-Level SoftBody
///
/// This is a wrapper struct providing some useful functions.
///
/// TODO: come up with a better name.
pub struct | <B = Brain>(ReferenceCounter<MutPoint<SoftBody<B>>>);
impl<B> From<SoftBody<B>> for HLSoftBody<B> {
fn from(sb: SoftBody<B>) -> HLSoftBody<B> {
HLSoftBody(ReferenceCounter::new(MutPoint::new(sb)))
}
}
impl<B> Clone for HLSoftBody<B> {
fn clone(&self) -> Self {
HLSoftBody(ReferenceCounter::clone(&self.0))
}
}
impl<B> PartialEq<HLSoftBody<B>> for HLSoftBody<B> {
fn eq(&self, rhs: &HLSoftBody<B>) -> bool {
ReferenceCounter::ptr_eq(&self.0, &rhs.0)
}
}
impl<B> HLSoftBody<B> {
/// Wrapper function
#[cfg(multithreading)]
pub fn borrow(&self) -> RwLockReadGuard<SoftBody<B>> {
return self.0.read().unwrap();
}
#[cfg(not(multithreading))]
pub fn borrow(&self) -> Ref<SoftBody<B>> {
return self.0.borrow();
}
/// Wrapper function
#[cfg(multithreading)]
pub fn borrow_mut(&self) -> RwLockWriteGuard<SoftBody<B>> {
return self.0.write().unwrap();
}
#[cfg(not(multithreading))]
pub fn borrow_mut(&self) -> RefMut<SoftBody<B>> {
return self.0.borrow_mut();
}
/// Returns a boolean indicating whether this `HLSoftBody` is currently borrowed, useful for debugging.
#[cfg(multithreading)]
pub fn can_borrow_mut(&self) -> bool {
return self.0.try_write().is_ok();
}
#[cfg(not(multithreading))]
pub fn can_borrow_mut(&self) -> bool {
return self.0.try_borrow_mut().is_ok();
}
/// Consume this thing and return the value it holds
#[cfg(multithreading)]
pub fn into_inner(self) -> SoftBody<B> {
self.0.into_inner().unwrap()
}
#[cfg(not(multithreading))]
pub fn into_inner(self) -> SoftBody<B> {
use std::rc::Rc;
match Rc::try_unwrap(self.0) {
Ok(n) => n.into_inner(),
Err(_e) => panic!("Could not unwrap Rc."),
}
}
/// Calls the same function on all types and updates `SoftBodiesInPositions` by calling `set_sbip`.
pub fn apply_motions(
&self,
time_step: f64,
board_size: BoardSize,
terrain: &Terrain,
sbip: &mut SoftBodiesInPositions<B>,
) {
use std::ops::DerefMut;
self.borrow_mut()
.deref_mut()
.apply_motions(time_step, terrain, board_size);
self.set_sbip(sbip, board_size);
}
/// Updates `SoftBodiesInPositions` and updates itself by calling `update_sbip_variables()`.
pub fn set_sbip(&self, sbip: &mut SoftBodiesInPositions<B>, board_size: BoardSize) {
// TODO: Look for optimizations here by cleaning and filling sbip more intelligently.
let mut self_borrow = self.borrow_mut();
self_borrow.update_sbip_variables(board_size);
if self_borrow.moved_between_tiles() {
for x in self_borrow.previous_x_range() {
for y in self_borrow.previous_y_range() {
// Prevents deleting tiles we are currently in.
if !self_borrow.is_in_tile(x, y) {
sbip.remove_soft_body_at(x, y, self.clone());
}
}
}
for x in self_borrow.current_x_range() {
for y in self_borrow.current_y_range() {
// Prevents duplicate entries.
if !self_borrow.was_in_tile(x, y) {
sbip.add_soft_body_at(x, y, self.clone());
}
}
}
}
}
/// Completely removes this `HLSoftBody` from `sbip`.
///
/// NOTE: `HLSoftBody` is added again when `set_sbip` is called.
pub fn remove_from_sbip(&mut self, sbip: &mut SoftBodiesInPositions<B>) {
for x in self.borrow().current_x_range() {
for y in self.borrow().current_y_range() {
sbip.remove_soft_body_at(x, y, self.clone());
}
}
}
/// Checks for collision and adjusts velocity if that's the case.
///
/// TODO: clean up the many uses of `borrow()`
pub fn collide(&self, sbip: &SoftBodiesInPositions<B>) {
let mut self_br = self.borrow_mut();
let mut colliders = self_br.get_colliders(sbip);
// Remove self, if you don't do this then the program will crash because you're borrowing self twice.
colliders.remove_softbody(self.clone());
let self_px = self_br.get_px();
let self_py = self_br.get_py();
let self_radius = self_br.get_radius();
let self_mass = self_br.get_mass();
for collider_rc in colliders {
let collider = collider_rc.borrow();
let (collider_px, collider_py) = (collider.get_px(), collider.get_py());
let distance = distance(self_px, self_py, collider_px, collider_py);
let combined_radius = self_radius + collider.get_radius();
if distance < combined_radius {
let force = combined_radius * COLLISION_FORCE;
let add_vx = (self_px - collider_px) / distance * force / self_mass;
let add_vy = (self_py - collider_py) / distance * force / self_mass;
// This is where self is needed to be borrowed mutably.
self_br.add_vx(add_vx);
self_br.add_vy(add_vy);
}
}
}
/// This function requires a reference to a `Board`.
/// This is usually impossible so you'll have to turn to `unsafe`.
pub fn return_to_earth(
&mut self,
time: f64,
board_size: BoardSize,
terrain: &mut Terrain,
climate: &Climate,
sbip: &mut SoftBodiesInPositions<B>,
) {
// To keep the borrowchecker happy.
{
let self_deref = self.borrow_mut();
for _i in 0..PIECES {
let tile_pos = self_deref.get_random_covered_tile(board_size);
terrain.add_food_or_nothing_at(tile_pos, self_deref.get_energy() / PIECES as f64);
terrain.update_at(tile_pos, time, climate);
}
}
self.remove_from_sbip(sbip);
}
}
impl<B: Intentions> HLSoftBody<B> {
fn wants_primary_birth(&self, time: f64) -> bool {
let temp = self.borrow();
temp.get_energy() > SAFE_SIZE
&& temp.brain.wants_birth() > 0.0
&& temp.get_age(time) > MATURE_AGE
}
}
impl<B: NeuralNet + Intentions + RecombinationInfinite> HLSoftBody<B> {
/// Returns a new creature if there's a birth, otherwise returns `None`
// TODO: cleanup
pub fn try_reproduce(
&mut self,
time: f64,
sbip: &mut SoftBodiesInPositions<B>,
board_size: BoardSize,
) -> Option<HLSoftBody<B>> {
if self.wants_primary_birth(time) {
let self_px = self.borrow().get_px();
let self_py = self.borrow().get_py();
let self_radius = self.borrow().get_radius();
let mut colliders = self.borrow().get_colliders(sbip);
// Remove self
colliders.remove_softbody(self.clone());
let mut parents: Vec<HLSoftBody<B>> = colliders
.into_iter()
.filter(|rc_soft| {
let c = rc_soft.borrow();
let dist = distance(self_px, self_py, c.get_px(), c.get_py());
let combined_radius = self_radius * FIGHT_RANGE + c.get_radius();
c.brain.wants_help_birth() > -1.0 // must be a willing creature
&& dist < combined_radius // must be close enough
// TODO: find out if this addition to the Processing code works
// && c.get_age(time) >= MATURE_AGE // creature must be old enough
// && c.base.get_energy() > SAFE_SIZE
})
.collect();
parents.push(self.clone());
let available_energy = parents
.iter()
.fold(0.0, |acc, c| acc + c.borrow().get_baby_energy());
if available_energy > BABY_SIZE {
let energy = BABY_SIZE;
// Giving birth costs energy
parents.iter_mut().for_each(|c| {
let mut c = c.borrow_mut();
let energy_to_lose = energy * (c.get_baby_energy() / available_energy);
c.lose_energy(energy_to_lose);
});
let sb = HLSoftBody::from(Creature::new_baby(parents, energy, time));
sb.set_sbip(sbip, board_size);
sb.set_sbip(sbip, board_size);
// Hooray! Return the little baby!
Some(sb)
} else {
// There isn't enough energy available
None
}
} else {
// This creature can't give birth because of age, energy or because it doesn't want to.
return None;
}
}
}
pub type SoftBody<B = Brain> = Creature<B>;
// Here are all the functions only applicable to `Creature`s.
impl<B> SoftBody<B> {
/// Performs the energy requirement to keep living.
pub fn metabolize(&mut self, time_step: f64, time: f64) {
// TODO: fix ugly code.
let age = AGE_FACTOR * (time - self.get_birth_time());
let creature = self;
let energy_to_lose = creature.get_energy() * METABOLISM_ENERGY * age * time_step;
creature.lose_energy(energy_to_lose);
// Creature should die if it doesn't have enough energy, this is done by `Board`.
}
}
| HLSoftBody | identifier_name |
wal.go | package wal
import (
"bytes"
"errors"
"fmt"
"hash/crc32"
"io"
"os"
"path/filepath"
"sync"
"time"
"k8s-lx1036/k8s/storage/etcd/raft"
"go.etcd.io/etcd/client/pkg/v3/fileutil"
"go.etcd.io/etcd/pkg/v3/pbutil"
"go.etcd.io/etcd/raft/v3/raftpb"
"go.etcd.io/etcd/server/v3/wal/walpb"
"k8s.io/klog/v2"
)
const (
// 所有的日志类型如下所示,会被append追加存储在wal文件中
metadataType int64 = iota + 1
entryType
stateType
crcType
snapshotType
// warnSyncDuration is the amount of time allotted to an fsync before
// logging a warning
warnSyncDuration = time.Second
)
var (
crcTable = crc32.MakeTable(crc32.Castagnoli)
// SegmentSizeBytes is the preallocated size of each wal segment file.
// The actual size might be larger than this. In general, the default
// value should be used, but this is defined as an exported variable
// so that tests can set a different segment size.
SegmentSizeBytes int64 = 64 * 1000 * 1000 // 64MB
ErrFileNotFound = errors.New("wal: file not found")
ErrSnapshotNotFound = errors.New("wal: snapshot not found")
ErrMetadataConflict = errors.New("wal: conflicting metadata found")
ErrCRCMismatch = errors.New("wal: crc mismatch")
ErrSnapshotMismatch = errors.New("wal: snapshot mismatch")
)
// WAL用于以append记录的方式快速记录数据到持久化存储中
// 只可能处于append模式或者读模式,但是不能同时处于两种模式中
// 新创建的WAL处于append模式,刚打开的WAL处于读模式
type WAL struct {
// 存放WAL文件的目录
dir string // the living directory of the underlay files
// 根据前面的dir成员创建的File指针
// dirFile is a fd for the wal directory for syncing on Rename
dirFile *os.File
// 每个WAL文件最开始的地方,都需要写入的元数据
metadata []byte // metadata recorded at the head of each WAL
state raftpb.HardState // hardstate recorded at the head of WAL
start walpb.Snapshot // snapshot to start reading
decoder *decoder // decoder to decode records
readClose func() error // closer for decode reader
mu sync.Mutex
enti uint64 // index of the last entry saved to the wal
encoder *encoder // encoder to encode records
locks []*fileutil.LockedFile // the locked files the WAL holds (the name is increasing)
fp *filePipeline
}
func (w *WAL) Save(hardState raftpb.HardState, entries []raftpb.Entry) error {
w.mu.Lock()
defer w.mu.Unlock()
// short cut, do not call sync
if raft.IsEmptyHardState(hardState) && len(entries) == 0 {
return nil
}
mustSync := raft.MustSync(hardState, w.state, len(entries))
for i := range entries {
if err := w.saveEntry(&entries[i]); err != nil {
return err
}
}
if err := w.saveState(&hardState); err != nil {
return err
}
curOff, err := w.tail().Seek(0, io.SeekCurrent)
if err != nil {
return err
}
if curOff < SegmentSizeBytes {
if mustSync {
return w.sync()
}
return nil
}
return w.cut()
}
func (w *WAL) saveEntry(e *raftpb.Entry) error {
b := pbutil.MustMarshal(e)
rec := &walpb.Record{Type: entryType, Data: b}
if err := w.encoder.encode(rec); err != nil {
return err
}
w.enti = e.Index
return nil
}
func (w *WAL) saveState(s *raftpb.HardState) error {
if raft.IsEmptyHardState(*s) {
return nil
}
w.state = *s
b := pbutil.MustMarshal(s)
rec := &walpb.Record{Type: stateType, Data: b}
return w.encoder.encode(rec)
}
// cut closes current file written and creates a new one ready to append.
// cut first creates a temp wal file and writes necessary headers into it.
// Then cut atomically rename temp wal file to a wal file.
func (w *WAL) cut() error {
// close old wal file; truncate to avoid wasting space if an early cut
off, serr := w.tail().Seek(0, io.SeekCurrent)
if serr != nil {
return serr
}
if err := w.tail().Truncate(off); err != nil {
return err
}
if err := w.sync(); err != nil {
return err
}
fpath := filepath.Join(w.dir, walName(w.seq()+1, w.enti+1))
// create a temp wal file with name sequence + 1, or truncate the existing one
newTail, err := w.fp.Open()
if err != nil {
return err
}
// update writer and save the previous crc
w.locks = append(w.locks, newTail)
prevCrc := w.encoder.crc.Sum32()
w.encoder, err = newFileEncoder(w.tail().File, prevCrc)
if err != nil {
return err
}
if err = w.saveCrc(prevCrc); err != nil {
return err
}
if err = w.encoder.encode(&walpb.Record{Type: metadataType, Data: w.metadata}); err != nil {
return err
}
if err = w.saveState(&w.state); err != nil {
return err
}
// atomically move temp wal file to wal file
if err = w.sync(); err != nil {
return err
}
off, err = w.tail().Seek(0, io.SeekCurrent)
if err != nil {
return err
}
if err = os.Rename(newTail.Name(), fpath); err != nil {
return err
}
start := time.Now()
if err = fileutil.Fsync(w.dirFile); err != nil {
return err
}
walFsyncSec.Observe(time.Since(start).Seconds())
// reopen newTail with its new path so calls to Name() match the wal filename format
newTail.Close()
if newTail, err = fileutil.LockFile(fpath, os.O_WRONLY, fileutil.PrivateFileMode); err != nil {
return err
}
if _, err = newTail.Seek(off, io.SeekStart); err != nil {
return err
}
w.locks[len(w.locks)-1] = newTail
prevCrc = w.encoder.crc.Sum32()
w.encoder, err = newFileEncoder(w.tail().File, prevCrc)
if err != nil {
return err
}
klog.Infof(fmt.Sprintf("created a new WAL segment path:%s", fpath))
return nil
}
func (w *WAL) saveCrc(prevCrc uint32) error {
return w.encoder.encode(&walpb.Record{Type: crcType, Crc: prevCrc})
}
func (w *WAL) SaveSnapshot(e walpb.Snapshot) error {
if err := walpb.ValidateSnapshotForWrite(&e); err != nil {
return err
}
b := pbutil.MustMarshal(&e)
w.mu.Lock()
defer w.mu.Unlock()
rec := &walpb.Record{Type: snapshotType, Data: b}
if err := w.encoder.encode(rec); err != nil {
return err
}
// update enti only when snapshot is ahead of last index
if w.enti < e.Index {
w.enti = e.Index
}
return w.sync()
}
// 系统调用把snapshot写入持久化磁盘里
func (w *WAL) sync() error {
if w.encoder != nil {
if err := w.encoder.flush(); err != nil {
return err
}
}
start := time.Now()
err := fileutil.Fdatasync(w.tail().File)
took := time.Since(start)
if took > warnSyncDuration {
klog.Errorf(fmt.Sprintf("sync duration of %v, expected less than %v", took, warnSyncDuration))
}
walFsyncSec.Observe(took.Seconds())
return err
}
func (w *WAL) tail() *fileutil.LockedFile {
if len(w.locks) > 0 {
return w.locks[len(w.locks)-1]
}
return nil
}
// walName: %016x-%016x.wal, seq-index
func (w *WAL) seq() uint64 {
t := w.tail()
if t == nil {
return 0
}
seq, _, err := parseWALName(filepath.Base(t.Name()))
if err != nil {
klog.Fatalf(fmt.Sprintf("failed to parse WAL name:%s err:%v", t.Name(), err))
}
return seq
}
func (w *WAL) renameWAL(tmpdirpath string) (*WAL, error) {
if err := os.RemoveAll(w.dir); err != nil {
return nil, err
}
// On non-Windows platforms, hold the lock while renaming. Releasing
// the lock and trying to reacquire it quickly can be flaky because
// it's possible the process will fork to spawn a process while this is
// happening. The fds are set up as close-on-exec by the Go runtime,
// but there is a window between the fork and the exec where another
// process holds the lock.
if err := os.Rename(tmpdirpath, w.dir); err != nil {
if _, ok := err.(*os.LinkError); ok {
return w.renameWALUnlock(tmpdirpath)
}
return nil, err
}
w.fp = newFilePipeline(w.dir, SegmentSizeBytes)
df, err := fileutil.OpenDir(w.dir)
w.dirFile = df
return w, err
}
func (w *WAL) renameWALUnlock(tmpdirpath string) (*WAL, error) {
// rename of directory with locked files doesn't work on windows/cifs;
// close the WAL to release the locks so the directory can be renamed.
klog.Infof(fmt.Sprintf("closing WAL to release flock and retry directory renaming from %s to %s", tmpdirpath, w.dir))
w.Close()
if err := os.Rename(tmpdirpath, w.dir); err != nil {
return nil, err
}
// reopen and relock
newWAL, oerr := Open(w.dir, walpb.Snapshot{})
if oerr != nil {
return nil, oerr
}
if _, _, _, err := newWAL.ReadAll(); err != nil {
newWAL.Close()
return nil, err
}
return newWAL, nil
}
// ReadAll ReadAll函数负责从当前WAL实例中读取所有的记录
// 如果是可写模式,那么必须独处所有的记录,否则将报错
// 如果是只读模式,将尝试读取所有的记录,但是如果读出的记录没有满足快照数据的要求,将返回ErrSnapshotNotFound
// 而如果读出来的快照数据与要求的快照数据不匹配,返回所有的记录以及ErrSnapshotMismatch
// ReadAll reads out records of the current WAL.
// If opened in write mode, it must read out all records until EOF. Or an error
// will be returned.
// If opened in read mode, it will try to read all records if possible.
// If it cannot read out the expected snap, it will return ErrSnapshotNotFound.
// If loaded snap doesn't match with the expected one, it will return
// all the records and error ErrSnapshotMismatch.
// INFO: detect not-last-snap error.
// INFO: maybe loose the checking of match.
// After ReadAll, the WAL will be ready for appending new records.
func (w *WAL) ReadAll() (metadata []byte, state raftpb.HardState, ents []raftpb.Entry, err error) {
w.mu.Lock()
defer w.mu.Unlock()
rec := &walpb.Record{}
decoder := w.decoder
var match bool
// 循环读出record记录
for err = decoder.decode(rec); err == nil; err = decoder.decode(rec) {
switch rec.Type {
case entryType:
e := mustUnmarshalEntry(rec.Data)
if e.Index > w.start.Index {
ents = append(ents[:e.Index-w.start.Index-1], e)
}
w.enti = e.Index
case stateType:
state = mustUnmarshalState(rec.Data)
case metadataType:
if metadata != nil && !bytes.Equal(metadata, rec.Data) {
state.Reset()
return nil, state, nil, ErrMetadataConflict
}
metadata = rec.Data
case crcType:
crc := decoder.crc.Sum32()
// current crc of decoder must match the crc of the record.
// do no need to match 0 crc, since the decoder is a new one at this case.
if crc != 0 && rec.Validate(crc) != nil {
state.Reset()
return nil, state, nil, ErrCRCMismatch
}
decoder.updateCRC(rec.Crc)
case snapshotType:
var snap walpb.Snapshot
pbutil.MustUnmarshal(&snap, rec.Data)
if snap.Index == w.start.Index {
if snap.Term != w.start.Term {
state.Reset()
return nil, state, nil, ErrSnapshotMismatch
}
match = true
}
default:
state.Reset()
return nil, state, nil, fmt.Errorf("unexpected block type %d", rec.Type)
}
}
// 到了这里,读取WAL日志的循环就结束了,中间可能出错
// 如果读完了所有文件,则err = io.EOF
// 如果是没有读完文件而中间有其他错误,那么err就不是EOF了,下面会分别处理只读模式和写模式
switch w.tail() {
case nil:
// We do not have to read out all entries in read mode.
// The last record maybe a partial written one, so
// ErrunexpectedEOF might be returned.
if err != io.EOF && err != io.ErrUnexpectedEOF {
state.Reset()
return nil, state, nil, err
}
default:
// We must read all of the entries if WAL is opened in write mode.
if err != io.EOF {
state.Reset()
return nil, state, nil, err
}
// decodeRecord() will return io.EOF if it detects a zero record,
// but this zero record may be followed by non-zero records from
// a torn write. Overwriting some of these non-zero records, but
// not all, will cause CRC errors on WAL open. Since the records
// were never fully synced to disk in the first place, it's safe
// to zero them out to avoid any CRC errors from new writes.
if _, err = w.tail().Seek(w.decoder.lastOffset(), io.SeekStart); err != nil {
return nil, state, nil, err
}
if err = fileutil.ZeroToEnd(w.tail().File); err != nil {
return nil, state, nil, err
}
}
err = nil
if !match {
err = ErrSnapshotNotFound
}
// close decoder, disable reading
if w.readClose != nil {
w.readClose()
w.readClose = nil
}
w.start = walpb.Snapshot{}
w.metadata = metadata
if w.tail() != nil {
// create encoder (chain crc with the decoder), enable appending
w.encoder, err = newFileEncoder(w.tail().File, w.decoder.lastCRC())
if err != nil {
return
}
}
w.decoder = nil
return metadata, state, ents, err
}
// Close closes the current WAL file and directory.
func (w *WAL) Close() error {
w.mu.Lock()
defer w.mu.Unlock()
if w.fp != nil {
w.fp.Close()
w.fp = nil
}
if w.tail() != nil {
if err := w.sync(); err != nil {
return err
}
}
for _, l := range w.locks {
if l == nil {
continue
}
if err := l.Close(); err != nil {
klog.Errorf(fmt.Sprintf("failed to unlock during closing wal: %v", err))
}
}
return w.dirFile.Close()
}
func (w *WAL) cleanupWAL() {
var err error
if err = w.Close(); err != nil {
klog.Fatalf(fmt.Sprintf("failed to close WAL during cleanup err:%v", err))
}
brokenDirName := fmt.Sprintf("%s.broken.%v", w.dir, time.Now().Format("20060102.150405.999999"))
if err = os.Rename(w.dir, brokenDirName); err != nil {
klog.Fatalf(fmt.Sprintf("failed to rename WAL during cleanup source-path:%s rename-path:%s err:%v", w.dir, brokenDirName, err))
}
}
// Create creates a WAL ready for appending records. The given metadata is
// recorded at the head of each WAL file, and can be retrieved with ReadAll.
func Create(dataDir string, metadata []byte) (*WAL, error) {
if Exist(dataDir) {
return nil, os.ErrExist
}
// keep temporary wal directory so WAL initialization appears atomic
tmpdirpath := filepath.Clean(dataDir) + ".tmp"
if fileutil.Exist(tmpdirpath) {
if err := os.RemoveAll(tmpdirpath); err != nil {
return nil, err
}
}
defer os.RemoveAll(tmpdirpath)
if err := fileutil.CreateDirAll(tmpdirpath); err != nil {
klog.Errorf(fmt.Sprintf("[WAL Create]failed to create a temporary WAL directory tmp-dir-path:%s dir-path:%s err:%v", tmpdirpath, dataDir, err))
return nil, err
}
path := filepath.Join(tmpdirpath, walName(0, 0))
f, err := fileutil.LockFile(path, os.O_WRONLY|os.O_CREATE, fileutil.PrivateFileMode)
if err != nil {
klog.Errorf(fmt.Sprintf("[WAL Create]failed to flock an initial WAL file path:%s err:%v", path, err))
return nil, err
}
if _, err = f.Seek(0, io.SeekEnd); err != nil {
klog.Errorf(fmt.Sprintf("[WAL Create]failed to seek an initial WAL file path:%s err:%v", path, err))
return nil, err
}
if err = fileutil.Preallocate(f.File, SegmentSizeBytes, true); err != nil {
klog.Errorf(fmt.Sprintf("[WAL Create]failed to preallocate an initial WAL file path:%s segment-bytes:%d err:%v", path, SegmentSizeBytes, err))
return nil, err
}
// INFO: crcType -> metadataType -> snapshotType -> entryType -> stateType
w := &WAL{
dir: dataDir,
metadata: metadata,
}
w.encoder, err = newFileEncoder(f.File, 0)
if err != nil {
return nil, err
}
w.locks = append(w.locks, f)
if err = w.saveCrc(0); err != nil {
return nil, err
}
if err = w.encoder.encode(&walpb.Record{Type: metadataType, Data: metadata}); err != nil {
return nil, err
}
if err = w.SaveSnapshot(walpb.Snapshot{}); err != nil {
return nil, err
}
if w, err = w.renameWAL(tmpdirpath); err != nil {
klog.Errorf(fmt.Sprintf("[WAL Create]failed to rename the temporary WAL directory tmp-dir-path:%s dir-path:%s err:%v", tmpdirpath, w.dir, err))
return nil, err
}
defer func() {
if err != nil {
w.cleanupWAL()
}
}()
// directory was renamed; sync parent dir to persist rename
parentDir, err := fileutil.OpenDir(filepath.Dir(w.dir))
if err != nil {
klog.Errorf(fmt.Sprintf("[WAL Create]failed to open the parent data directory parent-dir-path:%s dir-path:%s err:%v", filepath.Dir(w.dir), w.dir, err))
return nil, err
}
defer parentDir.Close()
if err = fileutil.Fsync(parentDir); err != nil {
klog.Errorf(fmt.Sprintf("[WAL Create]failed to fsync the parent data directory file parent-dir-path:%s dir-path:%s err:%v", filepath.Dir(w.dir), w.dir, err))
return nil, err
}
return w, nil
}
// Open opens the WAL at the given snap.
// The snap SHOULD have been previously saved to the WAL, or the following
// ReadAll will fail.
// The returned WAL is ready to read and the first record will be the one after
// the given snap. The WAL cannot be appended to before reading out all of its
// previous records.
func Open(dirpath string, snap walpb.Snapshot) (*WAL, error) { | w, err := openAtIndex(dirpath, snap, true)
if err != nil {
return nil, err
}
if w.dirFile, err = fileutil.OpenDir(w.dir); err != nil {
return nil, err
}
return w, nil
} | random_line_split | |
wal.go | package wal
import (
"bytes"
"errors"
"fmt"
"hash/crc32"
"io"
"os"
"path/filepath"
"sync"
"time"
"k8s-lx1036/k8s/storage/etcd/raft"
"go.etcd.io/etcd/client/pkg/v3/fileutil"
"go.etcd.io/etcd/pkg/v3/pbutil"
"go.etcd.io/etcd/raft/v3/raftpb"
"go.etcd.io/etcd/server/v3/wal/walpb"
"k8s.io/klog/v2"
)
const (
// 所有的日志类型如下所示,会被append追加存储在wal文件中
metadataType int64 = iota + 1
entryType
stateType
crcType
snapshotType
// warnSyncDuration is the amount of time allotted to an fsync before
// logging a warning
warnSyncDuration = time.Second
)
var (
crcTable = crc32.MakeTable(crc32.Castagnoli)
// SegmentSizeBytes is the preallocated size of each wal segment file.
// The actual size might be larger than this. In general, the default
// value should be used, but this is defined as an exported variable
// so that tests can set a different segment size.
SegmentSizeBytes int64 = 64 * 1000 * 1000 // 64MB
ErrFileNotFound = errors.New("wal: file not found")
ErrSnapshotNotFound = errors.New("wal: snapshot not found")
ErrMetadataConflict = errors.New("wal: conflicting metadata found")
ErrCRCMismatch = errors.New("wal: crc mismatch")
ErrSnapshotMismatch = errors.New("wal: snapshot mismatch")
)
// WAL用于以append记录的方式快速记录数据到持久化存储中
// 只可能处于append模式或者读模式,但是不能同时处于两种模式中
// 新创建的WAL处于append模式,刚打开的WAL处于读模式
type WAL struct {
// 存放WAL文件的目录
dir string // the living directory of the underlay files
// 根据前面的dir成员创建的File指针
// dirFile is a fd for the wal directory for syncing on Rename
dirFile *os.File
// 每个WAL文件最开始的地方,都需要写入的元数据
metadata []byte // metadata recorded at the head of each WAL
state raftpb.HardState // hardstate recorded at the head of WAL
start walpb.Snapshot // snapshot to start reading
decoder *decoder // decoder to decode records
readClose func() error // closer for decode reader
mu sync.Mutex
enti uint64 // index of the last entry saved to the wal
encoder *encoder // encoder to encode records
locks []*fileutil.LockedFile // the locked files the WAL holds (the name is increasing)
fp *filePipeline
}
func (w *WAL) Save(hardState raftpb.HardState, entries []raftpb.Entry) error {
w.mu.Lock()
defer w.mu.Unlock()
// short cut, do not call sync
if raft.IsEmptyHardState(hardState) && len(entries) == 0 {
return nil
}
mustSync := raft.MustSync(hardState, w.state, len(entries))
for i := range entries {
if err := w.saveEntry(&entries[i]); err != nil {
return err
}
}
if err := w.saveState(&hardState); err != nil {
return err
}
curOff, err := w.tail().Seek(0, io.SeekCurrent)
if err != nil {
return err
}
if curOff < SegmentSizeBytes {
if mustSync {
return w.sync()
}
return nil
}
return w.cut()
}
func (w *WAL) saveEntry(e *raftpb.Entry) error {
b := pbutil.MustMarshal(e)
rec := &walpb.Record{Type: entryType, Data: b}
if err := w.encoder.encode(rec); err != nil {
return err
}
w.enti = e.Index
return nil
}
func (w *WAL) saveState(s *raftpb.HardState) error {
if raft.IsEmptyHardState(*s) {
return nil
}
w.state = *s
b := pbutil.MustMarshal(s)
rec := &walpb.Record{Type: stateType, Data: b}
return w.encoder.encode(rec)
}
// cut closes current file written and creates a new one ready to append.
// cut first creates a temp wal file and writes necessary headers into it.
// Then cut atomically rename temp wal file to a wal file.
func (w *WAL) cut() error {
// close old wal file; truncate to avoid wasting space if an early cut
off, serr := w.tail().Seek(0, io.SeekCurrent)
if serr != nil {
return serr
}
if err := w.tail().Truncate(off); err != nil {
return err
}
if err := w.sync(); err != nil {
return err
}
fpath := filepath.Join(w.dir, walName(w.seq()+1, w.enti+1))
// create a temp wal file with name sequence + 1, or truncate the existing one
newTail, err := w.fp.Open()
if err != nil {
return err
}
// update writer and save the previous crc
w.locks = append(w.locks, newTail)
prevCrc := w.encoder.crc.Sum32()
w.encoder, err = newFileEncoder(w.tail().File, prevCrc)
if err != nil {
return err
}
if err = w.saveCrc(prevCrc); err != nil {
return err
}
if err = w.encoder.encode(&walpb.Record{Type: metadataType, Data: w.metadata}); err != nil {
return err
}
if err = w.saveState(&w.state); err != nil {
return err
}
// atomically move temp wal file to wal file
if err = w.sync(); err != nil {
return err
}
off, err = w.tail().Seek(0, io.SeekCurrent)
if err != nil {
return err
}
if err = os.Rename(newTail.Name(), fpath); err != nil {
return err
}
start := time.Now()
if err = fileutil.Fsync(w.dirFile); err != nil {
return err
}
walFsyncSec.Observe(time.Since(start).Seconds())
// reopen newTail with its new path so calls to Name() match the wal filename format
newTail.Close()
if newTail, err = fileutil.LockFile(fpath, os.O_WRONLY, fileutil.PrivateFileMode); err != nil {
return err
}
if _, err = newTail.Seek(off, io.SeekStart); err != nil {
return err
}
w.locks[len(w.locks)-1] = newTail
prevCrc = w.encoder.crc.Sum32()
w.encoder, err = newFileEncoder(w.tail().File, prevCrc)
if err != nil {
return err
}
klog.Infof(fmt.Sprintf("created a new WAL segment path:%s", fpath))
return nil
}
func (w *WAL) saveCrc(prevCrc uint32) error {
return w.encoder.encode(&walpb.Record{Type: crcType, Crc: prevCrc})
}
func (w *WAL) SaveSnapshot(e walpb.Snapshot) error {
if err := walpb.ValidateSnapshotForWrite(&e); err != nil {
return err
}
b := pbutil.MustMarshal(&e)
w.mu.Lock()
defer w.mu.Unlock()
rec := &walpb.Record{Type: snapshotType, Data: b}
if err := w.encoder.encode(rec); err != nil {
return err
}
// update enti only when snapshot is ahead of last index
if w.enti < e.Index {
w.enti = e.Index
}
return w.sync()
}
// 系统调用把snapshot写入持久化磁盘里
func (w *WAL) sync() error {
if w.encoder != nil {
if err := w.encoder.flush(); err != nil {
return err
}
}
start := time.Now()
err := fileutil.Fdatasync(w.tail().File)
took := time.Since(start)
if took > warnSyncDuration {
klog.Errorf(fmt.Sprintf("sync duration of %v, expected less than %v", took, warnSyncDuration))
}
walFsyncSec.Observe(took.Seconds())
return err
}
func (w *WAL) tail() *fileutil.LockedFile {
if len(w.locks) > 0 {
return w.locks[len(w.locks)-1]
}
return nil
}
// walName: %016x-%016x.wal, seq-index
func (w *WAL) seq() uint64 {
t := w.tail()
if t == nil {
return 0
}
seq, _, err := parseWALName(filepath.Base(t.Name()))
if err != nil {
klog.Fatalf(fmt.Sprintf("failed to parse WAL name:%s err:%v", t.Name(), err))
}
return seq
}
func (w *WAL) renameWAL(tmpdirpath string) (*WAL, error) {
if err := os.RemoveAll(w.dir); err != nil {
return nil, err
}
// On non-Windows platforms, hold the lock while renaming. Releasing
// the lock and trying to reacquire it quickly can be flaky because
// it's possible the proces | rk to spawn a process while this is
// happening. The fds are set up as close-on-exec by the Go runtime,
// but there is a window between the fork and the exec where another
// process holds the lock.
if err := os.Rename(tmpdirpath, w.dir); err != nil {
if _, ok := err.(*os.LinkError); ok {
return w.renameWALUnlock(tmpdirpath)
}
return nil, err
}
w.fp = newFilePipeline(w.dir, SegmentSizeBytes)
df, err := fileutil.OpenDir(w.dir)
w.dirFile = df
return w, err
}
func (w *WAL) renameWALUnlock(tmpdirpath string) (*WAL, error) {
// rename of directory with locked files doesn't work on windows/cifs;
// close the WAL to release the locks so the directory can be renamed.
klog.Infof(fmt.Sprintf("closing WAL to release flock and retry directory renaming from %s to %s", tmpdirpath, w.dir))
w.Close()
if err := os.Rename(tmpdirpath, w.dir); err != nil {
return nil, err
}
// reopen and relock
newWAL, oerr := Open(w.dir, walpb.Snapshot{})
if oerr != nil {
return nil, oerr
}
if _, _, _, err := newWAL.ReadAll(); err != nil {
newWAL.Close()
return nil, err
}
return newWAL, nil
}
// ReadAll ReadAll函数负责从当前WAL实例中读取所有的记录
// 如果是可写模式,那么必须独处所有的记录,否则将报错
// 如果是只读模式,将尝试读取所有的记录,但是如果读出的记录没有满足快照数据的要求,将返回ErrSnapshotNotFound
// 而如果读出来的快照数据与要求的快照数据不匹配,返回所有的记录以及ErrSnapshotMismatch
// ReadAll reads out records of the current WAL.
// If opened in write mode, it must read out all records until EOF. Or an error
// will be returned.
// If opened in read mode, it will try to read all records if possible.
// If it cannot read out the expected snap, it will return ErrSnapshotNotFound.
// If loaded snap doesn't match with the expected one, it will return
// all the records and error ErrSnapshotMismatch.
// INFO: detect not-last-snap error.
// INFO: maybe loose the checking of match.
// After ReadAll, the WAL will be ready for appending new records.
func (w *WAL) ReadAll() (metadata []byte, state raftpb.HardState, ents []raftpb.Entry, err error) {
w.mu.Lock()
defer w.mu.Unlock()
rec := &walpb.Record{}
decoder := w.decoder
var match bool
// 循环读出record记录
for err = decoder.decode(rec); err == nil; err = decoder.decode(rec) {
switch rec.Type {
case entryType:
e := mustUnmarshalEntry(rec.Data)
if e.Index > w.start.Index {
ents = append(ents[:e.Index-w.start.Index-1], e)
}
w.enti = e.Index
case stateType:
state = mustUnmarshalState(rec.Data)
case metadataType:
if metadata != nil && !bytes.Equal(metadata, rec.Data) {
state.Reset()
return nil, state, nil, ErrMetadataConflict
}
metadata = rec.Data
case crcType:
crc := decoder.crc.Sum32()
// current crc of decoder must match the crc of the record.
// do no need to match 0 crc, since the decoder is a new one at this case.
if crc != 0 && rec.Validate(crc) != nil {
state.Reset()
return nil, state, nil, ErrCRCMismatch
}
decoder.updateCRC(rec.Crc)
case snapshotType:
var snap walpb.Snapshot
pbutil.MustUnmarshal(&snap, rec.Data)
if snap.Index == w.start.Index {
if snap.Term != w.start.Term {
state.Reset()
return nil, state, nil, ErrSnapshotMismatch
}
match = true
}
default:
state.Reset()
return nil, state, nil, fmt.Errorf("unexpected block type %d", rec.Type)
}
}
// 到了这里,读取WAL日志的循环就结束了,中间可能出错
// 如果读完了所有文件,则err = io.EOF
// 如果是没有读完文件而中间有其他错误,那么err就不是EOF了,下面会分别处理只读模式和写模式
switch w.tail() {
case nil:
// We do not have to read out all entries in read mode.
// The last record maybe a partial written one, so
// ErrunexpectedEOF might be returned.
if err != io.EOF && err != io.ErrUnexpectedEOF {
state.Reset()
return nil, state, nil, err
}
default:
// We must read all of the entries if WAL is opened in write mode.
if err != io.EOF {
state.Reset()
return nil, state, nil, err
}
// decodeRecord() will return io.EOF if it detects a zero record,
// but this zero record may be followed by non-zero records from
// a torn write. Overwriting some of these non-zero records, but
// not all, will cause CRC errors on WAL open. Since the records
// were never fully synced to disk in the first place, it's safe
// to zero them out to avoid any CRC errors from new writes.
if _, err = w.tail().Seek(w.decoder.lastOffset(), io.SeekStart); err != nil {
return nil, state, nil, err
}
if err = fileutil.ZeroToEnd(w.tail().File); err != nil {
return nil, state, nil, err
}
}
err = nil
if !match {
err = ErrSnapshotNotFound
}
// close decoder, disable reading
if w.readClose != nil {
w.readClose()
w.readClose = nil
}
w.start = walpb.Snapshot{}
w.metadata = metadata
if w.tail() != nil {
// create encoder (chain crc with the decoder), enable appending
w.encoder, err = newFileEncoder(w.tail().File, w.decoder.lastCRC())
if err != nil {
return
}
}
w.decoder = nil
return metadata, state, ents, err
}
// Close closes the current WAL file and directory.
func (w *WAL) Close() error {
w.mu.Lock()
defer w.mu.Unlock()
if w.fp != nil {
w.fp.Close()
w.fp = nil
}
if w.tail() != nil {
if err := w.sync(); err != nil {
return err
}
}
for _, l := range w.locks {
if l == nil {
continue
}
if err := l.Close(); err != nil {
klog.Errorf(fmt.Sprintf("failed to unlock during closing wal: %v", err))
}
}
return w.dirFile.Close()
}
func (w *WAL) cleanupWAL() {
var err error
if err = w.Close(); err != nil {
klog.Fatalf(fmt.Sprintf("failed to close WAL during cleanup err:%v", err))
}
brokenDirName := fmt.Sprintf("%s.broken.%v", w.dir, time.Now().Format("20060102.150405.999999"))
if err = os.Rename(w.dir, brokenDirName); err != nil {
klog.Fatalf(fmt.Sprintf("failed to rename WAL during cleanup source-path:%s rename-path:%s err:%v", w.dir, brokenDirName, err))
}
}
// Create creates a WAL ready for appending records. The given metadata is
// recorded at the head of each WAL file, and can be retrieved with ReadAll.
func Create(dataDir string, metadata []byte) (*WAL, error) {
if Exist(dataDir) {
return nil, os.ErrExist
}
// keep temporary wal directory so WAL initialization appears atomic
tmpdirpath := filepath.Clean(dataDir) + ".tmp"
if fileutil.Exist(tmpdirpath) {
if err := os.RemoveAll(tmpdirpath); err != nil {
return nil, err
}
}
defer os.RemoveAll(tmpdirpath)
if err := fileutil.CreateDirAll(tmpdirpath); err != nil {
klog.Errorf(fmt.Sprintf("[WAL Create]failed to create a temporary WAL directory tmp-dir-path:%s dir-path:%s err:%v", tmpdirpath, dataDir, err))
return nil, err
}
path := filepath.Join(tmpdirpath, walName(0, 0))
f, err := fileutil.LockFile(path, os.O_WRONLY|os.O_CREATE, fileutil.PrivateFileMode)
if err != nil {
klog.Errorf(fmt.Sprintf("[WAL Create]failed to flock an initial WAL file path:%s err:%v", path, err))
return nil, err
}
if _, err = f.Seek(0, io.SeekEnd); err != nil {
klog.Errorf(fmt.Sprintf("[WAL Create]failed to seek an initial WAL file path:%s err:%v", path, err))
return nil, err
}
if err = fileutil.Preallocate(f.File, SegmentSizeBytes, true); err != nil {
klog.Errorf(fmt.Sprintf("[WAL Create]failed to preallocate an initial WAL file path:%s segment-bytes:%d err:%v", path, SegmentSizeBytes, err))
return nil, err
}
// INFO: crcType -> metadataType -> snapshotType -> entryType -> stateType
w := &WAL{
dir: dataDir,
metadata: metadata,
}
w.encoder, err = newFileEncoder(f.File, 0)
if err != nil {
return nil, err
}
w.locks = append(w.locks, f)
if err = w.saveCrc(0); err != nil {
return nil, err
}
if err = w.encoder.encode(&walpb.Record{Type: metadataType, Data: metadata}); err != nil {
return nil, err
}
if err = w.SaveSnapshot(walpb.Snapshot{}); err != nil {
return nil, err
}
if w, err = w.renameWAL(tmpdirpath); err != nil {
klog.Errorf(fmt.Sprintf("[WAL Create]failed to rename the temporary WAL directory tmp-dir-path:%s dir-path:%s err:%v", tmpdirpath, w.dir, err))
return nil, err
}
defer func() {
if err != nil {
w.cleanupWAL()
}
}()
// directory was renamed; sync parent dir to persist rename
parentDir, err := fileutil.OpenDir(filepath.Dir(w.dir))
if err != nil {
klog.Errorf(fmt.Sprintf("[WAL Create]failed to open the parent data directory parent-dir-path:%s dir-path:%s err:%v", filepath.Dir(w.dir), w.dir, err))
return nil, err
}
defer parentDir.Close()
if err = fileutil.Fsync(parentDir); err != nil {
klog.Errorf(fmt.Sprintf("[WAL Create]failed to fsync the parent data directory file parent-dir-path:%s dir-path:%s err:%v", filepath.Dir(w.dir), w.dir, err))
return nil, err
}
return w, nil
}
// Open opens the WAL at the given snap.
// The snap SHOULD have been previously saved to the WAL, or the following
// ReadAll will fail.
// The returned WAL is ready to read and the first record will be the one after
// the given snap. The WAL cannot be appended to before reading out all of its
// previous records.
func Open(dirpath string, snap walpb.Snapshot) (*WAL, error) {
w, err := openAtIndex(dirpath, snap, true)
if err != nil {
return nil, err
}
if w.dirFile, err = fileutil.OpenDir(w.dir); err != nil {
return nil, err
}
return w, nil
}
| s will fo | identifier_name |
wal.go | package wal
import (
"bytes"
"errors"
"fmt"
"hash/crc32"
"io"
"os"
"path/filepath"
"sync"
"time"
"k8s-lx1036/k8s/storage/etcd/raft"
"go.etcd.io/etcd/client/pkg/v3/fileutil"
"go.etcd.io/etcd/pkg/v3/pbutil"
"go.etcd.io/etcd/raft/v3/raftpb"
"go.etcd.io/etcd/server/v3/wal/walpb"
"k8s.io/klog/v2"
)
const (
// 所有的日志类型如下所示,会被append追加存储在wal文件中
metadataType int64 = iota + 1
entryType
stateType
crcType
snapshotType
// warnSyncDuration is the amount of time allotted to an fsync before
// logging a warning
warnSyncDuration = time.Second
)
var (
crcTable = crc32.MakeTable(crc32.Castagnoli)
// SegmentSizeBytes is the preallocated size of each wal segment file.
// The actual size might be larger than this. In general, the default
// value should be used, but this is defined as an exported variable
// so that tests can set a different segment size.
SegmentSizeBytes int64 = 64 * 1000 * 1000 // 64MB
ErrFileNotFound = errors.New("wal: file not found")
ErrSnapshotNotFound = errors.New("wal: snapshot not found")
ErrMetadataConflict = errors.New("wal: conflicting metadata found")
ErrCRCMismatch = errors.New("wal: crc mismatch")
ErrSnapshotMismatch = errors.New("wal: snapshot mismatch")
)
// WAL用于以append记录的方式快速记录数据到持久化存储中
// 只可能处于append模式或者读模式,但是不能同时处于两种模式中
// 新创建的WAL处于append模式,刚打开的WAL处于读模式
type WAL struct {
// 存放WAL文件的目录
dir string // the living directory of the underlay files
// 根据前面的dir成员创建的File指针
// dirFile is a fd for the wal directory for syncing on Rename
dirFile *os.File
// 每个WAL文件最开始的地方,都需要写入的元数据
metadata []byte // metadata recorded at the head of each WAL
state raftpb.HardState // hardstate recorded at the head of WAL
start walpb.Snapshot // snapshot to start reading
decoder *decoder // decoder to decode records
readClose func() error // closer for decode reader
mu sync.Mutex
enti uint64 // index of the last entry saved to the wal
encoder *encoder // encoder to encode records
locks []*fileutil.LockedFile // the locked files the WAL holds (the name is increasing)
fp *filePipeline
}
func (w *WAL) Save(hardState raftpb.HardState, entries []raftpb.Entry) error {
w.mu.Lock()
defer w.mu.Unlock()
// short cut, do not call sync
if raft.IsEmptyHardState(hardState) && len(entries) == 0 {
return nil
}
mustSync := raft.MustSync(hardState, w.state, len(entries))
for i := range entries {
if err := w.saveEntry(&entries[i]); err != nil {
return err
}
}
if err := w.saveState(&hardState); err != nil {
return err
}
curOff, err := w.tail().Seek(0, io.SeekCurrent)
if err != nil {
return err
}
if curOff < SegmentSizeBytes {
if mustSync {
return w.sync()
}
return nil
}
return w.cut()
}
func (w *WAL) saveEntry(e *raftpb.Entry) error {
b := pbutil.MustMarshal(e)
rec := &walpb.Record{Type: entryType, Data: b}
if err := w.encoder.encode(rec); err != nil {
return err
}
w.enti = e.Index
return nil
}
func (w *WAL) saveState(s *raftpb.HardState) error {
if raft.IsEmptyHardState(*s) {
return nil
}
w.state = *s
b := pbutil.MustMarshal(s)
rec := &walpb.Record{Type: stateType, Data: b}
return w.encoder.encode(rec)
}
// cut closes current file written and creates a new one ready to append.
// cut first creates a temp wal file and writes necessary headers into it.
// Then cut atomically rename temp wal file to a wal file.
func (w *WAL) cut() error {
// close old wal file; truncate to avoid wasting space if an early cut
off, serr := w.tail().Seek(0, io.SeekCurrent)
if serr != nil {
return serr
}
if err := w.tail().Truncate(off); err != nil {
return err
}
if err := w.sync(); err != nil {
return err
}
fpath := filepath.Join(w.dir, walName(w.seq()+1, w.enti+1))
// create a temp wal file with name sequence + 1, or truncate the existing one
newTail, err := w.fp.Open()
if err != nil {
return err
}
// update writer and save the previous crc
w.locks = append(w.locks, newTail)
prevCrc := w.encoder.crc.Sum32()
w.encoder, err = newFileEncoder(w.tail().File, prevCrc)
if err != nil {
return err
}
if err = w.saveCrc(prevCrc); err != nil {
return err
}
if err = w.encoder.encode(&walpb.Record{Type: metadataType, Data: w.metadata}); err != nil {
return err
}
if err = w.saveState(&w.state); err != nil {
return err
}
// atomically move temp wal file to wal file
if err = w.sync(); err != nil {
return err
}
off, err = w.tail().Seek(0, io.SeekCurrent)
if err != nil {
return err
}
if err = os.Rename(newTail.Name(), fpath); err != nil {
return err
}
start := time.Now()
if err = fileutil.Fsync(w.dirFile); err != nil {
return err
}
walFsyncSec.Observe(time.Since(start).Seconds())
// reopen newTail with its new path so calls to Name() match the wal filename format
newTail.Close()
if newTail, err = fileutil.LockFile(fpath, os.O_WRONLY, fileutil.PrivateFileMode); err != nil {
return err
}
if _, err = newTail.Seek(off, io.SeekStart); err != nil {
return err
}
w.locks[len(w.locks)-1] = newTail
prevCrc = w.encoder.crc.Sum32()
w.encoder, err = newFileEncoder(w.tail().File, prevCrc)
if err != nil {
return err
}
klog.Infof(fmt.Sprintf("created a new WAL segment path:%s", fpath))
return nil
}
func (w *WAL) saveCrc(prevCrc uint32) error {
return w.encoder.encode(&walpb.Record{Type: crcType, Crc: prevCrc})
}
func (w *WAL) SaveSnapshot(e wa | or {
if err := walpb.ValidateSnapshotForWrite(&e); err != nil {
return err
}
b := pbutil.MustMarshal(&e)
w.mu.Lock()
defer w.mu.Unlock()
rec := &walpb.Record{Type: snapshotType, Data: b}
if err := w.encoder.encode(rec); err != nil {
return err
}
// update enti only when snapshot is ahead of last index
if w.enti < e.Index {
w.enti = e.Index
}
return w.sync()
}
// 系统调用把snapshot写入持久化磁盘里
func (w *WAL) sync() error {
if w.encoder != nil {
if err := w.encoder.flush(); err != nil {
return err
}
}
start := time.Now()
err := fileutil.Fdatasync(w.tail().File)
took := time.Since(start)
if took > warnSyncDuration {
klog.Errorf(fmt.Sprintf("sync duration of %v, expected less than %v", took, warnSyncDuration))
}
walFsyncSec.Observe(took.Seconds())
return err
}
func (w *WAL) tail() *fileutil.LockedFile {
if len(w.locks) > 0 {
return w.locks[len(w.locks)-1]
}
return nil
}
// walName: %016x-%016x.wal, seq-index
func (w *WAL) seq() uint64 {
t := w.tail()
if t == nil {
return 0
}
seq, _, err := parseWALName(filepath.Base(t.Name()))
if err != nil {
klog.Fatalf(fmt.Sprintf("failed to parse WAL name:%s err:%v", t.Name(), err))
}
return seq
}
func (w *WAL) renameWAL(tmpdirpath string) (*WAL, error) {
if err := os.RemoveAll(w.dir); err != nil {
return nil, err
}
// On non-Windows platforms, hold the lock while renaming. Releasing
// the lock and trying to reacquire it quickly can be flaky because
// it's possible the process will fork to spawn a process while this is
// happening. The fds are set up as close-on-exec by the Go runtime,
// but there is a window between the fork and the exec where another
// process holds the lock.
if err := os.Rename(tmpdirpath, w.dir); err != nil {
if _, ok := err.(*os.LinkError); ok {
return w.renameWALUnlock(tmpdirpath)
}
return nil, err
}
w.fp = newFilePipeline(w.dir, SegmentSizeBytes)
df, err := fileutil.OpenDir(w.dir)
w.dirFile = df
return w, err
}
func (w *WAL) renameWALUnlock(tmpdirpath string) (*WAL, error) {
// rename of directory with locked files doesn't work on windows/cifs;
// close the WAL to release the locks so the directory can be renamed.
klog.Infof(fmt.Sprintf("closing WAL to release flock and retry directory renaming from %s to %s", tmpdirpath, w.dir))
w.Close()
if err := os.Rename(tmpdirpath, w.dir); err != nil {
return nil, err
}
// reopen and relock
newWAL, oerr := Open(w.dir, walpb.Snapshot{})
if oerr != nil {
return nil, oerr
}
if _, _, _, err := newWAL.ReadAll(); err != nil {
newWAL.Close()
return nil, err
}
return newWAL, nil
}
// ReadAll ReadAll函数负责从当前WAL实例中读取所有的记录
// 如果是可写模式,那么必须独处所有的记录,否则将报错
// 如果是只读模式,将尝试读取所有的记录,但是如果读出的记录没有满足快照数据的要求,将返回ErrSnapshotNotFound
// 而如果读出来的快照数据与要求的快照数据不匹配,返回所有的记录以及ErrSnapshotMismatch
// ReadAll reads out records of the current WAL.
// If opened in write mode, it must read out all records until EOF. Or an error
// will be returned.
// If opened in read mode, it will try to read all records if possible.
// If it cannot read out the expected snap, it will return ErrSnapshotNotFound.
// If loaded snap doesn't match with the expected one, it will return
// all the records and error ErrSnapshotMismatch.
// INFO: detect not-last-snap error.
// INFO: maybe loose the checking of match.
// After ReadAll, the WAL will be ready for appending new records.
func (w *WAL) ReadAll() (metadata []byte, state raftpb.HardState, ents []raftpb.Entry, err error) {
w.mu.Lock()
defer w.mu.Unlock()
rec := &walpb.Record{}
decoder := w.decoder
var match bool
// 循环读出record记录
for err = decoder.decode(rec); err == nil; err = decoder.decode(rec) {
switch rec.Type {
case entryType:
e := mustUnmarshalEntry(rec.Data)
if e.Index > w.start.Index {
ents = append(ents[:e.Index-w.start.Index-1], e)
}
w.enti = e.Index
case stateType:
state = mustUnmarshalState(rec.Data)
case metadataType:
if metadata != nil && !bytes.Equal(metadata, rec.Data) {
state.Reset()
return nil, state, nil, ErrMetadataConflict
}
metadata = rec.Data
case crcType:
crc := decoder.crc.Sum32()
// current crc of decoder must match the crc of the record.
// do no need to match 0 crc, since the decoder is a new one at this case.
if crc != 0 && rec.Validate(crc) != nil {
state.Reset()
return nil, state, nil, ErrCRCMismatch
}
decoder.updateCRC(rec.Crc)
case snapshotType:
var snap walpb.Snapshot
pbutil.MustUnmarshal(&snap, rec.Data)
if snap.Index == w.start.Index {
if snap.Term != w.start.Term {
state.Reset()
return nil, state, nil, ErrSnapshotMismatch
}
match = true
}
default:
state.Reset()
return nil, state, nil, fmt.Errorf("unexpected block type %d", rec.Type)
}
}
// 到了这里,读取WAL日志的循环就结束了,中间可能出错
// 如果读完了所有文件,则err = io.EOF
// 如果是没有读完文件而中间有其他错误,那么err就不是EOF了,下面会分别处理只读模式和写模式
switch w.tail() {
case nil:
// We do not have to read out all entries in read mode.
// The last record maybe a partial written one, so
// ErrunexpectedEOF might be returned.
if err != io.EOF && err != io.ErrUnexpectedEOF {
state.Reset()
return nil, state, nil, err
}
default:
// We must read all of the entries if WAL is opened in write mode.
if err != io.EOF {
state.Reset()
return nil, state, nil, err
}
// decodeRecord() will return io.EOF if it detects a zero record,
// but this zero record may be followed by non-zero records from
// a torn write. Overwriting some of these non-zero records, but
// not all, will cause CRC errors on WAL open. Since the records
// were never fully synced to disk in the first place, it's safe
// to zero them out to avoid any CRC errors from new writes.
if _, err = w.tail().Seek(w.decoder.lastOffset(), io.SeekStart); err != nil {
return nil, state, nil, err
}
if err = fileutil.ZeroToEnd(w.tail().File); err != nil {
return nil, state, nil, err
}
}
err = nil
if !match {
err = ErrSnapshotNotFound
}
// close decoder, disable reading
if w.readClose != nil {
w.readClose()
w.readClose = nil
}
w.start = walpb.Snapshot{}
w.metadata = metadata
if w.tail() != nil {
// create encoder (chain crc with the decoder), enable appending
w.encoder, err = newFileEncoder(w.tail().File, w.decoder.lastCRC())
if err != nil {
return
}
}
w.decoder = nil
return metadata, state, ents, err
}
// Close closes the current WAL file and directory.
func (w *WAL) Close() error {
w.mu.Lock()
defer w.mu.Unlock()
if w.fp != nil {
w.fp.Close()
w.fp = nil
}
if w.tail() != nil {
if err := w.sync(); err != nil {
return err
}
}
for _, l := range w.locks {
if l == nil {
continue
}
if err := l.Close(); err != nil {
klog.Errorf(fmt.Sprintf("failed to unlock during closing wal: %v", err))
}
}
return w.dirFile.Close()
}
func (w *WAL) cleanupWAL() {
var err error
if err = w.Close(); err != nil {
klog.Fatalf(fmt.Sprintf("failed to close WAL during cleanup err:%v", err))
}
brokenDirName := fmt.Sprintf("%s.broken.%v", w.dir, time.Now().Format("20060102.150405.999999"))
if err = os.Rename(w.dir, brokenDirName); err != nil {
klog.Fatalf(fmt.Sprintf("failed to rename WAL during cleanup source-path:%s rename-path:%s err:%v", w.dir, brokenDirName, err))
}
}
// Create creates a WAL ready for appending records. The given metadata is
// recorded at the head of each WAL file, and can be retrieved with ReadAll.
func Create(dataDir string, metadata []byte) (*WAL, error) {
if Exist(dataDir) {
return nil, os.ErrExist
}
// keep temporary wal directory so WAL initialization appears atomic
tmpdirpath := filepath.Clean(dataDir) + ".tmp"
if fileutil.Exist(tmpdirpath) {
if err := os.RemoveAll(tmpdirpath); err != nil {
return nil, err
}
}
defer os.RemoveAll(tmpdirpath)
if err := fileutil.CreateDirAll(tmpdirpath); err != nil {
klog.Errorf(fmt.Sprintf("[WAL Create]failed to create a temporary WAL directory tmp-dir-path:%s dir-path:%s err:%v", tmpdirpath, dataDir, err))
return nil, err
}
path := filepath.Join(tmpdirpath, walName(0, 0))
f, err := fileutil.LockFile(path, os.O_WRONLY|os.O_CREATE, fileutil.PrivateFileMode)
if err != nil {
klog.Errorf(fmt.Sprintf("[WAL Create]failed to flock an initial WAL file path:%s err:%v", path, err))
return nil, err
}
if _, err = f.Seek(0, io.SeekEnd); err != nil {
klog.Errorf(fmt.Sprintf("[WAL Create]failed to seek an initial WAL file path:%s err:%v", path, err))
return nil, err
}
if err = fileutil.Preallocate(f.File, SegmentSizeBytes, true); err != nil {
klog.Errorf(fmt.Sprintf("[WAL Create]failed to preallocate an initial WAL file path:%s segment-bytes:%d err:%v", path, SegmentSizeBytes, err))
return nil, err
}
// INFO: crcType -> metadataType -> snapshotType -> entryType -> stateType
w := &WAL{
dir: dataDir,
metadata: metadata,
}
w.encoder, err = newFileEncoder(f.File, 0)
if err != nil {
return nil, err
}
w.locks = append(w.locks, f)
if err = w.saveCrc(0); err != nil {
return nil, err
}
if err = w.encoder.encode(&walpb.Record{Type: metadataType, Data: metadata}); err != nil {
return nil, err
}
if err = w.SaveSnapshot(walpb.Snapshot{}); err != nil {
return nil, err
}
if w, err = w.renameWAL(tmpdirpath); err != nil {
klog.Errorf(fmt.Sprintf("[WAL Create]failed to rename the temporary WAL directory tmp-dir-path:%s dir-path:%s err:%v", tmpdirpath, w.dir, err))
return nil, err
}
defer func() {
if err != nil {
w.cleanupWAL()
}
}()
// directory was renamed; sync parent dir to persist rename
parentDir, err := fileutil.OpenDir(filepath.Dir(w.dir))
if err != nil {
klog.Errorf(fmt.Sprintf("[WAL Create]failed to open the parent data directory parent-dir-path:%s dir-path:%s err:%v", filepath.Dir(w.dir), w.dir, err))
return nil, err
}
defer parentDir.Close()
if err = fileutil.Fsync(parentDir); err != nil {
klog.Errorf(fmt.Sprintf("[WAL Create]failed to fsync the parent data directory file parent-dir-path:%s dir-path:%s err:%v", filepath.Dir(w.dir), w.dir, err))
return nil, err
}
return w, nil
}
// Open opens the WAL at the given snap.
// The snap SHOULD have been previously saved to the WAL, or the following
// ReadAll will fail.
// The returned WAL is ready to read and the first record will be the one after
// the given snap. The WAL cannot be appended to before reading out all of its
// previous records.
func Open(dirpath string, snap walpb.Snapshot) (*WAL, error) {
w, err := openAtIndex(dirpath, snap, true)
if err != nil {
return nil, err
}
if w.dirFile, err = fileutil.OpenDir(w.dir); err != nil {
return nil, err
}
return w, nil
}
| lpb.Snapshot) err | conditional_block |
wal.go | package wal
import (
"bytes"
"errors"
"fmt"
"hash/crc32"
"io"
"os"
"path/filepath"
"sync"
"time"
"k8s-lx1036/k8s/storage/etcd/raft"
"go.etcd.io/etcd/client/pkg/v3/fileutil"
"go.etcd.io/etcd/pkg/v3/pbutil"
"go.etcd.io/etcd/raft/v3/raftpb"
"go.etcd.io/etcd/server/v3/wal/walpb"
"k8s.io/klog/v2"
)
const (
// 所有的日志类型如下所示,会被append追加存储在wal文件中
metadataType int64 = iota + 1
entryType
stateType
crcType
snapshotType
// warnSyncDuration is the amount of time allotted to an fsync before
// logging a warning
warnSyncDuration = time.Second
)
var (
crcTable = crc32.MakeTable(crc32.Castagnoli)
// SegmentSizeBytes is the preallocated size of each wal segment file.
// The actual size might be larger than this. In general, the default
// value should be used, but this is defined as an exported variable
// so that tests can set a different segment size.
SegmentSizeBytes int64 = 64 * 1000 * 1000 // 64MB
ErrFileNotFound = errors.New("wal: file not found")
ErrSnapshotNotFound = errors.New("wal: snapshot not found")
ErrMetadataConflict = errors.New("wal: conflicting metadata found")
ErrCRCMismatch = errors.New("wal: crc mismatch")
ErrSnapshotMismatch = errors.New("wal: snapshot mismatch")
)
// WAL用于以append记录的方式快速记录数据到持久化存储中
// 只可能处于append模式或者读模式,但是不能同时处于两种模式中
// 新创建的WAL处于append模式,刚打开的WAL处于读模式
type WAL struct {
// 存放WAL文件的目录
dir string // the living directory of the underlay files
// 根据前面的dir成员创建的File指针
// dirFile is a fd for the wal directory for syncing on Rename
dirFile *os.File
// 每个WAL文件最开始的地方,都需要写入的元数据
metadata []byte // metadata recorded at the head of each WAL
state raftpb.HardState // hardstate recorded at the head of WAL
start walpb.Snapshot // snapshot to start reading
decoder *decoder // decoder to decode records
readClose func() error // closer for decode reader
mu sync.Mutex
enti uint64 // index of the last entry saved to the wal
encoder *encoder // encoder to encode records
locks []*fileutil.LockedFile // the locked files the WAL holds (the name is increasing)
fp *filePipeline
}
func (w *WAL) Save(hardState raftpb.HardState, entries []raftpb.Entry) error {
w.mu.Lock()
defer w.mu.Unlock()
// short cut, do not call sync
if raft.IsEmptyHardState(hardState) && len(entries) == 0 {
return nil
}
mustSync := raft.MustSync(hardState, w.state, len(entries))
for i := range entries {
if err := w.saveEntry(&entries[i]); err != nil {
return err
}
}
if err := w.saveState(&hardState); err != nil {
return err
}
curOff, err := w.tail().Seek(0, io.SeekCurrent)
if err != nil {
return err
}
if curOff < SegmentSizeBytes {
if mustSync {
return w.sync()
}
return nil
}
return w.cut()
}
func (w *WAL) saveEntry(e *raftpb.Entry) error {
b := pbutil.MustMarshal(e)
rec := &walpb.Record{Type: entryType, Data: b}
if err := w.encoder.encode(rec); err != nil {
return err
}
w.enti = e.Index
return nil
}
func (w *WAL) saveState(s *raftpb.HardState) error {
if raft.IsEmptyHardState(*s) {
return nil
}
w.state = *s
b := pbutil.MustMarshal(s)
rec := &walpb.Record{Type: stateType, Data: b}
return w.encoder.encode(rec)
}
// cut closes current file written and creates a new one ready to append.
// cut first creates a temp wal file and writes necessary headers into it.
// Then cut atomically rename temp wal file to a wal file.
func (w *WAL) cut() error {
// close old wal file; truncate to avoid wasting space if an early cut
off, serr := w.tail().Seek(0, io.SeekCurrent)
if serr != nil {
return serr
}
if err := w.tail().Truncate(off); err != nil {
return err
}
if err := w.sync(); err != nil {
return err
}
fpath := filepath.Join(w.dir, walName(w.seq()+1, w.enti+1))
// create a temp wal file with name sequence + 1, or truncate the existing one
newTail, err := w.fp.Open()
if err != nil {
return err
}
// update writer and save the previous crc
w.locks = append(w.locks, newTail)
prevCrc := w.encoder.crc.Sum32()
w.encoder, err = newFileEncoder(w.tail().File, prevCrc)
if err != nil {
return err
}
if err = w.saveCrc(prevCrc); err != nil {
return err
}
if err = w.encoder.encode(&walpb.Record{Type: metadataType, Data: w.metadata}); err != nil {
return err
}
if err = w.saveState(&w.state); err != nil {
return err
}
// atomically move temp wal file to wal file
if err = w.sync(); err != nil {
return err
}
off, err = w.tail().Seek(0, io.SeekCurrent)
if err != nil {
return err
}
if err = os.Rename(newTail.Name(), fpath); err != nil {
return err
}
start := time.Now()
if err = fileutil.Fsync(w.dirFile); err != nil {
return err
}
walFsyncSec.Observe(time.Since(start).Seconds())
// reopen newTail with its new path so calls to Name() match the wal filename format
newTail.Close()
if newTail, err = fileutil.LockFile(fpath, os.O_WRONLY, fileutil.PrivateFileMode); err != nil {
return err
}
if _, err = newTail.Seek(off, io.SeekStart); err != nil {
return err
}
w.locks[len(w.locks)-1] = newTail
prevCrc = w.encoder.crc.Sum32()
w.encoder, err = newFileEncoder(w.tail().File, prevCrc)
if err != nil {
return err
}
klog.Infof(fmt.Sprintf("created a new WAL segment path:%s", fpath))
return nil
}
func (w *WAL) saveCrc(prevCrc uint32) error {
return w.encoder.encode(&walpb.Record{Type: crcType, Crc: prevCrc})
}
func (w *WAL) SaveSnapshot(e walpb.Snapshot) error {
if err := walpb.ValidateSnapshotForWrite(&e); err != nil {
return err
}
b := pbutil.MustMarshal(&e)
w.mu.Lock()
defer w.mu.Unlock()
rec := &walpb.Record{Type: snapshotType, Data: b}
if err := w.encoder.encode(rec); err != nil {
return err
}
// update enti only when snapshot is ahead of last index
if w.enti < e.Index {
w.enti = e.Index
}
return w.sync()
}
// 系统调用把snapshot写入持久化磁盘里
func (w *WAL) sync() error {
if w.encoder != nil {
if err := w.encoder.flush(); err != nil {
return err
}
}
start := time.Now()
err := fileutil.Fdatasync(w.tail().File)
took := time.Since(start)
if took > warnSyncDuration {
klog.Errorf(fmt.Sprintf("sync duration of %v, expected less than %v", took, warnSyncDuration))
}
walFsyncSec.Observe(took.Seconds())
return err
}
func (w *WAL) tail() *fileutil.LockedFile {
if len(w.locks) > 0 {
return w.locks[len(w.locks)-1]
}
return nil
}
// walName: %016x-%016x.wal, seq-index
func (w *WAL) seq() uint64 {
t := w.tail()
if t == nil {
return 0
}
seq, _, err := parseWALName(filepath.Base(t.Name()))
if err != nil {
klog.Fatalf(fmt.Sprintf("failed to parse WAL name:%s err:%v", t.Name(), err))
}
return seq
}
func (w *WAL) renameWAL(tmpdirpath string) (*WAL, error) {
if | sible the process will fork to spawn a process while this is
// happening. The fds are set up as close-on-exec by the Go runtime,
// but there is a window between the fork and the exec where another
// process holds the lock.
if err := os.Rename(tmpdirpath, w.dir); err != nil {
if _, ok := err.(*os.LinkError); ok {
return w.renameWALUnlock(tmpdirpath)
}
return nil, err
}
w.fp = newFilePipeline(w.dir, SegmentSizeBytes)
df, err := fileutil.OpenDir(w.dir)
w.dirFile = df
return w, err
}
func (w *WAL) renameWALUnlock(tmpdirpath string) (*WAL, error) {
// rename of directory with locked files doesn't work on windows/cifs;
// close the WAL to release the locks so the directory can be renamed.
klog.Infof(fmt.Sprintf("closing WAL to release flock and retry directory renaming from %s to %s", tmpdirpath, w.dir))
w.Close()
if err := os.Rename(tmpdirpath, w.dir); err != nil {
return nil, err
}
// reopen and relock
newWAL, oerr := Open(w.dir, walpb.Snapshot{})
if oerr != nil {
return nil, oerr
}
if _, _, _, err := newWAL.ReadAll(); err != nil {
newWAL.Close()
return nil, err
}
return newWAL, nil
}
// ReadAll ReadAll函数负责从当前WAL实例中读取所有的记录
// 如果是可写模式,那么必须独处所有的记录,否则将报错
// 如果是只读模式,将尝试读取所有的记录,但是如果读出的记录没有满足快照数据的要求,将返回ErrSnapshotNotFound
// 而如果读出来的快照数据与要求的快照数据不匹配,返回所有的记录以及ErrSnapshotMismatch
// ReadAll reads out records of the current WAL.
// If opened in write mode, it must read out all records until EOF. Or an error
// will be returned.
// If opened in read mode, it will try to read all records if possible.
// If it cannot read out the expected snap, it will return ErrSnapshotNotFound.
// If loaded snap doesn't match with the expected one, it will return
// all the records and error ErrSnapshotMismatch.
// INFO: detect not-last-snap error.
// INFO: maybe loose the checking of match.
// After ReadAll, the WAL will be ready for appending new records.
func (w *WAL) ReadAll() (metadata []byte, state raftpb.HardState, ents []raftpb.Entry, err error) {
w.mu.Lock()
defer w.mu.Unlock()
rec := &walpb.Record{}
decoder := w.decoder
var match bool
// 循环读出record记录
for err = decoder.decode(rec); err == nil; err = decoder.decode(rec) {
switch rec.Type {
case entryType:
e := mustUnmarshalEntry(rec.Data)
if e.Index > w.start.Index {
ents = append(ents[:e.Index-w.start.Index-1], e)
}
w.enti = e.Index
case stateType:
state = mustUnmarshalState(rec.Data)
case metadataType:
if metadata != nil && !bytes.Equal(metadata, rec.Data) {
state.Reset()
return nil, state, nil, ErrMetadataConflict
}
metadata = rec.Data
case crcType:
crc := decoder.crc.Sum32()
// current crc of decoder must match the crc of the record.
// do no need to match 0 crc, since the decoder is a new one at this case.
if crc != 0 && rec.Validate(crc) != nil {
state.Reset()
return nil, state, nil, ErrCRCMismatch
}
decoder.updateCRC(rec.Crc)
case snapshotType:
var snap walpb.Snapshot
pbutil.MustUnmarshal(&snap, rec.Data)
if snap.Index == w.start.Index {
if snap.Term != w.start.Term {
state.Reset()
return nil, state, nil, ErrSnapshotMismatch
}
match = true
}
default:
state.Reset()
return nil, state, nil, fmt.Errorf("unexpected block type %d", rec.Type)
}
}
// 到了这里,读取WAL日志的循环就结束了,中间可能出错
// 如果读完了所有文件,则err = io.EOF
// 如果是没有读完文件而中间有其他错误,那么err就不是EOF了,下面会分别处理只读模式和写模式
switch w.tail() {
case nil:
// We do not have to read out all entries in read mode.
// The last record maybe a partial written one, so
// ErrunexpectedEOF might be returned.
if err != io.EOF && err != io.ErrUnexpectedEOF {
state.Reset()
return nil, state, nil, err
}
default:
// We must read all of the entries if WAL is opened in write mode.
if err != io.EOF {
state.Reset()
return nil, state, nil, err
}
// decodeRecord() will return io.EOF if it detects a zero record,
// but this zero record may be followed by non-zero records from
// a torn write. Overwriting some of these non-zero records, but
// not all, will cause CRC errors on WAL open. Since the records
// were never fully synced to disk in the first place, it's safe
// to zero them out to avoid any CRC errors from new writes.
if _, err = w.tail().Seek(w.decoder.lastOffset(), io.SeekStart); err != nil {
return nil, state, nil, err
}
if err = fileutil.ZeroToEnd(w.tail().File); err != nil {
return nil, state, nil, err
}
}
err = nil
if !match {
err = ErrSnapshotNotFound
}
// close decoder, disable reading
if w.readClose != nil {
w.readClose()
w.readClose = nil
}
w.start = walpb.Snapshot{}
w.metadata = metadata
if w.tail() != nil {
// create encoder (chain crc with the decoder), enable appending
w.encoder, err = newFileEncoder(w.tail().File, w.decoder.lastCRC())
if err != nil {
return
}
}
w.decoder = nil
return metadata, state, ents, err
}
// Close closes the current WAL file and directory.
func (w *WAL) Close() error {
w.mu.Lock()
defer w.mu.Unlock()
if w.fp != nil {
w.fp.Close()
w.fp = nil
}
if w.tail() != nil {
if err := w.sync(); err != nil {
return err
}
}
for _, l := range w.locks {
if l == nil {
continue
}
if err := l.Close(); err != nil {
klog.Errorf(fmt.Sprintf("failed to unlock during closing wal: %v", err))
}
}
return w.dirFile.Close()
}
func (w *WAL) cleanupWAL() {
var err error
if err = w.Close(); err != nil {
klog.Fatalf(fmt.Sprintf("failed to close WAL during cleanup err:%v", err))
}
brokenDirName := fmt.Sprintf("%s.broken.%v", w.dir, time.Now().Format("20060102.150405.999999"))
if err = os.Rename(w.dir, brokenDirName); err != nil {
klog.Fatalf(fmt.Sprintf("failed to rename WAL during cleanup source-path:%s rename-path:%s err:%v", w.dir, brokenDirName, err))
}
}
// Create creates a WAL ready for appending records. The given metadata is
// recorded at the head of each WAL file, and can be retrieved with ReadAll.
func Create(dataDir string, metadata []byte) (*WAL, error) {
if Exist(dataDir) {
return nil, os.ErrExist
}
// keep temporary wal directory so WAL initialization appears atomic
tmpdirpath := filepath.Clean(dataDir) + ".tmp"
if fileutil.Exist(tmpdirpath) {
if err := os.RemoveAll(tmpdirpath); err != nil {
return nil, err
}
}
defer os.RemoveAll(tmpdirpath)
if err := fileutil.CreateDirAll(tmpdirpath); err != nil {
klog.Errorf(fmt.Sprintf("[WAL Create]failed to create a temporary WAL directory tmp-dir-path:%s dir-path:%s err:%v", tmpdirpath, dataDir, err))
return nil, err
}
path := filepath.Join(tmpdirpath, walName(0, 0))
f, err := fileutil.LockFile(path, os.O_WRONLY|os.O_CREATE, fileutil.PrivateFileMode)
if err != nil {
klog.Errorf(fmt.Sprintf("[WAL Create]failed to flock an initial WAL file path:%s err:%v", path, err))
return nil, err
}
if _, err = f.Seek(0, io.SeekEnd); err != nil {
klog.Errorf(fmt.Sprintf("[WAL Create]failed to seek an initial WAL file path:%s err:%v", path, err))
return nil, err
}
if err = fileutil.Preallocate(f.File, SegmentSizeBytes, true); err != nil {
klog.Errorf(fmt.Sprintf("[WAL Create]failed to preallocate an initial WAL file path:%s segment-bytes:%d err:%v", path, SegmentSizeBytes, err))
return nil, err
}
// INFO: crcType -> metadataType -> snapshotType -> entryType -> stateType
w := &WAL{
dir: dataDir,
metadata: metadata,
}
w.encoder, err = newFileEncoder(f.File, 0)
if err != nil {
return nil, err
}
w.locks = append(w.locks, f)
if err = w.saveCrc(0); err != nil {
return nil, err
}
if err = w.encoder.encode(&walpb.Record{Type: metadataType, Data: metadata}); err != nil {
return nil, err
}
if err = w.SaveSnapshot(walpb.Snapshot{}); err != nil {
return nil, err
}
if w, err = w.renameWAL(tmpdirpath); err != nil {
klog.Errorf(fmt.Sprintf("[WAL Create]failed to rename the temporary WAL directory tmp-dir-path:%s dir-path:%s err:%v", tmpdirpath, w.dir, err))
return nil, err
}
defer func() {
if err != nil {
w.cleanupWAL()
}
}()
// directory was renamed; sync parent dir to persist rename
parentDir, err := fileutil.OpenDir(filepath.Dir(w.dir))
if err != nil {
klog.Errorf(fmt.Sprintf("[WAL Create]failed to open the parent data directory parent-dir-path:%s dir-path:%s err:%v", filepath.Dir(w.dir), w.dir, err))
return nil, err
}
defer parentDir.Close()
if err = fileutil.Fsync(parentDir); err != nil {
klog.Errorf(fmt.Sprintf("[WAL Create]failed to fsync the parent data directory file parent-dir-path:%s dir-path:%s err:%v", filepath.Dir(w.dir), w.dir, err))
return nil, err
}
return w, nil
}
// Open opens the WAL at the given snap.
// The snap SHOULD have been previously saved to the WAL, or the following
// ReadAll will fail.
// The returned WAL is ready to read and the first record will be the one after
// the given snap. The WAL cannot be appended to before reading out all of its
// previous records.
func Open(dirpath string, snap walpb.Snapshot) (*WAL, error) {
w, err := openAtIndex(dirpath, snap, true)
if err != nil {
return nil, err
}
if w.dirFile, err = fileutil.OpenDir(w.dir); err != nil {
return nil, err
}
return w, nil
}
| err := os.RemoveAll(w.dir); err != nil {
return nil, err
}
// On non-Windows platforms, hold the lock while renaming. Releasing
// the lock and trying to reacquire it quickly can be flaky because
// it's pos | identifier_body |
car_type_management.js | $(document).ready(function(){
initTable();
vadidateModal();
$('#search_text').keydown(function (e) {
if (e.keyCode === 13) {
$('#search_btn').click();
}
});
});
function addCarType() {
$('#add_car_type_form').data('bootstrapValidator').validate();
if(!$('#add_car_type_form').data('bootstrapValidator').isValid()){
return ;
}
$('#add_modal').modal('hide');
var car_name = $('#add_car_name').val();
var car_brand = $('#add_car_brand').val();
var daily_rent = $('#add_daily_rent').val();
var car_deposit = $('#add_car_deposit').val();
var car_type = $('#add_car_type').val();
// var car_picture = $('#user-photo').cropper('getCroppedCanvas', {
// width: 300,
| // height: 300
// }).toDataURL('image/png');
if(document.getElementById('user-photo').src == "") {
alert('请输入图片')
return
}
var car_picture = getBase64Image(document.getElementById('user-photo'))
var data = {
"car_name": car_name,
"car_brand": car_brand,
"daily_rent": daily_rent,
"car_deposit": car_deposit,
"car_type": car_type,
"car_picture": car_picture
};
$.ajax({
type: "post",
url: "addCarTypeServlet",
data: data,
dataType: "json",
async: false,
success: function(json) {
if(parseInt(json.code) === 1) {
alert("添加失败!");
}
else {
alert("添加成功!");
}
}
});
$('#car_type_info').bootstrapTable('refresh');
resetModal();
}
function searchCarType() {
var car_name = $("#search_text").val();
var data = {
"car_name": car_name
};
$.ajax({
type: "post",
url: "searchCarTypeServlet",
data: data,
dataType: "json",
success: function(json){
$('#car_info').bootstrapTable('load', json);
}
});
$('#search_text').val('');
}
function initTable() {
$('#car_type_info').bootstrapTable('destroy');
$("#car_type_info").bootstrapTable({
//使用post请求到服务器获取数据
method: "post",
//获取数据的Servlet地址
url: "carTypeServlet",
//表格显示条纹
striped: true,
//启动分页
pagination: true,
//每页显示的记录数
pageSize: 20,
//记录数可选列表
pageList: [10, 15, 20, 25, 30],
//是否启用查询
search: false,
//显示下拉框勾选要显示的列
showColumns: true,
//显示刷新按钮
showRefresh: true,
//设置是由客户端分页还是由服务端分页
sidePagination: "client",
columns: [{
field: 'car_name',
title: '车辆名称',
sortable: true
}, {
field: 'car_brand',
title: '品牌名称',
type: 'text',
editable: {
title: '输入品牌名称',
type: 'text',
validate: function(v) {
if (!v) {
return '品牌名称不能为空';
}
}
}
}, {
field: 'daily_rent',
title: '日租金',
sortable: true,
editable: {
title: '输入日租金',
type: 'text',
validate: function(v) {
if (!v) {
return '日租金不能为空';
}
else if (parseFloat(v) < 0) {
return '日租金不能小于0';
}
}
}
}, {
field: 'car_deposit',
title: '所需押金',
sortable: true,
editable: {
title: '输入所需押金',
type: 'text',
validate: function(v) {
if (!v) {
return '所需押金不能为空';
}
else if (parseFloat(v) < 0) {
return '所需押金不能小于0';
}
}
}
}, {
field: 'car_type',
title: '车类',
editable: {
title: '选择车类',
type: 'select',
source: [{
value: '经济型',
text: '经济型'
}, {
value: '商务型',
text: '商务型'
}, {
value: '豪华型',
text: '豪华型'
}]
}
}, {
field: 'car_picture',
title: '车辆图片',
sortable: true,
align: 'center',
formatter: function(value,row,index){
if (value!=null)
return '<img src="'+ value +'" class="img-rounded" >';
else
return null
}
},{
field: 'button',
title: '操作',
events: operateEvents = {
'click #delete_button': function (e, value, row) {
var data = {
"car_name": row.car_name
};
$.ajax({
type: "post",
url: "deleteCarTypeServlet",
data: data,
dataType: "json",
success: function(json){
if(parseInt(json.code) === 1) {
alert("删除失败!");
}
else {
alert("删除成功!");
$('#car_type_info').bootstrapTable('refresh');
}
}
});
}
},
formatter: function () {
return ['<button id="delete_button" class="btn btn-default">删除</button>'].join('');
}
}],
onEditableSave: function(field, row) {
var data = {
"car_name": row.car_name,
"car_brand": row.car_brand,
"daily_rent": row.daily_rent,
"car_deposit": row.car_deposit,
"car_type": row.car_type,
"car_picture": row.car_picture
};
$.ajax({
type: "post",
url: "updateCarTypeServlet",
data: data,
dataType: "json",
async: false,
success: function(json) {
if(parseInt(json.code) === 1) {
alert("更改失败!");
}
else {
alert("更改成功!");
}
}
});
}
});
}
function vadidateModal() {
$('#add_car_type_form').bootstrapValidator({
feedbackIcons: {
invalid: 'glyphicon glyphicon-remove',
validating: 'glyphicon glyphicon-refresh'
},
fields: {
car_name: {
validators: {
notEmpty: {
message: '车辆名称不能为空'
}
}
},
car_brand: {
validators: {
notEmpty: {
message: '品牌名称不能为空'
}
}
},
daily_rent: {
validators: {
notEmpty: {
message: '日租金不能为空'
},
regexp: {
regexp: /^[0-9]+$/,
message: '日租金必须为非负数'
}
}
},
car_deposit: {
validators: {
notEmpty: {
message: '所需押金不能为空'
},
regexp: {
regexp: /^[0-9]+$/,
message: '所需押金必须为非负数'
}
}
},
car_picture:{
validators: {
notEmpty: {
message: '车辆图片不能为空'
}
}
}
}
});
}
var initCropperInModal = function(img, input, modal){
var $image = img;
var $inputImage = input;
var $modal = modal;
var options = {
aspectRatio: 16/9, // 纵横比
viewMode: 2,
preview: '.img-preview' // 预览图的class名
};
// 模态框隐藏后需要保存的数据对象
var saveData = {};
var URL = window.URL || window.webkitURL;
var blobURL;
$modal.on('show.bs.modal',function () {
// 如果打开模态框时没有选择文件就点击“打开图片”按钮
if(!$inputImage.val()){
$inputImage.click();
}
}).on('shown.bs.modal', function () {
// 重新创建
$image.cropper( $.extend(options, {
ready: function () {
// 当剪切界面就绪后,恢复数据
if(saveData.canvasData){
$image.cropper('setCanvasData', saveData.canvasData);
$image.cropper('setCropBoxData', saveData.cropBoxData);
}
}
}));
}).on('hidden.bs.modal', function () {
// 保存相关数据
saveData.cropBoxData = $image.cropper('getCropBoxData');
saveData.canvasData = $image.cropper('getCanvasData');
// 销毁并将图片保存在img标签
$image.cropper('destroy').attr('src',blobURL);
});
if (URL) {
$inputImage.change(function() {
var files = this.files;
var file;
if (!$image.data('cropper')) {
return;
}
if (files && files.length) {
file = files[0];
if (/^image\/\w+$/.test(file.type)) {
if(blobURL) {
URL.revokeObjectURL(blobURL);
}
blobURL = URL.createObjectURL(file);
// 重置cropper,将图像替换
$image.cropper('reset').cropper('replace', blobURL);
// 选择文件后,显示和隐藏相关内容
$('.img-container').removeClass('hidden');
$('.img-preview-box').removeClass('hidden');
$('#changeModal .disabled').removeAttr('disabled').removeClass('disabled');
$('#changeModal .tip-info').addClass('hidden');
} else {
window.alert('请选择一个图像文件!');
}
}
});
} else {
$inputImage.prop('disabled', true).addClass('disabled');
}
}
function getBase64Image(img) {
var canvas = document.createElement("canvas");
canvas.width = img.width;
canvas.height = img.height;
var ctx = canvas.getContext("2d");
ctx.drawImage(img, 0, 0, img.width, img.height);
var dataURL = canvas.toDataURL("image/png");
return dataURL
// return dataURL.replace("data:image/png;base64,", "");
}
var sendPhoto = function () {
// 得到PNG格式的dataURL
$('#photo').cropper('getCroppedCanvas',{
width:320,
height:180
}).toBlob(function(blob){
// 转化为blob后更改src属性,隐藏模态框
$('#user-photo').attr('src',URL.createObjectURL(blob));
$('#changeModal').modal('hide');
});
$('#changeModal').modal('hide');
}
$(function(){
initCropperInModal($('#photo'),$('#photoInput'),$('#changeModal'));
});
function resetModal() {
$('#add_car_type_form').find('input').val('');
$('#add_car_type_form').data('bootstrapValidator').destroy();
$('#add_car_type_form').data('bootstrapValidator', null);
$('#add_car_type').selectpicker('refresh');
vadidateModal();
} | random_line_split | |
car_type_management.js | $(document).ready(function(){
initTable();
vadidateModal();
$('#search_text').keydown(function (e) {
if (e.keyCode === 13) {
$('#search_btn').click();
}
});
});
function addCarType() {
$('#add_car_type_form').data('bootstrapValidator').validate();
if(!$('#add_car_type_form').data('bootstrapValidator').isValid()){
return ;
}
$('#add_modal').modal('hide');
var car_name = $('#add_car_name').val();
var car_brand = $('#add_car_brand').val();
var daily_rent = $('#add_daily_rent').val();
var car_deposit = $('#add_car_deposit').val();
var car_type = $('#add_car_type').val();
// var car_picture = $('#user-photo').cropper('getCroppedCanvas', {
// width: 300,
// height: 300
// }).toDataURL('image/png');
if(document.getElementById('user-photo').src == "") {
alert('请输入图片')
return
}
var car_picture = getBase64Image(document.getElementById('user-photo'))
var data = {
"car_name": car_name,
"car_brand": car_brand,
"daily_rent": daily_rent,
"car_deposit": car_deposit,
"car_type": car_type,
"car_picture": car_picture
};
$.ajax({
type: "post",
url: "addCarTypeServlet",
data: data,
dataType: "json",
async: false,
success: function(json) {
if(parseInt(json.code) === 1) {
alert("添加失败!");
}
else {
alert("添加成功!");
}
}
});
$('#car_type_info').bootstrapTable('refresh');
resetModal();
}
function searchCarType() {
var car_name = $("#search_text").val();
var data = {
"car_name": car_name
};
$.ajax({
type: "post",
url: "searchCarTypeServlet",
data: data,
dataType: "json",
success: function(json){
$('#car_info').bootstrapTable('load', json);
}
});
$('#search_text').val('');
}
function initTable() {
$('#car_type_info').bootstrapTable('destroy');
$("#car_type_info").bootstrapTable({
//使用post请求到服务器获取数据
method: "post",
//获取数据的Servlet地址
url: "carTypeServlet",
//表格显示条纹
striped: true,
//启动分页
pagination: true,
//每页显示的记录数
pageSize: 20,
//记录数可选列表
pageList: [10, 15, 20, 25, 30],
//是否启用查询
search: false,
//显示下拉框勾选要显示的列
showColumns: true,
//显示刷新按钮
showRefresh: true,
//设置是由客户端分页还是由服务端分页
sidePagination: "client",
columns: [{
field: 'car_name',
title: '车辆名称',
sortable: true
}, {
field: 'car_brand',
title: '品牌名称',
type: 'text',
editable: {
title: '输入品牌名称',
type: 'text',
validate: function(v) {
if (!v) {
return '品牌名称不能为空';
}
}
}
}, {
field: 'daily_rent',
title: '日租金',
sortable: true,
editable: {
title: '输入日租金',
type: 'text',
validate: function(v) {
if (!v) {
return '日租金不能为空';
}
else if (parseFloat(v) < 0) {
return '日租金不能小于0';
}
}
}
}, {
field: 'car_deposit',
title: '所需押金',
sortable: true,
editable: {
title: '输入所需押金',
type: 'text',
validate: function(v) {
if (!v) {
return '所需押金不能为空';
}
else if (parseFloat(v) < 0) {
return '所需押金不能小于0';
}
}
}
}, {
field: 'car_type',
title: '车类',
editable: {
title: '选择车类',
type: 'select',
source: [{
value: '经济型',
text: '经济型'
}, {
value: '商务型',
text: '商务型'
}, {
value: '豪华型',
text: '豪华型'
}]
}
}, {
field: 'car_picture',
title: '车辆图片',
sortable: true,
align: 'center',
formatter: function(value,row,index){
if (value!=null)
return '<img src="'+ value +'" class="img-rounded" >';
else
return null
}
},{
field: 'button',
title: '操作',
events: operateEvents = {
'click #delete_button': function (e, value, row) {
var data = {
"car_name": row.car_name
};
$.ajax({
type: "post",
url: "deleteCarTypeServlet",
data: data,
dataType: "json",
success: function(json){
if(parseInt(json.code) === 1) {
alert("删除失败!");
}
else {
alert("删除成功!");
$('#car_type_info').bootstrapTable('refresh');
}
}
});
}
},
formatter: function () {
return ['<button id="delete_button" class="btn btn-default">删除</button>'].join('');
}
}],
onEditableSave: function(field, row) {
var data = {
"car_name": row.car_name,
"car_brand": row.car_brand,
"daily_rent": row.daily_rent,
"car_deposit": row.car_deposit,
"car_type": row.car_type,
"car_picture": row.car_picture
};
$.ajax({
type: "post",
url: "updateCarTypeServlet",
data: data,
dataType: "json",
async: false,
success: function(json) {
if(parseInt(json.code) === 1) {
alert("更改失败!");
}
else {
alert("更改成功!");
}
}
});
}
});
}
function vadidateModal() {
$('#add_car_type_form').bootstrapValidator({
feedbackIcons: {
invalid: 'glyphicon glyphicon-remove',
validating: 'glyphicon glyphicon-refresh'
},
fields: {
car_name: {
validators: {
notEmpty: {
message: '车辆名称不能为空'
}
}
},
car_brand: {
validators: {
notEmpty: {
message: '品牌名称不能为空'
}
}
},
daily_rent: {
validators: {
notEmpty: {
message: '日租金不能为空'
},
regexp: {
regexp: /^[0-9]+$/,
message: '日租金必须为非负数'
}
}
},
car_deposit: {
validators: {
notEmpty: {
message: '所需押金不能为空'
},
regexp: {
regexp: /^[0-9]+$/,
message: '所需押金必须为非负数'
}
}
},
car_picture:{
validators: {
notEmpty: {
message: '车辆图片不能为空'
}
}
}
}
});
}
var initCropperInModal = function(img, input, modal){
var $image = img;
var $inputImage = input;
var $modal = modal;
var options = {
aspectRatio: 16/9, // 纵横比
viewMode: 2,
preview: '.img-preview' // 预览图的class名
};
// 模态框隐藏后需要保存的数据对象
var saveData = {};
var URL = window.URL || window.webkitURL;
var blobURL;
$modal.on('show.bs.modal',function () {
// 如果打开模态框时没有选择文件就点击“打开图片”按钮
if(!$inputImage.val()){
$inputImage.click();
}
}).on('shown.bs.modal', function () {
// 重新创建
$image.cropper( $.extend(options, {
ready: function () {
// 当剪切界面就绪后,恢复数据
if(saveData.canvasData){
$image.cropper('setCanvasData', saveData.canvasData);
$image.cropper('setCropBoxData', saveData.cropBoxData);
}
}
}));
}).on('hidden.bs.modal', function () {
// 保存相关数据
saveData.cropBoxData = $image.cropper('getCropBoxData');
saveData.canvasData = $image.cropper('getCanvasData');
// 销毁并将图片保存在img标签
$image.cropper('destroy').attr('src',blobURL);
});
if (URL) {
$inputImage.change(function() {
var files = this.files;
var file;
if (!$image.data('cropper')) {
return;
}
if (files && files.length) {
file = files[0];
if (/^image\/\w+$/.test(file.type)) {
if(blobURL) {
URL.revokeObjectURL(blobURL);
}
blobURL = URL.createObjectURL(file);
// 重置cropper,将图像替换
$image.cropper('reset').cropper('replace', blobURL);
// 选择文件后,显示和隐藏相关内容
$('.img-container').removeClass('hidden');
$('.img-preview-box').removeClass('hidden');
$('#changeModal .disabled').removeAttr('disabled').removeClass('disabled');
$('#changeModal .tip-info').addClass('hidden');
} else {
window.alert('请选择一个图像文件!');
}
}
});
} else {
$inputImage.prop('disabled', true).addClass('disabled');
}
}
function getBase64Image(img) {
var canvas = document.createElement("canvas");
canvas.width = img.width;
canvas.height = img.height;
var ctx = canvas.getContext("2d");
ctx.drawImage(img, 0, 0, img.width, img.height);
var dataURL = canvas.toDataURL("image/png");
return dataURL
// return dataURL.replace("data:image/png;base64,", "");
}
var sendPhoto = function () {
// 得到PNG格式的dataURL
$('#photo').cropper('getCroppedCanvas',{
width:320,
height:180
}).toBlob(function(blob){
// 转化为blob后更改src属性,隐藏模态框
$('#user-photo').attr('src',URL.createObjectURL(blob));
$('#changeModal').modal('hide');
});
$('#changeModal').modal('hide');
}
$(function(){
initCropperInModal($('# | otoInput'),$('#changeModal'));
});
function resetModal() {
$('#add_car_type_form').find('input').val('');
$('#add_car_type_form').data('bootstrapValidator').destroy();
$('#add_car_type_form').data('bootstrapValidator', null);
$('#add_car_type').selectpicker('refresh');
vadidateModal();
} | photo'),$('#ph | identifier_name |
car_type_management.js | $(document).ready(function(){
initTable();
vadidateModal();
$('#search_text').keydown(function (e) {
if (e.keyCode === 13) {
$('#search_btn').click();
}
});
});
function addCarType() | var car_name = $("#search_text").val();
var data = {
"car_name": car_name
};
$.ajax({
type: "post",
url: "searchCarTypeServlet",
data: data,
dataType: "json",
success: function(json){
$('#car_info').bootstrapTable('load', json);
}
});
$('#search_text').val('');
}
function initTable() {
$('#car_type_info').bootstrapTable('destroy');
$("#car_type_info").bootstrapTable({
//使用post请求到服务器获取数据
method: "post",
//获取数据的Servlet地址
url: "carTypeServlet",
//表格显示条纹
striped: true,
//启动分页
pagination: true,
//每页显示的记录数
pageSize: 20,
//记录数可选列表
pageList: [10, 15, 20, 25, 30],
//是否启用查询
search: false,
//显示下拉框勾选要显示的列
showColumns: true,
//显示刷新按钮
showRefresh: true,
//设置是由客户端分页还是由服务端分页
sidePagination: "client",
columns: [{
field: 'car_name',
title: '车辆名称',
sortable: true
}, {
field: 'car_brand',
title: '品牌名称',
type: 'text',
editable: {
title: '输入品牌名称',
type: 'text',
validate: function(v) {
if (!v) {
return '品牌名称不能为空';
}
}
}
}, {
field: 'daily_rent',
title: '日租金',
sortable: true,
editable: {
title: '输入日租金',
type: 'text',
validate: function(v) {
if (!v) {
return '日租金不能为空';
}
else if (parseFloat(v) < 0) {
return '日租金不能小于0';
}
}
}
}, {
field: 'car_deposit',
title: '所需押金',
sortable: true,
editable: {
title: '输入所需押金',
type: 'text',
validate: function(v) {
if (!v) {
return '所需押金不能为空';
}
else if (parseFloat(v) < 0) {
return '所需押金不能小于0';
}
}
}
}, {
field: 'car_type',
title: '车类',
editable: {
title: '选择车类',
type: 'select',
source: [{
value: '经济型',
text: '经济型'
}, {
value: '商务型',
text: '商务型'
}, {
value: '豪华型',
text: '豪华型'
}]
}
}, {
field: 'car_picture',
title: '车辆图片',
sortable: true,
align: 'center',
formatter: function(value,row,index){
if (value!=null)
return '<img src="'+ value +'" class="img-rounded" >';
else
return null
}
},{
field: 'button',
title: '操作',
events: operateEvents = {
'click #delete_button': function (e, value, row) {
var data = {
"car_name": row.car_name
};
$.ajax({
type: "post",
url: "deleteCarTypeServlet",
data: data,
dataType: "json",
success: function(json){
if(parseInt(json.code) === 1) {
alert("删除失败!");
}
else {
alert("删除成功!");
$('#car_type_info').bootstrapTable('refresh');
}
}
});
}
},
formatter: function () {
return ['<button id="delete_button" class="btn btn-default">删除</button>'].join('');
}
}],
onEditableSave: function(field, row) {
var data = {
"car_name": row.car_name,
"car_brand": row.car_brand,
"daily_rent": row.daily_rent,
"car_deposit": row.car_deposit,
"car_type": row.car_type,
"car_picture": row.car_picture
};
$.ajax({
type: "post",
url: "updateCarTypeServlet",
data: data,
dataType: "json",
async: false,
success: function(json) {
if(parseInt(json.code) === 1) {
alert("更改失败!");
}
else {
alert("更改成功!");
}
}
});
}
});
}
function vadidateModal() {
$('#add_car_type_form').bootstrapValidator({
feedbackIcons: {
invalid: 'glyphicon glyphicon-remove',
validating: 'glyphicon glyphicon-refresh'
},
fields: {
car_name: {
validators: {
notEmpty: {
message: '车辆名称不能为空'
}
}
},
car_brand: {
validators: {
notEmpty: {
message: '品牌名称不能为空'
}
}
},
daily_rent: {
validators: {
notEmpty: {
message: '日租金不能为空'
},
regexp: {
regexp: /^[0-9]+$/,
message: '日租金必须为非负数'
}
}
},
car_deposit: {
validators: {
notEmpty: {
message: '所需押金不能为空'
},
regexp: {
regexp: /^[0-9]+$/,
message: '所需押金必须为非负数'
}
}
},
car_picture:{
validators: {
notEmpty: {
message: '车辆图片不能为空'
}
}
}
}
});
}
var initCropperInModal = function(img, input, modal){
var $image = img;
var $inputImage = input;
var $modal = modal;
var options = {
aspectRatio: 16/9, // 纵横比
viewMode: 2,
preview: '.img-preview' // 预览图的class名
};
// 模态框隐藏后需要保存的数据对象
var saveData = {};
var URL = window.URL || window.webkitURL;
var blobURL;
$modal.on('show.bs.modal',function () {
// 如果打开模态框时没有选择文件就点击“打开图片”按钮
if(!$inputImage.val()){
$inputImage.click();
}
}).on('shown.bs.modal', function () {
// 重新创建
$image.cropper( $.extend(options, {
ready: function () {
// 当剪切界面就绪后,恢复数据
if(saveData.canvasData){
$image.cropper('setCanvasData', saveData.canvasData);
$image.cropper('setCropBoxData', saveData.cropBoxData);
}
}
}));
}).on('hidden.bs.modal', function () {
// 保存相关数据
saveData.cropBoxData = $image.cropper('getCropBoxData');
saveData.canvasData = $image.cropper('getCanvasData');
// 销毁并将图片保存在img标签
$image.cropper('destroy').attr('src',blobURL);
});
if (URL) {
$inputImage.change(function() {
var files = this.files;
var file;
if (!$image.data('cropper')) {
return;
}
if (files && files.length) {
file = files[0];
if (/^image\/\w+$/.test(file.type)) {
if(blobURL) {
URL.revokeObjectURL(blobURL);
}
blobURL = URL.createObjectURL(file);
// 重置cropper,将图像替换
$image.cropper('reset').cropper('replace', blobURL);
// 选择文件后,显示和隐藏相关内容
$('.img-container').removeClass('hidden');
$('.img-preview-box').removeClass('hidden');
$('#changeModal .disabled').removeAttr('disabled').removeClass('disabled');
$('#changeModal .tip-info').addClass('hidden');
} else {
window.alert('请选择一个图像文件!');
}
}
});
} else {
$inputImage.prop('disabled', true).addClass('disabled');
}
}
function getBase64Image(img) {
var canvas = document.createElement("canvas");
canvas.width = img.width;
canvas.height = img.height;
var ctx = canvas.getContext("2d");
ctx.drawImage(img, 0, 0, img.width, img.height);
var dataURL = canvas.toDataURL("image/png");
return dataURL
// return dataURL.replace("data:image/png;base64,", "");
}
var sendPhoto = function () {
// 得到PNG格式的dataURL
$('#photo').cropper('getCroppedCanvas',{
width:320,
height:180
}).toBlob(function(blob){
// 转化为blob后更改src属性,隐藏模态框
$('#user-photo').attr('src',URL.createObjectURL(blob));
$('#changeModal').modal('hide');
});
$('#changeModal').modal('hide');
}
$(function(){
initCropperInModal($('#photo'),$('#photoInput'),$('#changeModal'));
});
function resetModal() {
$('#add_car_type_form').find('input').val('');
$('#add_car_type_form').data('bootstrapValidator').destroy();
$('#add_car_type_form').data('bootstrapValidator', null);
$('#add_car_type').selectpicker('refresh');
vadidateModal();
} | {
$('#add_car_type_form').data('bootstrapValidator').validate();
if(!$('#add_car_type_form').data('bootstrapValidator').isValid()){
return ;
}
$('#add_modal').modal('hide');
var car_name = $('#add_car_name').val();
var car_brand = $('#add_car_brand').val();
var daily_rent = $('#add_daily_rent').val();
var car_deposit = $('#add_car_deposit').val();
var car_type = $('#add_car_type').val();
// var car_picture = $('#user-photo').cropper('getCroppedCanvas', {
// width: 300,
// height: 300
// }).toDataURL('image/png');
if(document.getElementById('user-photo').src == "") {
alert('请输入图片')
return
}
var car_picture = getBase64Image(document.getElementById('user-photo'))
var data = {
"car_name": car_name,
"car_brand": car_brand,
"daily_rent": daily_rent,
"car_deposit": car_deposit,
"car_type": car_type,
"car_picture": car_picture
};
$.ajax({
type: "post",
url: "addCarTypeServlet",
data: data,
dataType: "json",
async: false,
success: function(json) {
if(parseInt(json.code) === 1) {
alert("添加失败!");
}
else {
alert("添加成功!");
}
}
});
$('#car_type_info').bootstrapTable('refresh');
resetModal();
}
function searchCarType() {
| identifier_body |
car_type_management.js | $(document).ready(function(){
initTable();
vadidateModal();
$('#search_text').keydown(function (e) {
if (e.keyCode === 13) {
$('#search_btn').click();
}
});
});
function addCarType() {
$('#add_car_type_form').data('bootstrapValidator').validate();
if(!$('#add_car_type_form').data('bootstrapValidator').isValid()){
return ;
}
$('#add_modal').modal('hide');
var car_name = $('#add_car_name').val();
var car_brand = $('#add_car_brand').val();
var daily_rent = $('#add_daily_rent').val();
var car_deposit = $('#add_car_deposit').val();
var car_type = $('#add_car_type').val();
// var car_picture = $('#user-photo').cropper('getCroppedCanvas', {
// width: 300,
// height: 300
// }).toDataURL('image/png');
if(document.getElementById('user-photo').src == "") {
alert('请输入图片')
return
}
var car_picture = getBase64Image(document.getElementById('user-photo'))
var data = {
"car_name": car_name,
"car_brand": car_brand,
"daily_rent": daily_rent,
"car_deposit": car_deposit,
"car_type": car_type,
"car_picture": car_picture
};
$.ajax({
type: "post",
url: "addCarTypeServlet",
data: data,
dataType: "json",
async: false,
success: function(json) {
if(parseInt(json.code) === 1) {
alert("添加失败!");
}
else {
alert("添加成功!");
}
}
});
$('#car_type_info').bootstrapTable('refresh');
resetModal();
}
function searchCarType() {
var car_name = $("#search_text").val();
var data = {
"car_name": car_name
};
$.ajax({
type: "post",
url: "searchCarTypeServlet",
data: data,
dataType: "json",
success: function(json){
$('#car_info').bootstrapTable('load', json);
}
});
$('#search_text').val('');
}
function initTable() {
$('#car_type_info').bootstrapTable('destroy');
$("#car_type_info").bootstrapTable({
//使用post请求到服务器获取数据
method: "post",
//获取数据的Servlet地址
url: "carTypeServlet",
//表格显示条纹
striped: true,
//启动分页
pagination: true,
//每页显示的记录数
pageSize: 20,
//记录数可选列表
pageList: [10, 15, 20, 25, 30],
//是否启用查询
search: false,
//显示下拉框勾选要显示的列
showColumns: true,
//显示刷新按钮
showRefresh: true,
//设置是由客户端分页还是由服务端分页
sidePagination: "client",
columns: [{
field: 'car_name',
title: '车辆名称',
sortable: true
}, {
field: 'car_brand',
title: '品牌名称',
type: 'text',
editable: {
title: '输入品牌名称',
type: 'text',
validate: function(v) {
if (!v) {
return '品牌名称不能为空';
}
}
}
}, {
field: 'daily_rent',
title: '日租金',
sortable: true,
editable: {
title: '输入日租金',
type: 'text',
validate: function(v) {
if (!v) {
return '日租金不能为空';
}
else if (parseFloat(v) < 0) {
return '日租金不能小于0';
}
}
}
}, {
field: 'car_d | itable: {
title: '输入所需押金',
type: 'text',
validate: function(v) {
if (!v) {
return '所需押金不能为空';
}
else if (parseFloat(v) < 0) {
return '所需押金不能小于0';
}
}
}
}, {
field: 'car_type',
title: '车类',
editable: {
title: '选择车类',
type: 'select',
source: [{
value: '经济型',
text: '经济型'
}, {
value: '商务型',
text: '商务型'
}, {
value: '豪华型',
text: '豪华型'
}]
}
}, {
field: 'car_picture',
title: '车辆图片',
sortable: true,
align: 'center',
formatter: function(value,row,index){
if (value!=null)
return '<img src="'+ value +'" class="img-rounded" >';
else
return null
}
},{
field: 'button',
title: '操作',
events: operateEvents = {
'click #delete_button': function (e, value, row) {
var data = {
"car_name": row.car_name
};
$.ajax({
type: "post",
url: "deleteCarTypeServlet",
data: data,
dataType: "json",
success: function(json){
if(parseInt(json.code) === 1) {
alert("删除失败!");
}
else {
alert("删除成功!");
$('#car_type_info').bootstrapTable('refresh');
}
}
});
}
},
formatter: function () {
return ['<button id="delete_button" class="btn btn-default">删除</button>'].join('');
}
}],
onEditableSave: function(field, row) {
var data = {
"car_name": row.car_name,
"car_brand": row.car_brand,
"daily_rent": row.daily_rent,
"car_deposit": row.car_deposit,
"car_type": row.car_type,
"car_picture": row.car_picture
};
$.ajax({
type: "post",
url: "updateCarTypeServlet",
data: data,
dataType: "json",
async: false,
success: function(json) {
if(parseInt(json.code) === 1) {
alert("更改失败!");
}
else {
alert("更改成功!");
}
}
});
}
});
}
function vadidateModal() {
$('#add_car_type_form').bootstrapValidator({
feedbackIcons: {
invalid: 'glyphicon glyphicon-remove',
validating: 'glyphicon glyphicon-refresh'
},
fields: {
car_name: {
validators: {
notEmpty: {
message: '车辆名称不能为空'
}
}
},
car_brand: {
validators: {
notEmpty: {
message: '品牌名称不能为空'
}
}
},
daily_rent: {
validators: {
notEmpty: {
message: '日租金不能为空'
},
regexp: {
regexp: /^[0-9]+$/,
message: '日租金必须为非负数'
}
}
},
car_deposit: {
validators: {
notEmpty: {
message: '所需押金不能为空'
},
regexp: {
regexp: /^[0-9]+$/,
message: '所需押金必须为非负数'
}
}
},
car_picture:{
validators: {
notEmpty: {
message: '车辆图片不能为空'
}
}
}
}
});
}
var initCropperInModal = function(img, input, modal){
var $image = img;
var $inputImage = input;
var $modal = modal;
var options = {
aspectRatio: 16/9, // 纵横比
viewMode: 2,
preview: '.img-preview' // 预览图的class名
};
// 模态框隐藏后需要保存的数据对象
var saveData = {};
var URL = window.URL || window.webkitURL;
var blobURL;
$modal.on('show.bs.modal',function () {
// 如果打开模态框时没有选择文件就点击“打开图片”按钮
if(!$inputImage.val()){
$inputImage.click();
}
}).on('shown.bs.modal', function () {
// 重新创建
$image.cropper( $.extend(options, {
ready: function () {
// 当剪切界面就绪后,恢复数据
if(saveData.canvasData){
$image.cropper('setCanvasData', saveData.canvasData);
$image.cropper('setCropBoxData', saveData.cropBoxData);
}
}
}));
}).on('hidden.bs.modal', function () {
// 保存相关数据
saveData.cropBoxData = $image.cropper('getCropBoxData');
saveData.canvasData = $image.cropper('getCanvasData');
// 销毁并将图片保存在img标签
$image.cropper('destroy').attr('src',blobURL);
});
if (URL) {
$inputImage.change(function() {
var files = this.files;
var file;
if (!$image.data('cropper')) {
return;
}
if (files && files.length) {
file = files[0];
if (/^image\/\w+$/.test(file.type)) {
if(blobURL) {
URL.revokeObjectURL(blobURL);
}
blobURL = URL.createObjectURL(file);
// 重置cropper,将图像替换
$image.cropper('reset').cropper('replace', blobURL);
// 选择文件后,显示和隐藏相关内容
$('.img-container').removeClass('hidden');
$('.img-preview-box').removeClass('hidden');
$('#changeModal .disabled').removeAttr('disabled').removeClass('disabled');
$('#changeModal .tip-info').addClass('hidden');
} else {
window.alert('请选择一个图像文件!');
}
}
});
} else {
$inputImage.prop('disabled', true).addClass('disabled');
}
}
function getBase64Image(img) {
var canvas = document.createElement("canvas");
canvas.width = img.width;
canvas.height = img.height;
var ctx = canvas.getContext("2d");
ctx.drawImage(img, 0, 0, img.width, img.height);
var dataURL = canvas.toDataURL("image/png");
return dataURL
// return dataURL.replace("data:image/png;base64,", "");
}
var sendPhoto = function () {
// 得到PNG格式的dataURL
$('#photo').cropper('getCroppedCanvas',{
width:320,
height:180
}).toBlob(function(blob){
// 转化为blob后更改src属性,隐藏模态框
$('#user-photo').attr('src',URL.createObjectURL(blob));
$('#changeModal').modal('hide');
});
$('#changeModal').modal('hide');
}
$(function(){
initCropperInModal($('#photo'),$('#photoInput'),$('#changeModal'));
});
function resetModal() {
$('#add_car_type_form').find('input').val('');
$('#add_car_type_form').data('bootstrapValidator').destroy();
$('#add_car_type_form').data('bootstrapValidator', null);
$('#add_car_type').selectpicker('refresh');
vadidateModal();
} | eposit',
title: '所需押金',
sortable: true,
ed | conditional_block |
calendar.rs | use crate::{FloatNum, Int};
use std::ops::Rem;
#[derive(Clone)]
pub struct Calendar {
pub mondays: [Int; 13],
pub mona365: [Int; 13],
pub monaccu: [Int; 13],
pub ny400d: Int,
pub ny100d: Int,
pub ny004d: Int,
pub ny001d: Int,
pub nud: Int,
// TODO ?
// These values are copied from pumamod in subroutine calini
pub n_days_per_month: Int,
pub n_days_per_year: Int,
pub n_start_step: Int,
pub ntspd: Int,
pub solar_day: FloatNum,
}
impl Default for Calendar {
fn default() -> Self {
Self {
mondays: [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31],
mona365: [0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365],
monaccu: [0; 13],
ny400d: 400 * 365 + 97,
ny100d: 100 * 365 + 24,
ny004d: 4 * 365 + 1,
ny001d: 365,
nud: 6,
n_days_per_month: 30,
n_days_per_year: 360,
n_start_step: 0,
ntspd: 1,
solar_day: 86400.0, // sec
}
}
}
fn yday2mmdd(cal: &Calendar, mut kyday: &mut Int, mut kmon: &mut Int, mut kday: &mut Int) {
if cal.n_days_per_year == 365 {
*kmon = 1;
while *kyday > cal.mona365[*kmon as usize] {
*kmon = *kmon + 1;
}
*kday = *kyday - cal.monaccu[*kmon as usize - 1];
} else {
*kmon = (*kyday - 1) / cal.n_days_per_month;
*kday = *kyday - cal.n_days_per_month * *kmon;
}
}
fn nweekday(kday: Int) -> Int {
(kday + 5).rem(7) as Int
}
pub fn ndayofyear(kstep: Int, cal: &mut Calendar) -> Int |
// kstep ! time step since simulation start
// ktspd ! time steps per day
// kdatim(7) ! year,month,day,hour,min,weekday,leapyear
fn step2cal(kstep: Int, ktspd: Int, kdatim: &mut [Int; 7], mut cal: &mut Calendar) {
let mut iyea: Int; // current year of simulation
let mut imon: Int; // current month of simulation
let mut iday: Int; // current day of simulation
let mut ihou: Int; // current hour of simulation
let mut imin: Int; // current minute of simulation
let mut idall: Int;
let istp: Int;
let iy400: Int;
let id400: Int;
let iy100: Int;
let id100: Int;
let iy004: Int;
let id004: Int;
let iy001: Int;
let mut id001: Int;
let jmon: Int;
let leap: bool;
idall = kstep / ktspd;
iy400 = idall / cal.ny400d; // segment (of 400 years)
id400 = idall.rem(cal.ny400d);
if id400 <= cal.ny100d {
// century year is leap year
iy100 = 0; // century in segment [0]
id100 = id400;
iy004 = id100 / cal.ny004d; // tetrade in century [0..24]
id004 = id100.rem(cal.ny004d);
leap = id004 <= cal.ny001d;
if leap {
iy001 = 0; // year in tetrade [0]
id001 = id004;
} else {
iy001 = (id004 - 1) / cal.ny001d; // year in tetrade [1,2,3]
id001 = (id004 - 1).rem(cal.ny001d);
}
} else {
// century year is not leap year
iy100 = (id400 - 1) / cal.ny100d; // century in segment [1,2,3]
id100 = (id400 - 1).rem(cal.ny100d);
if id100 < cal.ny004d - 1 {
iy004 = 0; // tetrade in century [0]
id004 = id100;
leap = false;
iy001 = id004 / cal.ny001d; // year in tetrade [1,2,3]
id001 = id004.rem(cal.ny001d);
} else {
iy004 = (id100 + 1) / cal.ny004d; // tetrade in century [1..24]
id004 = (id100 + 1).rem(cal.ny004d);
leap = id004 <= cal.ny001d;
if leap {
iy001 = 0; // year in tetrade [0]
id001 = id004;
} else {
iy001 = (id004 - 1) / cal.ny001d;
id001 = (id004 - 1).rem(cal.ny001d);
}
}
}
iyea = iy400 * 400 + iy100 * 100 + iy004 + 4 + iy001;
cal.monaccu[0] = cal.mondays[0];
cal.monaccu[1] = cal.mondays[1];
cal.monaccu[2] = cal.mondays[1] + cal.mondays[2];
if leap {
cal.monaccu[2] = cal.monaccu[2] + 1;
for jmon in 3..13 {
cal.monaccu[jmon] = cal.monaccu[jmon - 1] + cal.mondays[jmon];
}
imon = 1;
id001 = id001 + 1;
while id001 > cal.monaccu[imon as usize] {
imon = imon + 1;
}
iday = id001 - cal.monaccu[imon as usize - 1];
istp = kstep.rem(ktspd);
imin = (istp * 1440) / ktspd;
ihou = imin / 60;
imin = imin.rem(60);
kdatim[0] = iyea;
kdatim[1] = imon;
kdatim[2] = iday;
kdatim[3] = ihou;
kdatim[4] = imin;
kdatim[5] = (kstep / ktspd + 5).rem(7); // day of week
if leap {
kdatim[6] = 1;
} else {
kdatim[6] = 0;
}
}
}
fn cal2step(
ktspd: Int, // time steps per day
kyea: Int, // current year of simulation
kmon: Int, // current month of simulation
kday: Int, // current day of simulation
khou: Int, // current hour of simulation
kmin: Int, // current minute of simulation
mut kstep: &mut Int, //time step since simulation start
mut cal: &mut Calendar,
) {
let mut idall: Int;
let mut ilp: Int;
let mut iy400: Int;
let mut id400: Int;
let mut iy100: Int;
let mut id100: Int;
let mut iy004: Int;
let mut id004: Int;
let mut jmon: Int;
let leap: bool;
// simplified calendar
if cal.n_days_per_year != 365 {
*kstep =
ktspd * (kyea * cal.n_days_per_year + (kmon - 1) * cal.n_days_per_month + kday - 1);
return;
}
iy400 = kyea / 400; // segment [400]
id400 = kyea.rem(400); // year in segment [0..399]
iy100 = id400 / 100; // century [0,1,2,3]
id100 = id400.rem(100); // year in century [0..99]
iy004 = id100 / 4; // tetrade [0..24]
id004 = id100.rem(4); // year in tetrade [0,1,2,3]
leap = (id004 == 0 && (id100 != 0 || id400 == 0));
ilp = -1;
if id004 > 0 {
ilp = ilp + 1;
}
if iy100 > 0 && id100 == 0 {
ilp = ilp + 1;
}
cal.monaccu[0] = cal.mondays[0];
cal.monaccu[1] = cal.mondays[1];
cal.monaccu[2] = cal.mondays[1];
if leap {
cal.monaccu[2] = cal.monaccu[2] + 1;
}
for jmon in 3..13 {
cal.monaccu[jmon] = cal.monaccu[jmon - 1] + cal.mondays[jmon];
}
idall = iy400 * cal.ny400d
+ iy100 * cal.ny100d
+ iy004 * cal.ny004d
+ id004 * cal.ny001d
+ cal.monaccu[kmon as usize - 1]
+ kday
+ ilp;
*kstep = ktspd * idall + (ktspd * (khou * 60 + kmin)) / 1440;
}
// kstep ! time step since simulation start
// ktspd ! time steps per day
// kdatim(7) ! year,month,day,hour,min,weekday,leapyear
fn step2cal30(kstep: Int, ktspd: Int, mut kdatim: &mut [Int; 7], cal: &mut Calendar) {
let mut iyea: Int; // current year of simulation
let mut imon: Int; // current month of simulation
let mut iday: Int; // current day of simulation
let mut ihou: Int; // current hour of simulation
let mut imin: Int; // current minute of simulation
let mut idall: Int;
let mut istp: Int;
idall = kstep / ktspd;
iyea = idall / cal.n_days_per_year;
idall = idall.rem(cal.n_days_per_year);
imon = idall / cal.n_days_per_month + 1;
iday = idall.rem(cal.n_days_per_month) + 1;
istp = kstep.rem(ktspd);
imin = ((istp as FloatNum * cal.solar_day) / (ktspd * 60) as FloatNum) as Int;
ihou = imin / 60;
imin = imin.rem(60);
kdatim[0] = iyea;
kdatim[1] = imon;
kdatim[2] = iday;
kdatim[3] = ihou;
kdatim[4] = imin;
kdatim[5] = 0; // day of week
kdatim[6] = 0; // leap year
}
pub fn ntomin(
kstep: Int,
mut kmin: &mut Int,
mut khou: &mut Int,
mut kday: &mut Int,
mut kmon: &mut Int,
mut kyea: &mut Int,
mut cal: &mut Calendar,
) {
let mut idatim = [0; 7];
if cal.n_days_per_year == 365 {
step2cal(kstep, cal.ntspd, &mut idatim, cal);
} else {
step2cal30(kstep, cal.ntspd, &mut idatim, cal);
}
*kyea = idatim[0];
*kmon = idatim[1];
*kday = idatim[2];
*khou = idatim[3];
*kmin = idatim[4];
}
fn ntodat(istep: Int, datch: &mut String, cal: &mut Calendar) {
let mona = vec![
"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec",
];
let mut imin: Int = 1;
let mut ihou: Int = 1;
let mut iday: Int = 1;
let mut imon: Int = 1;
let mut iyea: Int = 1;
ntomin(
istep, &mut imin, &mut ihou, &mut iday, &mut imon, &mut iyea, cal,
);
*datch = format!(
"{}-{}-{} {}:{}",
iday,
mona[imon as usize - 1],
iyea,
ihou,
imin
)
}
// ! =================
// ! SUBROUTINE NTODAT
// ! =================
//
// subroutine ntodat(istep,datch)
// character(len=18) datch
// character(len=3) mona(12)
// data mona /'Jan','Feb','Mar','Apr','May','Jun', &
// & 'Jul','Aug','Sep','Oct','Nov','Dec'/
// call ntomin(istep,imin,ihou,iday,imon,iyea)
// write (datch,20030) iday,mona(imon),iyea,ihou,imin
// 20030 format(i2,'-',a3,'-',i4.4,2x,i2,':',i2.2)
// end
//
//
// ! =================
// ! SUBROUTINE DTODAT
// ! =================
//
// subroutine dtodat(id,datch)
// integer :: id(6)
// character(len=18) datch
// character(len=3) mona(12)
// data mona /'Jan','Feb','Mar','Apr','May','Jun', &
// & 'Jul','Aug','Sep','Oct','Nov','Dec'/
// write (datch,20030) id(3),mona(id(2)),id(1),id(4),id(5)
// 20030 format(i2,'-',a3,'-',i4.4,2x,i2,':',i2.2)
// end
//
//
// ! =================
// ! SUBROUTINE MOMINT
// ! =================
//
// ! Compute month indices and weights for time interpolation from
// ! monthly mean data to current timestep
//
// subroutine momint(kperp,kstep,kmona,kmonb,pweight)
// use calmod
// implicit none
// integer, intent(in ) :: kperp ! perpetual mode ?
// integer, intent(in ) :: kstep ! target step
// integer, intent(out) :: kmona ! current month (1-12)
// integer, intent(out) :: kmonb ! next or previous month (0-13)
// real , intent(out) :: pweight ! interpolation weight
//
// integer :: idatim(7) ! date time array
// integer :: iday
// integer :: ihour
// integer :: imin
// integer :: jmonb ! next or previous month (1-12)
//
// real :: zday ! fractional day (including hour & minute)
// real :: zdpma ! days per month a
// real :: zdpmb ! days per month b
// real :: zmeda ! median day of the month a
// real :: zmedb ! median day of the month b
//
// ! convert time step to date / time
//
// idatim(:) = 0
// if (kperp > 0) then ! perpetual date
// call yday2mmdd(kperp,idatim(2),idatim(3))
// else if (n_days_per_year == 365) then ! real calendar
// call step2cal(kstep,ntspd,idatim)
// else ! simple calendar
// call step2cal30(kstep,ntspd,idatim)
// endif
//
// kmona = idatim(2)
// iday = idatim(3)
// ihour = idatim(4)
// imin = idatim(5)
//
// ! set fractional day
//
// zday = iday + ((ihour * 60.0 + imin) * 60.0) / solar_day
//
// ! compute median of month a
//
// zdpma = n_days_per_month
// if (n_days_per_year == 365) then
// zdpma = mondays(kmona)
// if (kmona == 2) zdpma = zdpma + idatim(7) ! leap year
// endif
// zmeda = 0.5 * (zdpma + 1.0) ! median day a
//
// ! define neighbour month
//
// if (zday > zmeda) then
// kmonb = kmona + 1 ! next month (maybe 13)
// else
// kmonb = kmona - 1 ! previous month (maybe 0)
// endif
//
// ! compute median of month b
//
// zdpmb = n_days_per_month
// if (n_days_per_year == 365) then
// jmonb = mod(kmonb+11,12) + 1 ! convert month (0-13) -> (1-12)
// zdpmb = mondays(jmonb)
// if (jmonb == 2) zdpmb = zdpmb + idatim(7) ! leap year
// endif
// zmedb = 0.5 * (zdpmb + 1.0) ! median day b
//
// ! compute weight
//
// pweight = abs(zday - zmeda) / (zmeda + zmedb - 1.0)
//
// return
// end subroutine momint
//
| {
let mut idatim = [0; 7];
if cal.n_days_per_year == 365 {
step2cal(kstep, cal.ntspd, &mut idatim, cal);
return idatim[2] + cal.monaccu[idatim[1] as usize - 1];
} else {
step2cal30(kstep, cal.ntspd, &mut idatim, cal);
return idatim[2] + cal.n_days_per_month * (idatim[1] - 1);
}
0
} | identifier_body |
calendar.rs | use crate::{FloatNum, Int};
use std::ops::Rem;
#[derive(Clone)]
pub struct Calendar {
pub mondays: [Int; 13],
pub mona365: [Int; 13],
pub monaccu: [Int; 13],
pub ny400d: Int,
pub ny100d: Int,
pub ny004d: Int,
pub ny001d: Int,
pub nud: Int,
// TODO ?
// These values are copied from pumamod in subroutine calini
pub n_days_per_month: Int,
pub n_days_per_year: Int,
pub n_start_step: Int,
pub ntspd: Int,
pub solar_day: FloatNum,
}
impl Default for Calendar {
fn default() -> Self {
Self {
mondays: [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31],
mona365: [0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365],
monaccu: [0; 13],
ny400d: 400 * 365 + 97,
ny100d: 100 * 365 + 24,
ny004d: 4 * 365 + 1,
ny001d: 365,
nud: 6,
n_days_per_month: 30,
n_days_per_year: 360,
n_start_step: 0,
ntspd: 1,
solar_day: 86400.0, // sec
}
}
}
fn yday2mmdd(cal: &Calendar, mut kyday: &mut Int, mut kmon: &mut Int, mut kday: &mut Int) {
if cal.n_days_per_year == 365 {
*kmon = 1;
while *kyday > cal.mona365[*kmon as usize] {
*kmon = *kmon + 1;
}
*kday = *kyday - cal.monaccu[*kmon as usize - 1];
} else {
*kmon = (*kyday - 1) / cal.n_days_per_month;
*kday = *kyday - cal.n_days_per_month * *kmon;
}
}
fn nweekday(kday: Int) -> Int {
(kday + 5).rem(7) as Int
}
pub fn ndayofyear(kstep: Int, cal: &mut Calendar) -> Int {
let mut idatim = [0; 7];
if cal.n_days_per_year == 365 {
step2cal(kstep, cal.ntspd, &mut idatim, cal);
return idatim[2] + cal.monaccu[idatim[1] as usize - 1];
} else {
step2cal30(kstep, cal.ntspd, &mut idatim, cal);
return idatim[2] + cal.n_days_per_month * (idatim[1] - 1);
}
0
}
// kstep ! time step since simulation start
// ktspd ! time steps per day
// kdatim(7) ! year,month,day,hour,min,weekday,leapyear
fn step2cal(kstep: Int, ktspd: Int, kdatim: &mut [Int; 7], mut cal: &mut Calendar) {
let mut iyea: Int; // current year of simulation
let mut imon: Int; // current month of simulation
let mut iday: Int; // current day of simulation
let mut ihou: Int; // current hour of simulation
let mut imin: Int; // current minute of simulation
let mut idall: Int;
let istp: Int;
let iy400: Int;
let id400: Int;
let iy100: Int;
let id100: Int;
let iy004: Int;
let id004: Int;
let iy001: Int;
let mut id001: Int;
let jmon: Int;
let leap: bool;
idall = kstep / ktspd;
iy400 = idall / cal.ny400d; // segment (of 400 years)
id400 = idall.rem(cal.ny400d);
if id400 <= cal.ny100d | else {
// century year is not leap year
iy100 = (id400 - 1) / cal.ny100d; // century in segment [1,2,3]
id100 = (id400 - 1).rem(cal.ny100d);
if id100 < cal.ny004d - 1 {
iy004 = 0; // tetrade in century [0]
id004 = id100;
leap = false;
iy001 = id004 / cal.ny001d; // year in tetrade [1,2,3]
id001 = id004.rem(cal.ny001d);
} else {
iy004 = (id100 + 1) / cal.ny004d; // tetrade in century [1..24]
id004 = (id100 + 1).rem(cal.ny004d);
leap = id004 <= cal.ny001d;
if leap {
iy001 = 0; // year in tetrade [0]
id001 = id004;
} else {
iy001 = (id004 - 1) / cal.ny001d;
id001 = (id004 - 1).rem(cal.ny001d);
}
}
}
iyea = iy400 * 400 + iy100 * 100 + iy004 + 4 + iy001;
cal.monaccu[0] = cal.mondays[0];
cal.monaccu[1] = cal.mondays[1];
cal.monaccu[2] = cal.mondays[1] + cal.mondays[2];
if leap {
cal.monaccu[2] = cal.monaccu[2] + 1;
for jmon in 3..13 {
cal.monaccu[jmon] = cal.monaccu[jmon - 1] + cal.mondays[jmon];
}
imon = 1;
id001 = id001 + 1;
while id001 > cal.monaccu[imon as usize] {
imon = imon + 1;
}
iday = id001 - cal.monaccu[imon as usize - 1];
istp = kstep.rem(ktspd);
imin = (istp * 1440) / ktspd;
ihou = imin / 60;
imin = imin.rem(60);
kdatim[0] = iyea;
kdatim[1] = imon;
kdatim[2] = iday;
kdatim[3] = ihou;
kdatim[4] = imin;
kdatim[5] = (kstep / ktspd + 5).rem(7); // day of week
if leap {
kdatim[6] = 1;
} else {
kdatim[6] = 0;
}
}
}
fn cal2step(
ktspd: Int, // time steps per day
kyea: Int, // current year of simulation
kmon: Int, // current month of simulation
kday: Int, // current day of simulation
khou: Int, // current hour of simulation
kmin: Int, // current minute of simulation
mut kstep: &mut Int, //time step since simulation start
mut cal: &mut Calendar,
) {
let mut idall: Int;
let mut ilp: Int;
let mut iy400: Int;
let mut id400: Int;
let mut iy100: Int;
let mut id100: Int;
let mut iy004: Int;
let mut id004: Int;
let mut jmon: Int;
let leap: bool;
// simplified calendar
if cal.n_days_per_year != 365 {
*kstep =
ktspd * (kyea * cal.n_days_per_year + (kmon - 1) * cal.n_days_per_month + kday - 1);
return;
}
iy400 = kyea / 400; // segment [400]
id400 = kyea.rem(400); // year in segment [0..399]
iy100 = id400 / 100; // century [0,1,2,3]
id100 = id400.rem(100); // year in century [0..99]
iy004 = id100 / 4; // tetrade [0..24]
id004 = id100.rem(4); // year in tetrade [0,1,2,3]
leap = (id004 == 0 && (id100 != 0 || id400 == 0));
ilp = -1;
if id004 > 0 {
ilp = ilp + 1;
}
if iy100 > 0 && id100 == 0 {
ilp = ilp + 1;
}
cal.monaccu[0] = cal.mondays[0];
cal.monaccu[1] = cal.mondays[1];
cal.monaccu[2] = cal.mondays[1];
if leap {
cal.monaccu[2] = cal.monaccu[2] + 1;
}
for jmon in 3..13 {
cal.monaccu[jmon] = cal.monaccu[jmon - 1] + cal.mondays[jmon];
}
idall = iy400 * cal.ny400d
+ iy100 * cal.ny100d
+ iy004 * cal.ny004d
+ id004 * cal.ny001d
+ cal.monaccu[kmon as usize - 1]
+ kday
+ ilp;
*kstep = ktspd * idall + (ktspd * (khou * 60 + kmin)) / 1440;
}
// kstep ! time step since simulation start
// ktspd ! time steps per day
// kdatim(7) ! year,month,day,hour,min,weekday,leapyear
fn step2cal30(kstep: Int, ktspd: Int, mut kdatim: &mut [Int; 7], cal: &mut Calendar) {
let mut iyea: Int; // current year of simulation
let mut imon: Int; // current month of simulation
let mut iday: Int; // current day of simulation
let mut ihou: Int; // current hour of simulation
let mut imin: Int; // current minute of simulation
let mut idall: Int;
let mut istp: Int;
idall = kstep / ktspd;
iyea = idall / cal.n_days_per_year;
idall = idall.rem(cal.n_days_per_year);
imon = idall / cal.n_days_per_month + 1;
iday = idall.rem(cal.n_days_per_month) + 1;
istp = kstep.rem(ktspd);
imin = ((istp as FloatNum * cal.solar_day) / (ktspd * 60) as FloatNum) as Int;
ihou = imin / 60;
imin = imin.rem(60);
kdatim[0] = iyea;
kdatim[1] = imon;
kdatim[2] = iday;
kdatim[3] = ihou;
kdatim[4] = imin;
kdatim[5] = 0; // day of week
kdatim[6] = 0; // leap year
}
pub fn ntomin(
kstep: Int,
mut kmin: &mut Int,
mut khou: &mut Int,
mut kday: &mut Int,
mut kmon: &mut Int,
mut kyea: &mut Int,
mut cal: &mut Calendar,
) {
let mut idatim = [0; 7];
if cal.n_days_per_year == 365 {
step2cal(kstep, cal.ntspd, &mut idatim, cal);
} else {
step2cal30(kstep, cal.ntspd, &mut idatim, cal);
}
*kyea = idatim[0];
*kmon = idatim[1];
*kday = idatim[2];
*khou = idatim[3];
*kmin = idatim[4];
}
fn ntodat(istep: Int, datch: &mut String, cal: &mut Calendar) {
let mona = vec![
"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec",
];
let mut imin: Int = 1;
let mut ihou: Int = 1;
let mut iday: Int = 1;
let mut imon: Int = 1;
let mut iyea: Int = 1;
ntomin(
istep, &mut imin, &mut ihou, &mut iday, &mut imon, &mut iyea, cal,
);
*datch = format!(
"{}-{}-{} {}:{}",
iday,
mona[imon as usize - 1],
iyea,
ihou,
imin
)
}
// ! =================
// ! SUBROUTINE NTODAT
// ! =================
//
// subroutine ntodat(istep,datch)
// character(len=18) datch
// character(len=3) mona(12)
// data mona /'Jan','Feb','Mar','Apr','May','Jun', &
// & 'Jul','Aug','Sep','Oct','Nov','Dec'/
// call ntomin(istep,imin,ihou,iday,imon,iyea)
// write (datch,20030) iday,mona(imon),iyea,ihou,imin
// 20030 format(i2,'-',a3,'-',i4.4,2x,i2,':',i2.2)
// end
//
//
// ! =================
// ! SUBROUTINE DTODAT
// ! =================
//
// subroutine dtodat(id,datch)
// integer :: id(6)
// character(len=18) datch
// character(len=3) mona(12)
// data mona /'Jan','Feb','Mar','Apr','May','Jun', &
// & 'Jul','Aug','Sep','Oct','Nov','Dec'/
// write (datch,20030) id(3),mona(id(2)),id(1),id(4),id(5)
// 20030 format(i2,'-',a3,'-',i4.4,2x,i2,':',i2.2)
// end
//
//
// ! =================
// ! SUBROUTINE MOMINT
// ! =================
//
// ! Compute month indices and weights for time interpolation from
// ! monthly mean data to current timestep
//
// subroutine momint(kperp,kstep,kmona,kmonb,pweight)
// use calmod
// implicit none
// integer, intent(in ) :: kperp ! perpetual mode ?
// integer, intent(in ) :: kstep ! target step
// integer, intent(out) :: kmona ! current month (1-12)
// integer, intent(out) :: kmonb ! next or previous month (0-13)
// real , intent(out) :: pweight ! interpolation weight
//
// integer :: idatim(7) ! date time array
// integer :: iday
// integer :: ihour
// integer :: imin
// integer :: jmonb ! next or previous month (1-12)
//
// real :: zday ! fractional day (including hour & minute)
// real :: zdpma ! days per month a
// real :: zdpmb ! days per month b
// real :: zmeda ! median day of the month a
// real :: zmedb ! median day of the month b
//
// ! convert time step to date / time
//
// idatim(:) = 0
// if (kperp > 0) then ! perpetual date
// call yday2mmdd(kperp,idatim(2),idatim(3))
// else if (n_days_per_year == 365) then ! real calendar
// call step2cal(kstep,ntspd,idatim)
// else ! simple calendar
// call step2cal30(kstep,ntspd,idatim)
// endif
//
// kmona = idatim(2)
// iday = idatim(3)
// ihour = idatim(4)
// imin = idatim(5)
//
// ! set fractional day
//
// zday = iday + ((ihour * 60.0 + imin) * 60.0) / solar_day
//
// ! compute median of month a
//
// zdpma = n_days_per_month
// if (n_days_per_year == 365) then
// zdpma = mondays(kmona)
// if (kmona == 2) zdpma = zdpma + idatim(7) ! leap year
// endif
// zmeda = 0.5 * (zdpma + 1.0) ! median day a
//
// ! define neighbour month
//
// if (zday > zmeda) then
// kmonb = kmona + 1 ! next month (maybe 13)
// else
// kmonb = kmona - 1 ! previous month (maybe 0)
// endif
//
// ! compute median of month b
//
// zdpmb = n_days_per_month
// if (n_days_per_year == 365) then
// jmonb = mod(kmonb+11,12) + 1 ! convert month (0-13) -> (1-12)
// zdpmb = mondays(jmonb)
// if (jmonb == 2) zdpmb = zdpmb + idatim(7) ! leap year
// endif
// zmedb = 0.5 * (zdpmb + 1.0) ! median day b
//
// ! compute weight
//
// pweight = abs(zday - zmeda) / (zmeda + zmedb - 1.0)
//
// return
// end subroutine momint
//
| {
// century year is leap year
iy100 = 0; // century in segment [0]
id100 = id400;
iy004 = id100 / cal.ny004d; // tetrade in century [0..24]
id004 = id100.rem(cal.ny004d);
leap = id004 <= cal.ny001d;
if leap {
iy001 = 0; // year in tetrade [0]
id001 = id004;
} else {
iy001 = (id004 - 1) / cal.ny001d; // year in tetrade [1,2,3]
id001 = (id004 - 1).rem(cal.ny001d);
}
} | conditional_block |
calendar.rs | use crate::{FloatNum, Int};
use std::ops::Rem;
#[derive(Clone)]
pub struct Calendar {
pub mondays: [Int; 13],
pub mona365: [Int; 13],
pub monaccu: [Int; 13],
pub ny400d: Int,
pub ny100d: Int,
pub ny004d: Int,
pub ny001d: Int,
pub nud: Int,
// TODO ?
// These values are copied from pumamod in subroutine calini
pub n_days_per_month: Int,
pub n_days_per_year: Int,
pub n_start_step: Int,
pub ntspd: Int,
pub solar_day: FloatNum,
}
impl Default for Calendar {
fn default() -> Self {
Self {
mondays: [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31],
mona365: [0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365],
monaccu: [0; 13],
ny400d: 400 * 365 + 97,
ny100d: 100 * 365 + 24,
ny004d: 4 * 365 + 1,
ny001d: 365,
nud: 6,
n_days_per_month: 30,
n_days_per_year: 360,
n_start_step: 0,
ntspd: 1,
solar_day: 86400.0, // sec
}
}
}
fn yday2mmdd(cal: &Calendar, mut kyday: &mut Int, mut kmon: &mut Int, mut kday: &mut Int) {
if cal.n_days_per_year == 365 {
*kmon = 1;
while *kyday > cal.mona365[*kmon as usize] {
*kmon = *kmon + 1;
}
*kday = *kyday - cal.monaccu[*kmon as usize - 1];
} else {
*kmon = (*kyday - 1) / cal.n_days_per_month;
*kday = *kyday - cal.n_days_per_month * *kmon;
}
}
fn nweekday(kday: Int) -> Int {
(kday + 5).rem(7) as Int
}
pub fn ndayofyear(kstep: Int, cal: &mut Calendar) -> Int {
let mut idatim = [0; 7];
if cal.n_days_per_year == 365 {
step2cal(kstep, cal.ntspd, &mut idatim, cal);
return idatim[2] + cal.monaccu[idatim[1] as usize - 1];
} else {
step2cal30(kstep, cal.ntspd, &mut idatim, cal);
return idatim[2] + cal.n_days_per_month * (idatim[1] - 1);
}
0
}
// kstep ! time step since simulation start
// ktspd ! time steps per day
// kdatim(7) ! year,month,day,hour,min,weekday,leapyear
fn step2cal(kstep: Int, ktspd: Int, kdatim: &mut [Int; 7], mut cal: &mut Calendar) {
let mut iyea: Int; // current year of simulation
let mut imon: Int; // current month of simulation
let mut iday: Int; // current day of simulation
let mut ihou: Int; // current hour of simulation
let mut imin: Int; // current minute of simulation
let mut idall: Int;
let istp: Int;
let iy400: Int;
let id400: Int;
let iy100: Int;
let id100: Int;
let iy004: Int;
let id004: Int;
let iy001: Int;
let mut id001: Int;
let jmon: Int;
let leap: bool;
idall = kstep / ktspd;
iy400 = idall / cal.ny400d; // segment (of 400 years)
id400 = idall.rem(cal.ny400d);
if id400 <= cal.ny100d {
// century year is leap year
iy100 = 0; // century in segment [0]
id100 = id400;
iy004 = id100 / cal.ny004d; // tetrade in century [0..24]
id004 = id100.rem(cal.ny004d);
leap = id004 <= cal.ny001d;
if leap {
iy001 = 0; // year in tetrade [0]
id001 = id004;
} else {
iy001 = (id004 - 1) / cal.ny001d; // year in tetrade [1,2,3]
id001 = (id004 - 1).rem(cal.ny001d);
}
} else {
// century year is not leap year
iy100 = (id400 - 1) / cal.ny100d; // century in segment [1,2,3]
id100 = (id400 - 1).rem(cal.ny100d);
if id100 < cal.ny004d - 1 {
iy004 = 0; // tetrade in century [0]
id004 = id100;
leap = false;
iy001 = id004 / cal.ny001d; // year in tetrade [1,2,3]
id001 = id004.rem(cal.ny001d);
} else {
iy004 = (id100 + 1) / cal.ny004d; // tetrade in century [1..24]
id004 = (id100 + 1).rem(cal.ny004d);
leap = id004 <= cal.ny001d;
if leap {
iy001 = 0; // year in tetrade [0]
id001 = id004;
} else {
iy001 = (id004 - 1) / cal.ny001d;
id001 = (id004 - 1).rem(cal.ny001d);
}
}
}
iyea = iy400 * 400 + iy100 * 100 + iy004 + 4 + iy001;
cal.monaccu[0] = cal.mondays[0];
cal.monaccu[1] = cal.mondays[1];
cal.monaccu[2] = cal.mondays[1] + cal.mondays[2];
if leap {
cal.monaccu[2] = cal.monaccu[2] + 1;
for jmon in 3..13 {
cal.monaccu[jmon] = cal.monaccu[jmon - 1] + cal.mondays[jmon];
}
imon = 1;
id001 = id001 + 1;
while id001 > cal.monaccu[imon as usize] {
imon = imon + 1;
}
iday = id001 - cal.monaccu[imon as usize - 1];
istp = kstep.rem(ktspd);
imin = (istp * 1440) / ktspd;
ihou = imin / 60;
imin = imin.rem(60);
kdatim[0] = iyea;
kdatim[1] = imon;
kdatim[2] = iday;
kdatim[3] = ihou;
kdatim[4] = imin;
kdatim[5] = (kstep / ktspd + 5).rem(7); // day of week
if leap {
kdatim[6] = 1;
} else {
kdatim[6] = 0;
}
}
}
fn | (
ktspd: Int, // time steps per day
kyea: Int, // current year of simulation
kmon: Int, // current month of simulation
kday: Int, // current day of simulation
khou: Int, // current hour of simulation
kmin: Int, // current minute of simulation
mut kstep: &mut Int, //time step since simulation start
mut cal: &mut Calendar,
) {
let mut idall: Int;
let mut ilp: Int;
let mut iy400: Int;
let mut id400: Int;
let mut iy100: Int;
let mut id100: Int;
let mut iy004: Int;
let mut id004: Int;
let mut jmon: Int;
let leap: bool;
// simplified calendar
if cal.n_days_per_year != 365 {
*kstep =
ktspd * (kyea * cal.n_days_per_year + (kmon - 1) * cal.n_days_per_month + kday - 1);
return;
}
iy400 = kyea / 400; // segment [400]
id400 = kyea.rem(400); // year in segment [0..399]
iy100 = id400 / 100; // century [0,1,2,3]
id100 = id400.rem(100); // year in century [0..99]
iy004 = id100 / 4; // tetrade [0..24]
id004 = id100.rem(4); // year in tetrade [0,1,2,3]
leap = (id004 == 0 && (id100 != 0 || id400 == 0));
ilp = -1;
if id004 > 0 {
ilp = ilp + 1;
}
if iy100 > 0 && id100 == 0 {
ilp = ilp + 1;
}
cal.monaccu[0] = cal.mondays[0];
cal.monaccu[1] = cal.mondays[1];
cal.monaccu[2] = cal.mondays[1];
if leap {
cal.monaccu[2] = cal.monaccu[2] + 1;
}
for jmon in 3..13 {
cal.monaccu[jmon] = cal.monaccu[jmon - 1] + cal.mondays[jmon];
}
idall = iy400 * cal.ny400d
+ iy100 * cal.ny100d
+ iy004 * cal.ny004d
+ id004 * cal.ny001d
+ cal.monaccu[kmon as usize - 1]
+ kday
+ ilp;
*kstep = ktspd * idall + (ktspd * (khou * 60 + kmin)) / 1440;
}
// kstep ! time step since simulation start
// ktspd ! time steps per day
// kdatim(7) ! year,month,day,hour,min,weekday,leapyear
fn step2cal30(kstep: Int, ktspd: Int, mut kdatim: &mut [Int; 7], cal: &mut Calendar) {
let mut iyea: Int; // current year of simulation
let mut imon: Int; // current month of simulation
let mut iday: Int; // current day of simulation
let mut ihou: Int; // current hour of simulation
let mut imin: Int; // current minute of simulation
let mut idall: Int;
let mut istp: Int;
idall = kstep / ktspd;
iyea = idall / cal.n_days_per_year;
idall = idall.rem(cal.n_days_per_year);
imon = idall / cal.n_days_per_month + 1;
iday = idall.rem(cal.n_days_per_month) + 1;
istp = kstep.rem(ktspd);
imin = ((istp as FloatNum * cal.solar_day) / (ktspd * 60) as FloatNum) as Int;
ihou = imin / 60;
imin = imin.rem(60);
kdatim[0] = iyea;
kdatim[1] = imon;
kdatim[2] = iday;
kdatim[3] = ihou;
kdatim[4] = imin;
kdatim[5] = 0; // day of week
kdatim[6] = 0; // leap year
}
pub fn ntomin(
kstep: Int,
mut kmin: &mut Int,
mut khou: &mut Int,
mut kday: &mut Int,
mut kmon: &mut Int,
mut kyea: &mut Int,
mut cal: &mut Calendar,
) {
let mut idatim = [0; 7];
if cal.n_days_per_year == 365 {
step2cal(kstep, cal.ntspd, &mut idatim, cal);
} else {
step2cal30(kstep, cal.ntspd, &mut idatim, cal);
}
*kyea = idatim[0];
*kmon = idatim[1];
*kday = idatim[2];
*khou = idatim[3];
*kmin = idatim[4];
}
fn ntodat(istep: Int, datch: &mut String, cal: &mut Calendar) {
let mona = vec![
"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec",
];
let mut imin: Int = 1;
let mut ihou: Int = 1;
let mut iday: Int = 1;
let mut imon: Int = 1;
let mut iyea: Int = 1;
ntomin(
istep, &mut imin, &mut ihou, &mut iday, &mut imon, &mut iyea, cal,
);
*datch = format!(
"{}-{}-{} {}:{}",
iday,
mona[imon as usize - 1],
iyea,
ihou,
imin
)
}
// ! =================
// ! SUBROUTINE NTODAT
// ! =================
//
// subroutine ntodat(istep,datch)
// character(len=18) datch
// character(len=3) mona(12)
// data mona /'Jan','Feb','Mar','Apr','May','Jun', &
// & 'Jul','Aug','Sep','Oct','Nov','Dec'/
// call ntomin(istep,imin,ihou,iday,imon,iyea)
// write (datch,20030) iday,mona(imon),iyea,ihou,imin
// 20030 format(i2,'-',a3,'-',i4.4,2x,i2,':',i2.2)
// end
//
//
// ! =================
// ! SUBROUTINE DTODAT
// ! =================
//
// subroutine dtodat(id,datch)
// integer :: id(6)
// character(len=18) datch
// character(len=3) mona(12)
// data mona /'Jan','Feb','Mar','Apr','May','Jun', &
// & 'Jul','Aug','Sep','Oct','Nov','Dec'/
// write (datch,20030) id(3),mona(id(2)),id(1),id(4),id(5)
// 20030 format(i2,'-',a3,'-',i4.4,2x,i2,':',i2.2)
// end
//
//
// ! =================
// ! SUBROUTINE MOMINT
// ! =================
//
// ! Compute month indices and weights for time interpolation from
// ! monthly mean data to current timestep
//
// subroutine momint(kperp,kstep,kmona,kmonb,pweight)
// use calmod
// implicit none
// integer, intent(in ) :: kperp ! perpetual mode ?
// integer, intent(in ) :: kstep ! target step
// integer, intent(out) :: kmona ! current month (1-12)
// integer, intent(out) :: kmonb ! next or previous month (0-13)
// real , intent(out) :: pweight ! interpolation weight
//
// integer :: idatim(7) ! date time array
// integer :: iday
// integer :: ihour
// integer :: imin
// integer :: jmonb ! next or previous month (1-12)
//
// real :: zday ! fractional day (including hour & minute)
// real :: zdpma ! days per month a
// real :: zdpmb ! days per month b
// real :: zmeda ! median day of the month a
// real :: zmedb ! median day of the month b
//
// ! convert time step to date / time
//
// idatim(:) = 0
// if (kperp > 0) then ! perpetual date
// call yday2mmdd(kperp,idatim(2),idatim(3))
// else if (n_days_per_year == 365) then ! real calendar
// call step2cal(kstep,ntspd,idatim)
// else ! simple calendar
// call step2cal30(kstep,ntspd,idatim)
// endif
//
// kmona = idatim(2)
// iday = idatim(3)
// ihour = idatim(4)
// imin = idatim(5)
//
// ! set fractional day
//
// zday = iday + ((ihour * 60.0 + imin) * 60.0) / solar_day
//
// ! compute median of month a
//
// zdpma = n_days_per_month
// if (n_days_per_year == 365) then
// zdpma = mondays(kmona)
// if (kmona == 2) zdpma = zdpma + idatim(7) ! leap year
// endif
// zmeda = 0.5 * (zdpma + 1.0) ! median day a
//
// ! define neighbour month
//
// if (zday > zmeda) then
// kmonb = kmona + 1 ! next month (maybe 13)
// else
// kmonb = kmona - 1 ! previous month (maybe 0)
// endif
//
// ! compute median of month b
//
// zdpmb = n_days_per_month
// if (n_days_per_year == 365) then
// jmonb = mod(kmonb+11,12) + 1 ! convert month (0-13) -> (1-12)
// zdpmb = mondays(jmonb)
// if (jmonb == 2) zdpmb = zdpmb + idatim(7) ! leap year
// endif
// zmedb = 0.5 * (zdpmb + 1.0) ! median day b
//
// ! compute weight
//
// pweight = abs(zday - zmeda) / (zmeda + zmedb - 1.0)
//
// return
// end subroutine momint
//
| cal2step | identifier_name |
calendar.rs | use crate::{FloatNum, Int};
use std::ops::Rem;
#[derive(Clone)]
pub struct Calendar {
pub mondays: [Int; 13],
pub mona365: [Int; 13],
pub monaccu: [Int; 13],
pub ny400d: Int,
pub ny100d: Int,
pub ny004d: Int,
pub ny001d: Int,
pub nud: Int,
// TODO ?
// These values are copied from pumamod in subroutine calini
pub n_days_per_month: Int,
pub n_days_per_year: Int,
pub n_start_step: Int,
pub ntspd: Int,
pub solar_day: FloatNum,
}
impl Default for Calendar {
fn default() -> Self {
Self {
mondays: [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31],
mona365: [0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365],
monaccu: [0; 13],
ny400d: 400 * 365 + 97,
ny100d: 100 * 365 + 24,
ny004d: 4 * 365 + 1, | n_days_per_year: 360,
n_start_step: 0,
ntspd: 1,
solar_day: 86400.0, // sec
}
}
}
fn yday2mmdd(cal: &Calendar, mut kyday: &mut Int, mut kmon: &mut Int, mut kday: &mut Int) {
if cal.n_days_per_year == 365 {
*kmon = 1;
while *kyday > cal.mona365[*kmon as usize] {
*kmon = *kmon + 1;
}
*kday = *kyday - cal.monaccu[*kmon as usize - 1];
} else {
*kmon = (*kyday - 1) / cal.n_days_per_month;
*kday = *kyday - cal.n_days_per_month * *kmon;
}
}
fn nweekday(kday: Int) -> Int {
(kday + 5).rem(7) as Int
}
pub fn ndayofyear(kstep: Int, cal: &mut Calendar) -> Int {
let mut idatim = [0; 7];
if cal.n_days_per_year == 365 {
step2cal(kstep, cal.ntspd, &mut idatim, cal);
return idatim[2] + cal.monaccu[idatim[1] as usize - 1];
} else {
step2cal30(kstep, cal.ntspd, &mut idatim, cal);
return idatim[2] + cal.n_days_per_month * (idatim[1] - 1);
}
0
}
// kstep ! time step since simulation start
// ktspd ! time steps per day
// kdatim(7) ! year,month,day,hour,min,weekday,leapyear
fn step2cal(kstep: Int, ktspd: Int, kdatim: &mut [Int; 7], mut cal: &mut Calendar) {
let mut iyea: Int; // current year of simulation
let mut imon: Int; // current month of simulation
let mut iday: Int; // current day of simulation
let mut ihou: Int; // current hour of simulation
let mut imin: Int; // current minute of simulation
let mut idall: Int;
let istp: Int;
let iy400: Int;
let id400: Int;
let iy100: Int;
let id100: Int;
let iy004: Int;
let id004: Int;
let iy001: Int;
let mut id001: Int;
let jmon: Int;
let leap: bool;
idall = kstep / ktspd;
iy400 = idall / cal.ny400d; // segment (of 400 years)
id400 = idall.rem(cal.ny400d);
if id400 <= cal.ny100d {
// century year is leap year
iy100 = 0; // century in segment [0]
id100 = id400;
iy004 = id100 / cal.ny004d; // tetrade in century [0..24]
id004 = id100.rem(cal.ny004d);
leap = id004 <= cal.ny001d;
if leap {
iy001 = 0; // year in tetrade [0]
id001 = id004;
} else {
iy001 = (id004 - 1) / cal.ny001d; // year in tetrade [1,2,3]
id001 = (id004 - 1).rem(cal.ny001d);
}
} else {
// century year is not leap year
iy100 = (id400 - 1) / cal.ny100d; // century in segment [1,2,3]
id100 = (id400 - 1).rem(cal.ny100d);
if id100 < cal.ny004d - 1 {
iy004 = 0; // tetrade in century [0]
id004 = id100;
leap = false;
iy001 = id004 / cal.ny001d; // year in tetrade [1,2,3]
id001 = id004.rem(cal.ny001d);
} else {
iy004 = (id100 + 1) / cal.ny004d; // tetrade in century [1..24]
id004 = (id100 + 1).rem(cal.ny004d);
leap = id004 <= cal.ny001d;
if leap {
iy001 = 0; // year in tetrade [0]
id001 = id004;
} else {
iy001 = (id004 - 1) / cal.ny001d;
id001 = (id004 - 1).rem(cal.ny001d);
}
}
}
iyea = iy400 * 400 + iy100 * 100 + iy004 + 4 + iy001;
cal.monaccu[0] = cal.mondays[0];
cal.monaccu[1] = cal.mondays[1];
cal.monaccu[2] = cal.mondays[1] + cal.mondays[2];
if leap {
cal.monaccu[2] = cal.monaccu[2] + 1;
for jmon in 3..13 {
cal.monaccu[jmon] = cal.monaccu[jmon - 1] + cal.mondays[jmon];
}
imon = 1;
id001 = id001 + 1;
while id001 > cal.monaccu[imon as usize] {
imon = imon + 1;
}
iday = id001 - cal.monaccu[imon as usize - 1];
istp = kstep.rem(ktspd);
imin = (istp * 1440) / ktspd;
ihou = imin / 60;
imin = imin.rem(60);
kdatim[0] = iyea;
kdatim[1] = imon;
kdatim[2] = iday;
kdatim[3] = ihou;
kdatim[4] = imin;
kdatim[5] = (kstep / ktspd + 5).rem(7); // day of week
if leap {
kdatim[6] = 1;
} else {
kdatim[6] = 0;
}
}
}
fn cal2step(
ktspd: Int, // time steps per day
kyea: Int, // current year of simulation
kmon: Int, // current month of simulation
kday: Int, // current day of simulation
khou: Int, // current hour of simulation
kmin: Int, // current minute of simulation
mut kstep: &mut Int, //time step since simulation start
mut cal: &mut Calendar,
) {
let mut idall: Int;
let mut ilp: Int;
let mut iy400: Int;
let mut id400: Int;
let mut iy100: Int;
let mut id100: Int;
let mut iy004: Int;
let mut id004: Int;
let mut jmon: Int;
let leap: bool;
// simplified calendar
if cal.n_days_per_year != 365 {
*kstep =
ktspd * (kyea * cal.n_days_per_year + (kmon - 1) * cal.n_days_per_month + kday - 1);
return;
}
iy400 = kyea / 400; // segment [400]
id400 = kyea.rem(400); // year in segment [0..399]
iy100 = id400 / 100; // century [0,1,2,3]
id100 = id400.rem(100); // year in century [0..99]
iy004 = id100 / 4; // tetrade [0..24]
id004 = id100.rem(4); // year in tetrade [0,1,2,3]
leap = (id004 == 0 && (id100 != 0 || id400 == 0));
ilp = -1;
if id004 > 0 {
ilp = ilp + 1;
}
if iy100 > 0 && id100 == 0 {
ilp = ilp + 1;
}
cal.monaccu[0] = cal.mondays[0];
cal.monaccu[1] = cal.mondays[1];
cal.monaccu[2] = cal.mondays[1];
if leap {
cal.monaccu[2] = cal.monaccu[2] + 1;
}
for jmon in 3..13 {
cal.monaccu[jmon] = cal.monaccu[jmon - 1] + cal.mondays[jmon];
}
idall = iy400 * cal.ny400d
+ iy100 * cal.ny100d
+ iy004 * cal.ny004d
+ id004 * cal.ny001d
+ cal.monaccu[kmon as usize - 1]
+ kday
+ ilp;
*kstep = ktspd * idall + (ktspd * (khou * 60 + kmin)) / 1440;
}
// kstep ! time step since simulation start
// ktspd ! time steps per day
// kdatim(7) ! year,month,day,hour,min,weekday,leapyear
fn step2cal30(kstep: Int, ktspd: Int, mut kdatim: &mut [Int; 7], cal: &mut Calendar) {
let mut iyea: Int; // current year of simulation
let mut imon: Int; // current month of simulation
let mut iday: Int; // current day of simulation
let mut ihou: Int; // current hour of simulation
let mut imin: Int; // current minute of simulation
let mut idall: Int;
let mut istp: Int;
idall = kstep / ktspd;
iyea = idall / cal.n_days_per_year;
idall = idall.rem(cal.n_days_per_year);
imon = idall / cal.n_days_per_month + 1;
iday = idall.rem(cal.n_days_per_month) + 1;
istp = kstep.rem(ktspd);
imin = ((istp as FloatNum * cal.solar_day) / (ktspd * 60) as FloatNum) as Int;
ihou = imin / 60;
imin = imin.rem(60);
kdatim[0] = iyea;
kdatim[1] = imon;
kdatim[2] = iday;
kdatim[3] = ihou;
kdatim[4] = imin;
kdatim[5] = 0; // day of week
kdatim[6] = 0; // leap year
}
pub fn ntomin(
kstep: Int,
mut kmin: &mut Int,
mut khou: &mut Int,
mut kday: &mut Int,
mut kmon: &mut Int,
mut kyea: &mut Int,
mut cal: &mut Calendar,
) {
let mut idatim = [0; 7];
if cal.n_days_per_year == 365 {
step2cal(kstep, cal.ntspd, &mut idatim, cal);
} else {
step2cal30(kstep, cal.ntspd, &mut idatim, cal);
}
*kyea = idatim[0];
*kmon = idatim[1];
*kday = idatim[2];
*khou = idatim[3];
*kmin = idatim[4];
}
fn ntodat(istep: Int, datch: &mut String, cal: &mut Calendar) {
let mona = vec![
"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec",
];
let mut imin: Int = 1;
let mut ihou: Int = 1;
let mut iday: Int = 1;
let mut imon: Int = 1;
let mut iyea: Int = 1;
ntomin(
istep, &mut imin, &mut ihou, &mut iday, &mut imon, &mut iyea, cal,
);
*datch = format!(
"{}-{}-{} {}:{}",
iday,
mona[imon as usize - 1],
iyea,
ihou,
imin
)
}
// ! =================
// ! SUBROUTINE NTODAT
// ! =================
//
// subroutine ntodat(istep,datch)
// character(len=18) datch
// character(len=3) mona(12)
// data mona /'Jan','Feb','Mar','Apr','May','Jun', &
// & 'Jul','Aug','Sep','Oct','Nov','Dec'/
// call ntomin(istep,imin,ihou,iday,imon,iyea)
// write (datch,20030) iday,mona(imon),iyea,ihou,imin
// 20030 format(i2,'-',a3,'-',i4.4,2x,i2,':',i2.2)
// end
//
//
// ! =================
// ! SUBROUTINE DTODAT
// ! =================
//
// subroutine dtodat(id,datch)
// integer :: id(6)
// character(len=18) datch
// character(len=3) mona(12)
// data mona /'Jan','Feb','Mar','Apr','May','Jun', &
// & 'Jul','Aug','Sep','Oct','Nov','Dec'/
// write (datch,20030) id(3),mona(id(2)),id(1),id(4),id(5)
// 20030 format(i2,'-',a3,'-',i4.4,2x,i2,':',i2.2)
// end
//
//
// ! =================
// ! SUBROUTINE MOMINT
// ! =================
//
// ! Compute month indices and weights for time interpolation from
// ! monthly mean data to current timestep
//
// subroutine momint(kperp,kstep,kmona,kmonb,pweight)
// use calmod
// implicit none
// integer, intent(in ) :: kperp ! perpetual mode ?
// integer, intent(in ) :: kstep ! target step
// integer, intent(out) :: kmona ! current month (1-12)
// integer, intent(out) :: kmonb ! next or previous month (0-13)
// real , intent(out) :: pweight ! interpolation weight
//
// integer :: idatim(7) ! date time array
// integer :: iday
// integer :: ihour
// integer :: imin
// integer :: jmonb ! next or previous month (1-12)
//
// real :: zday ! fractional day (including hour & minute)
// real :: zdpma ! days per month a
// real :: zdpmb ! days per month b
// real :: zmeda ! median day of the month a
// real :: zmedb ! median day of the month b
//
// ! convert time step to date / time
//
// idatim(:) = 0
// if (kperp > 0) then ! perpetual date
// call yday2mmdd(kperp,idatim(2),idatim(3))
// else if (n_days_per_year == 365) then ! real calendar
// call step2cal(kstep,ntspd,idatim)
// else ! simple calendar
// call step2cal30(kstep,ntspd,idatim)
// endif
//
// kmona = idatim(2)
// iday = idatim(3)
// ihour = idatim(4)
// imin = idatim(5)
//
// ! set fractional day
//
// zday = iday + ((ihour * 60.0 + imin) * 60.0) / solar_day
//
// ! compute median of month a
//
// zdpma = n_days_per_month
// if (n_days_per_year == 365) then
// zdpma = mondays(kmona)
// if (kmona == 2) zdpma = zdpma + idatim(7) ! leap year
// endif
// zmeda = 0.5 * (zdpma + 1.0) ! median day a
//
// ! define neighbour month
//
// if (zday > zmeda) then
// kmonb = kmona + 1 ! next month (maybe 13)
// else
// kmonb = kmona - 1 ! previous month (maybe 0)
// endif
//
// ! compute median of month b
//
// zdpmb = n_days_per_month
// if (n_days_per_year == 365) then
// jmonb = mod(kmonb+11,12) + 1 ! convert month (0-13) -> (1-12)
// zdpmb = mondays(jmonb)
// if (jmonb == 2) zdpmb = zdpmb + idatim(7) ! leap year
// endif
// zmedb = 0.5 * (zdpmb + 1.0) ! median day b
//
// ! compute weight
//
// pweight = abs(zday - zmeda) / (zmeda + zmedb - 1.0)
//
// return
// end subroutine momint
// | ny001d: 365,
nud: 6,
n_days_per_month: 30, | random_line_split |
pages.go | package peji
import (
"bytes"
"context"
"strings"
"sync"
"time"
"github.com/influx6/npkg/njson"
"github.com/influx6/sabuhp"
"github.com/influx6/sabuhp/mixer"
"github.com/influx6/npkg/nerror"
"github.com/influx6/npkg/nxid"
"github.com/influx6/groundlayer/pkg/domu"
"github.com/influx6/groundlayer/pkg/styled"
)
const (
DefaultMaxPageIdleness = 5 * time.Minute
DefaultPageIdlenessChecksInterval = 2 * time.Minute
HeaderSessionIdName = "X-Void-Id"
QueryAndCookieSessionIdName = "_groundlayer_id"
)
type PageCreator func(name string, theme *styled.Theme, pubsub sabuhp.Transport) *Page
type PageSession struct {
lastUsed time.Time
Id nxid.ID
Page *Page
}
func (ps *PageSession) Close() {
ps.lastUsed = time.Time{}
ps.Page.Close()
}
type Logger interface {
Log(json *njson.JSON)
}
type PageSessionManager struct {
routePath string
maxIdle time.Duration
idleCheckInterval time.Duration
onAddRoute OnPage
theme *styled.Theme
Creator PageCreator
sessions map[string]PageSession
doFunc chan func()
canceler context.CancelFunc
ctx context.Context
waiter sync.WaitGroup
starter sync.Once
ender sync.Once
rl sync.Mutex
routes map[string]bool
}
func NewPageSessionManager(
ctx context.Context,
routePath string,
maxIdle time.Duration,
idleCheckInterval time.Duration,
creator PageCreator,
theme *styled.Theme,
onAddRoute OnPage,
) *PageSessionManager {
var newCtx, canceler = context.WithCancel(ctx)
return &PageSessionManager{
routePath: routePath,
ctx: newCtx,
canceler: canceler,
maxIdle: maxIdle,
Creator: creator,
theme: theme,
onAddRoute: onAddRoute,
idleCheckInterval: idleCheckInterval,
doFunc: make(chan func(), 0),
routes: map[string]bool{},
sessions: map[string]PageSession{},
}
}
func (psm *PageSessionManager) GetName() string {
return psm.routePath
}
func (psm *PageSessionManager) Wait() {
psm.waiter.Wait()
}
func (psm *PageSessionManager) Stop() {
psm.starter.Do(func() {
psm.canceler()
psm.waiter.Wait()
})
}
func (psm *PageSessionManager) Start() {
psm.starter.Do(func() {
psm.waiter.Add(1)
go psm.manage()
})
}
type SessionStat struct {
PageName string
TotalSessions int
Sessions map[string]time.Time
}
func (psm *PageSessionManager) Stat() (SessionStat, error) {
var session = make(chan SessionStat, 1)
psm.doFunc <- func() {
var stat SessionStat
stat.PageName = psm.routePath
stat.Sessions = map[string]time.Time{}
stat.TotalSessions = len(psm.sessions)
for _, ss := range psm.sessions {
stat.Sessions[ss.Id.String()] = ss.lastUsed
}
session <- stat
}
select {
case <-psm.ctx.Done():
return SessionStat{}, nerror.WrapOnly(psm.ctx.Err())
case ss := <-session:
return ss, nil
}
}
// Retire returns closes a specific session using giving id.
func (psm *PageSessionManager) Retire(sessionId string) error {
var session = make(chan error, 1)
psm.doFunc <- func() {
if ss, hasSession := psm.sessions[sessionId]; hasSession {
delete(psm.sessions, sessionId)
ss.Page.Close()
}
session <- nil
}
select {
case <-psm.ctx.Done():
return nerror.WrapOnly(psm.ctx.Err())
case <-session:
return nil
}
}
// Session returns a giving page for a giving sessionId.
func (psm *PageSessionManager) Session(sessionId string) (*Page, error) {
var session = make(chan *Page, 1)
var errs = make(chan error, 1)
psm.doFunc <- func() {
var ss, hasSession = psm.sessions[sessionId]
if !hasSession {
errs <- nil
return
}
session <- ss.Page
}
select {
case <-psm.ctx.Done():
return nil, nerror.WrapOnly(psm.ctx.Err())
case err := <-errs:
return nil, nerror.WrapOnly(err)
case page := <-session:
return page, nil
}
}
// NewSession returns a new session page and session id.
func (psm *PageSessionManager) NewSession(t sabuhp.Transport) (*Page, string, error) {
var session = make(chan PageSession, 1)
psm.doFunc <- func() {
var ps PageSession
ps.Id = nxid.New()
ps.lastUsed = time.Now()
ps.Page = psm.Creator(psm.routePath, psm.theme, t)
psm.sessions[ps.Id.String()] = ps
ps.Page.OnPageAdd(psm.manageAddPageRoute)
session <- ps
}
select {
case <-psm.ctx.Done():
return nil, "", nerror.WrapOnly(psm.ctx.Err())
case ss := <-session:
return ss.Page, ss.Id.String(), nil
}
}
func (psm *PageSessionManager) manageAddPageRoute(pageRoute string, p *Page) {
psm.rl.Lock()
if _, hasRoute := psm.routes[pageRoute]; hasRoute |
psm.routes[pageRoute] = true
psm.rl.Unlock()
psm.onAddRoute(pageRoute, p)
}
func (psm *PageSessionManager) manage() {
defer psm.waiter.Done()
var ticker = time.NewTicker(psm.idleCheckInterval)
defer ticker.Stop()
doLoop:
for {
select {
case <-psm.ctx.Done():
return
case doFn := <-psm.doFunc:
doFn()
case <-ticker.C:
// clean house
var nowTime = time.Now()
for key, session := range psm.sessions {
if nowTime.Sub(session.lastUsed) < psm.maxIdle {
continue doLoop
}
delete(psm.sessions, key)
session.Close()
}
}
}
}
var _ sabuhp.TransportResponse = (*Pages)(nil)
type OnPages func(route string, p *Pages)
// Pages exists to provider an organization
// around sessions and pages.
//
// It implements the http.Handler interface.
type Pages struct {
logger Logger
prefix string
theme *styled.Theme
router *mixer.Mux
maxIdle time.Duration
idleCheck time.Duration
ctx context.Context
tr sabuhp.Transport
sl sync.RWMutex
waiter sync.WaitGroup
managers map[string]*PageSessionManager
onNewPage *PageNotification
}
func WithPages(
ctx context.Context,
logger Logger,
prefix string,
theme *styled.Theme,
transport sabuhp.Transport,
notFound Handler,
) *Pages {
return NewPages(
ctx,
logger,
prefix,
DefaultMaxPageIdleness,
DefaultPageIdlenessChecksInterval,
theme,
transport,
notFound,
)
}
func NewPages(
ctx context.Context,
logger Logger,
prefix string,
maxIdle time.Duration,
idleCheck time.Duration,
theme *styled.Theme,
transport sabuhp.Transport,
notFound Handler,
) *Pages {
if notFound == nil {
notFound = DefaultNotFound{}
}
return &Pages{
theme: theme,
tr: transport,
prefix: prefix,
ctx: ctx,
logger: logger,
maxIdle: maxIdle,
idleCheck: idleCheck,
onNewPage: NewPageNotification(),
managers: map[string]*PageSessionManager{},
router: mixer.NewMux(mixer.MuxConfig{
RootPath: prefix,
NotFound: mixer.HandlerFunc(func(message *sabuhp.Message) (*sabuhp.Message, error) {
var d Data
d.Message = message
d.Path = message.Path
if sessionId, sessionIdErr := getSessionId(message); sessionIdErr == nil {
d.SessionId = sessionId
}
var payload, contentType, err = writeNode(message.ContentType, notFound.Handle(d))
if err != nil {
return nil, nerror.WrapOnly(err)
}
return &sabuhp.Message{
MessageMeta: sabuhp.MessageMeta{
Path: message.Path,
ContentType: contentType,
SuggestedStatusCode: 200,
Headers: sabuhp.Header{
HeaderSessionIdName: []string{d.SessionId},
},
Cookies: []sabuhp.Cookie{
{
Name: QueryAndCookieSessionIdName,
Value: d.SessionId,
},
},
},
FromAddr: prefix,
ID: nxid.ID{},
Topic: message.Path,
Delivery: message.Delivery,
Payload: payload,
Metadata: sabuhp.Params{},
Params: sabuhp.Params{},
}, nil
}),
}),
}
}
func (p *Pages) Wait() {
p.waiter.Wait()
}
type PagesStat struct {
TotalPages int
PageSessions map[string]SessionStat
}
// Stats returns current states of existing pages, creators.
func (p *Pages) Stats() PagesStat {
var totalPages int
var stats = map[string]SessionStat{}
var stack = njson.Log(p.logger)
p.sl.RLock()
totalPages = len(p.managers)
for page, manager := range p.managers {
stat, err := manager.Stat()
if err != nil {
stack.New().LError().Error("error", err).
Message("failed to get stat for page").
String("page", page).
End()
continue
}
stats[page] = stat
}
p.sl.RUnlock()
var stat PagesStat
stat.PageSessions = stats
stat.TotalPages = totalPages
return stat
}
// GetManager returns page creator registered for a giving page page
func (p *Pages) Get(pageName string) (*PageSessionManager, error) {
var pageHandle = cleanAllSlashes(handlePath(pageName))
var prefixPage = cleanAllSlashes(handlePath(p.prefix, pageName))
p.sl.RLock()
var manager, exists = p.managers[prefixPage]
if !exists {
manager, exists = p.managers[pageHandle]
}
p.sl.RUnlock()
if !exists {
return nil, nerror.New("not found")
}
return manager, nil
}
// HasPage returns true/false if a giving page exists.
func (p *Pages) Has(pageName string) bool {
var pageHandle = cleanAllSlashes(handlePath(pageName))
var prefixPage = cleanAllSlashes(handlePath(p.prefix, pageName))
p.sl.RLock()
if _, exists := p.managers[prefixPage]; exists {
p.sl.RUnlock()
return true
}
if _, exists := p.managers[pageHandle]; exists {
p.sl.RUnlock()
return true
}
p.sl.RUnlock()
return false
}
// AddCreator adds a new PageCreator for a giving page routePath.
// It returns true/false based on whether the routePath and creator was registered or if there was routePath conflict.
func (p *Pages) Add(pageName string, creatorFunc PageCreator) error {
var prefixPage = cleanAllSlashes(handlePath(p.prefix, pageName))
var routerPath = cleanAllSlashes(handlePath(pageName))
var routerPathForMore = cleanAllSlashes(handlePath(p.prefix, pageName, "*path"))
if p.Has(prefixPage) {
return nerror.New("already exists")
}
var manager = NewPageSessionManager(p.ctx, prefixPage, p.maxIdle, p.idleCheck, creatorFunc, p.theme, p.onNewPage.Emit)
manager.Start()
p.waiter.Add(1)
go func() {
defer p.waiter.Done()
manager.Wait()
p.sl.Lock()
delete(p.managers, prefixPage)
p.sl.Unlock()
}()
p.sl.Lock()
p.managers[prefixPage] = manager
p.sl.Unlock()
var handler = createHandler(prefixPage, manager, p.tr)
p.router.Serve(routerPath, handler)
p.router.Serve(routerPathForMore, handler)
p.onNewPage.Emit(prefixPage, nil)
return nil
}
func (p *Pages) AddOnPageRoute(cb OnPages) {
p.onNewPage.Add(func(route string, _ *Page) {
cb(route, p)
})
}
func (p *Pages) Handle(message *sabuhp.Message, tr sabuhp.Transport) sabuhp.MessageErr {
var reply, err = p.router.ServeRoute(message)
if err != nil {
return sabuhp.WrapErr(err, false)
}
var sendErr error
if reply.Delivery == sabuhp.SendToAll {
sendErr = tr.SendToAll(reply, -1)
} else {
sendErr = tr.SendToOne(reply, -1)
}
if sendErr != nil {
return sabuhp.WrapErr(sendErr, false)
}
return nil
}
func createHandler(pagePath string, manager *PageSessionManager, tr sabuhp.Transport) mixer.Handler {
return mixer.HandlerFunc(func(message *sabuhp.Message) (*sabuhp.Message, error) {
var d Data
d.Message = message
d.Path = message.Path
if sessionId, sessionIdErr := getSessionId(message); sessionIdErr == nil {
d.SessionId = sessionId
}
var page *Page
var sessionErr error
page, sessionErr = manager.Session(d.SessionId)
if sessionErr != nil {
page, d.SessionId, sessionErr = manager.NewSession(tr)
}
if sessionErr != nil {
return nil, nerror.WrapOnly(sessionErr)
}
var renderNode = page.Render(d)
var payload, contentType, writeErr = writeNode(message.ContentType, renderNode)
if writeErr != nil {
return nil, nerror.WrapOnly(writeErr)
}
return &sabuhp.Message{
MessageMeta: sabuhp.MessageMeta{
Path: message.Path,
ContentType: contentType,
SuggestedStatusCode: 200,
Headers: sabuhp.Header{
HeaderSessionIdName: []string{d.SessionId},
},
Cookies: []sabuhp.Cookie{
{
Name: QueryAndCookieSessionIdName,
Value: d.SessionId,
},
},
},
ID: nxid.ID{},
Topic: message.Path,
FromAddr: pagePath,
Delivery: message.Delivery,
Payload: payload,
Metadata: sabuhp.Params{},
Params: sabuhp.Params{},
}, nil
})
}
func writeNode(contentType string, renderNode *domu.Node) ([]byte, string, error) {
var content string
var renderedOutput = bytes.NewBuffer(make([]byte, 0, 512))
switch contentType {
case VoidHTMLDiff:
content = VoidHTMLDiff
if renderErr := RenderVoidHTMLDiff(renderNode, renderedOutput); renderErr != nil {
return nil, "", nerror.WrapOnly(renderErr)
}
case VoidJSON:
content = VoidJSON
if renderErr := RenderVoidJSON(renderNode, renderedOutput); renderErr != nil {
return nil, "", nerror.WrapOnly(renderErr)
}
case VoidJSONStream:
content = VoidJSONStream
if renderErr := RenderVoidJSONStream(renderNode, renderedOutput); renderErr != nil {
return nil, "", nerror.WrapOnly(renderErr)
}
case VoidHTML:
fallthrough
default:
content = PlainHTML
if renderErr := RenderVoidHTML(renderNode, renderedOutput); renderErr != nil {
return nil, "", nerror.WrapOnly(renderErr)
}
}
return renderedOutput.Bytes(), content, nil
}
func getSessionId(message *sabuhp.Message) (string, error) {
var sessionIdFromQuery = strings.TrimSpace(message.Query.Get(QueryAndCookieSessionIdName))
var sessionIdFromHeader = strings.TrimSpace(message.Headers.Get(HeaderSessionIdName))
var isASession = len(sessionIdFromHeader) != 0 || len(sessionIdFromQuery) != 0
if !isASession {
return "", nerror.New("not a session")
}
if len(sessionIdFromQuery) != 0 {
return sessionIdFromQuery, nil
}
return sessionIdFromHeader, nil
}
| {
psm.rl.Unlock()
return
} | conditional_block |
pages.go | package peji
import (
"bytes"
"context"
"strings"
"sync"
"time"
"github.com/influx6/npkg/njson"
"github.com/influx6/sabuhp"
"github.com/influx6/sabuhp/mixer"
"github.com/influx6/npkg/nerror"
"github.com/influx6/npkg/nxid"
"github.com/influx6/groundlayer/pkg/domu"
"github.com/influx6/groundlayer/pkg/styled"
)
const (
DefaultMaxPageIdleness = 5 * time.Minute
DefaultPageIdlenessChecksInterval = 2 * time.Minute
HeaderSessionIdName = "X-Void-Id"
QueryAndCookieSessionIdName = "_groundlayer_id"
)
type PageCreator func(name string, theme *styled.Theme, pubsub sabuhp.Transport) *Page
type PageSession struct {
lastUsed time.Time
Id nxid.ID
Page *Page
}
func (ps *PageSession) Close() {
ps.lastUsed = time.Time{}
ps.Page.Close()
}
type Logger interface {
Log(json *njson.JSON)
}
type PageSessionManager struct {
routePath string
maxIdle time.Duration
idleCheckInterval time.Duration
onAddRoute OnPage
theme *styled.Theme
Creator PageCreator
sessions map[string]PageSession
doFunc chan func()
canceler context.CancelFunc
ctx context.Context
waiter sync.WaitGroup
starter sync.Once
ender sync.Once
rl sync.Mutex
routes map[string]bool
}
func NewPageSessionManager(
ctx context.Context,
routePath string,
maxIdle time.Duration,
idleCheckInterval time.Duration,
creator PageCreator,
theme *styled.Theme,
onAddRoute OnPage,
) *PageSessionManager {
var newCtx, canceler = context.WithCancel(ctx)
return &PageSessionManager{
routePath: routePath,
ctx: newCtx,
canceler: canceler,
maxIdle: maxIdle,
Creator: creator,
theme: theme,
onAddRoute: onAddRoute,
idleCheckInterval: idleCheckInterval,
doFunc: make(chan func(), 0),
routes: map[string]bool{},
sessions: map[string]PageSession{},
}
}
func (psm *PageSessionManager) GetName() string {
return psm.routePath
}
func (psm *PageSessionManager) Wait() {
psm.waiter.Wait()
}
func (psm *PageSessionManager) Stop() {
psm.starter.Do(func() {
psm.canceler()
psm.waiter.Wait()
})
}
func (psm *PageSessionManager) Start() {
psm.starter.Do(func() {
psm.waiter.Add(1)
go psm.manage()
})
}
type SessionStat struct {
PageName string
TotalSessions int
Sessions map[string]time.Time
}
func (psm *PageSessionManager) Stat() (SessionStat, error) {
var session = make(chan SessionStat, 1)
psm.doFunc <- func() {
var stat SessionStat
stat.PageName = psm.routePath
stat.Sessions = map[string]time.Time{}
stat.TotalSessions = len(psm.sessions)
for _, ss := range psm.sessions {
stat.Sessions[ss.Id.String()] = ss.lastUsed
}
session <- stat
}
select {
case <-psm.ctx.Done():
return SessionStat{}, nerror.WrapOnly(psm.ctx.Err())
case ss := <-session:
return ss, nil
}
}
// Retire returns closes a specific session using giving id.
func (psm *PageSessionManager) Retire(sessionId string) error {
var session = make(chan error, 1)
psm.doFunc <- func() {
if ss, hasSession := psm.sessions[sessionId]; hasSession {
delete(psm.sessions, sessionId)
ss.Page.Close()
}
session <- nil
}
select {
case <-psm.ctx.Done():
return nerror.WrapOnly(psm.ctx.Err())
case <-session:
return nil
}
}
// Session returns a giving page for a giving sessionId.
func (psm *PageSessionManager) Session(sessionId string) (*Page, error) {
var session = make(chan *Page, 1)
var errs = make(chan error, 1)
psm.doFunc <- func() {
var ss, hasSession = psm.sessions[sessionId]
if !hasSession {
errs <- nil
return
}
session <- ss.Page
}
select {
case <-psm.ctx.Done():
return nil, nerror.WrapOnly(psm.ctx.Err())
case err := <-errs:
return nil, nerror.WrapOnly(err)
case page := <-session:
return page, nil
}
}
// NewSession returns a new session page and session id.
func (psm *PageSessionManager) NewSession(t sabuhp.Transport) (*Page, string, error) {
var session = make(chan PageSession, 1)
psm.doFunc <- func() {
var ps PageSession
ps.Id = nxid.New()
ps.lastUsed = time.Now()
ps.Page = psm.Creator(psm.routePath, psm.theme, t)
psm.sessions[ps.Id.String()] = ps
ps.Page.OnPageAdd(psm.manageAddPageRoute)
session <- ps
}
select {
case <-psm.ctx.Done():
return nil, "", nerror.WrapOnly(psm.ctx.Err())
case ss := <-session:
return ss.Page, ss.Id.String(), nil
}
}
func (psm *PageSessionManager) manageAddPageRoute(pageRoute string, p *Page) {
psm.rl.Lock()
if _, hasRoute := psm.routes[pageRoute]; hasRoute {
psm.rl.Unlock()
return
}
psm.routes[pageRoute] = true
psm.rl.Unlock()
psm.onAddRoute(pageRoute, p)
}
func (psm *PageSessionManager) manage() {
defer psm.waiter.Done()
var ticker = time.NewTicker(psm.idleCheckInterval)
defer ticker.Stop()
doLoop:
for {
select {
case <-psm.ctx.Done():
return
case doFn := <-psm.doFunc:
doFn()
case <-ticker.C:
// clean house
var nowTime = time.Now()
for key, session := range psm.sessions {
if nowTime.Sub(session.lastUsed) < psm.maxIdle {
continue doLoop
}
delete(psm.sessions, key)
session.Close()
}
}
}
}
var _ sabuhp.TransportResponse = (*Pages)(nil)
type OnPages func(route string, p *Pages)
// Pages exists to provider an organization
// around sessions and pages.
//
// It implements the http.Handler interface.
type Pages struct {
logger Logger
prefix string
theme *styled.Theme
router *mixer.Mux
maxIdle time.Duration
idleCheck time.Duration
ctx context.Context
tr sabuhp.Transport
sl sync.RWMutex
waiter sync.WaitGroup
managers map[string]*PageSessionManager
onNewPage *PageNotification
}
func WithPages(
ctx context.Context,
logger Logger,
prefix string,
theme *styled.Theme,
transport sabuhp.Transport,
notFound Handler,
) *Pages {
return NewPages(
ctx,
logger,
prefix,
DefaultMaxPageIdleness,
DefaultPageIdlenessChecksInterval,
theme,
transport,
notFound,
)
}
func NewPages(
ctx context.Context,
logger Logger,
prefix string,
maxIdle time.Duration,
idleCheck time.Duration,
theme *styled.Theme,
transport sabuhp.Transport,
notFound Handler,
) *Pages {
if notFound == nil {
notFound = DefaultNotFound{}
}
return &Pages{
theme: theme,
tr: transport,
prefix: prefix,
ctx: ctx,
logger: logger,
maxIdle: maxIdle,
idleCheck: idleCheck,
onNewPage: NewPageNotification(),
managers: map[string]*PageSessionManager{},
router: mixer.NewMux(mixer.MuxConfig{
RootPath: prefix,
NotFound: mixer.HandlerFunc(func(message *sabuhp.Message) (*sabuhp.Message, error) {
var d Data
d.Message = message
d.Path = message.Path
if sessionId, sessionIdErr := getSessionId(message); sessionIdErr == nil {
d.SessionId = sessionId
}
var payload, contentType, err = writeNode(message.ContentType, notFound.Handle(d))
if err != nil {
return nil, nerror.WrapOnly(err)
}
return &sabuhp.Message{
MessageMeta: sabuhp.MessageMeta{
Path: message.Path,
ContentType: contentType,
SuggestedStatusCode: 200,
Headers: sabuhp.Header{
HeaderSessionIdName: []string{d.SessionId},
},
Cookies: []sabuhp.Cookie{
{
Name: QueryAndCookieSessionIdName,
Value: d.SessionId,
},
},
},
FromAddr: prefix,
ID: nxid.ID{},
Topic: message.Path,
Delivery: message.Delivery,
Payload: payload,
Metadata: sabuhp.Params{},
Params: sabuhp.Params{},
}, nil
}),
}),
}
}
func (p *Pages) Wait() {
p.waiter.Wait()
}
type PagesStat struct {
TotalPages int
PageSessions map[string]SessionStat
}
// Stats returns current states of existing pages, creators.
func (p *Pages) Stats() PagesStat {
var totalPages int
var stats = map[string]SessionStat{}
var stack = njson.Log(p.logger)
p.sl.RLock()
totalPages = len(p.managers)
for page, manager := range p.managers {
stat, err := manager.Stat()
if err != nil {
stack.New().LError().Error("error", err).
Message("failed to get stat for page").
String("page", page).
End()
continue
}
stats[page] = stat
}
p.sl.RUnlock()
var stat PagesStat
stat.PageSessions = stats
stat.TotalPages = totalPages
return stat
}
// GetManager returns page creator registered for a giving page page
func (p *Pages) | (pageName string) (*PageSessionManager, error) {
var pageHandle = cleanAllSlashes(handlePath(pageName))
var prefixPage = cleanAllSlashes(handlePath(p.prefix, pageName))
p.sl.RLock()
var manager, exists = p.managers[prefixPage]
if !exists {
manager, exists = p.managers[pageHandle]
}
p.sl.RUnlock()
if !exists {
return nil, nerror.New("not found")
}
return manager, nil
}
// HasPage returns true/false if a giving page exists.
func (p *Pages) Has(pageName string) bool {
var pageHandle = cleanAllSlashes(handlePath(pageName))
var prefixPage = cleanAllSlashes(handlePath(p.prefix, pageName))
p.sl.RLock()
if _, exists := p.managers[prefixPage]; exists {
p.sl.RUnlock()
return true
}
if _, exists := p.managers[pageHandle]; exists {
p.sl.RUnlock()
return true
}
p.sl.RUnlock()
return false
}
// AddCreator adds a new PageCreator for a giving page routePath.
// It returns true/false based on whether the routePath and creator was registered or if there was routePath conflict.
func (p *Pages) Add(pageName string, creatorFunc PageCreator) error {
var prefixPage = cleanAllSlashes(handlePath(p.prefix, pageName))
var routerPath = cleanAllSlashes(handlePath(pageName))
var routerPathForMore = cleanAllSlashes(handlePath(p.prefix, pageName, "*path"))
if p.Has(prefixPage) {
return nerror.New("already exists")
}
var manager = NewPageSessionManager(p.ctx, prefixPage, p.maxIdle, p.idleCheck, creatorFunc, p.theme, p.onNewPage.Emit)
manager.Start()
p.waiter.Add(1)
go func() {
defer p.waiter.Done()
manager.Wait()
p.sl.Lock()
delete(p.managers, prefixPage)
p.sl.Unlock()
}()
p.sl.Lock()
p.managers[prefixPage] = manager
p.sl.Unlock()
var handler = createHandler(prefixPage, manager, p.tr)
p.router.Serve(routerPath, handler)
p.router.Serve(routerPathForMore, handler)
p.onNewPage.Emit(prefixPage, nil)
return nil
}
func (p *Pages) AddOnPageRoute(cb OnPages) {
p.onNewPage.Add(func(route string, _ *Page) {
cb(route, p)
})
}
func (p *Pages) Handle(message *sabuhp.Message, tr sabuhp.Transport) sabuhp.MessageErr {
var reply, err = p.router.ServeRoute(message)
if err != nil {
return sabuhp.WrapErr(err, false)
}
var sendErr error
if reply.Delivery == sabuhp.SendToAll {
sendErr = tr.SendToAll(reply, -1)
} else {
sendErr = tr.SendToOne(reply, -1)
}
if sendErr != nil {
return sabuhp.WrapErr(sendErr, false)
}
return nil
}
func createHandler(pagePath string, manager *PageSessionManager, tr sabuhp.Transport) mixer.Handler {
return mixer.HandlerFunc(func(message *sabuhp.Message) (*sabuhp.Message, error) {
var d Data
d.Message = message
d.Path = message.Path
if sessionId, sessionIdErr := getSessionId(message); sessionIdErr == nil {
d.SessionId = sessionId
}
var page *Page
var sessionErr error
page, sessionErr = manager.Session(d.SessionId)
if sessionErr != nil {
page, d.SessionId, sessionErr = manager.NewSession(tr)
}
if sessionErr != nil {
return nil, nerror.WrapOnly(sessionErr)
}
var renderNode = page.Render(d)
var payload, contentType, writeErr = writeNode(message.ContentType, renderNode)
if writeErr != nil {
return nil, nerror.WrapOnly(writeErr)
}
return &sabuhp.Message{
MessageMeta: sabuhp.MessageMeta{
Path: message.Path,
ContentType: contentType,
SuggestedStatusCode: 200,
Headers: sabuhp.Header{
HeaderSessionIdName: []string{d.SessionId},
},
Cookies: []sabuhp.Cookie{
{
Name: QueryAndCookieSessionIdName,
Value: d.SessionId,
},
},
},
ID: nxid.ID{},
Topic: message.Path,
FromAddr: pagePath,
Delivery: message.Delivery,
Payload: payload,
Metadata: sabuhp.Params{},
Params: sabuhp.Params{},
}, nil
})
}
func writeNode(contentType string, renderNode *domu.Node) ([]byte, string, error) {
var content string
var renderedOutput = bytes.NewBuffer(make([]byte, 0, 512))
switch contentType {
case VoidHTMLDiff:
content = VoidHTMLDiff
if renderErr := RenderVoidHTMLDiff(renderNode, renderedOutput); renderErr != nil {
return nil, "", nerror.WrapOnly(renderErr)
}
case VoidJSON:
content = VoidJSON
if renderErr := RenderVoidJSON(renderNode, renderedOutput); renderErr != nil {
return nil, "", nerror.WrapOnly(renderErr)
}
case VoidJSONStream:
content = VoidJSONStream
if renderErr := RenderVoidJSONStream(renderNode, renderedOutput); renderErr != nil {
return nil, "", nerror.WrapOnly(renderErr)
}
case VoidHTML:
fallthrough
default:
content = PlainHTML
if renderErr := RenderVoidHTML(renderNode, renderedOutput); renderErr != nil {
return nil, "", nerror.WrapOnly(renderErr)
}
}
return renderedOutput.Bytes(), content, nil
}
func getSessionId(message *sabuhp.Message) (string, error) {
var sessionIdFromQuery = strings.TrimSpace(message.Query.Get(QueryAndCookieSessionIdName))
var sessionIdFromHeader = strings.TrimSpace(message.Headers.Get(HeaderSessionIdName))
var isASession = len(sessionIdFromHeader) != 0 || len(sessionIdFromQuery) != 0
if !isASession {
return "", nerror.New("not a session")
}
if len(sessionIdFromQuery) != 0 {
return sessionIdFromQuery, nil
}
return sessionIdFromHeader, nil
}
| Get | identifier_name |
pages.go | package peji
import (
"bytes"
"context"
"strings"
"sync"
"time"
"github.com/influx6/npkg/njson"
"github.com/influx6/sabuhp"
"github.com/influx6/sabuhp/mixer"
"github.com/influx6/npkg/nerror"
"github.com/influx6/npkg/nxid"
"github.com/influx6/groundlayer/pkg/domu"
"github.com/influx6/groundlayer/pkg/styled"
)
const (
DefaultMaxPageIdleness = 5 * time.Minute
DefaultPageIdlenessChecksInterval = 2 * time.Minute
HeaderSessionIdName = "X-Void-Id"
QueryAndCookieSessionIdName = "_groundlayer_id"
)
type PageCreator func(name string, theme *styled.Theme, pubsub sabuhp.Transport) *Page
type PageSession struct {
lastUsed time.Time
Id nxid.ID
Page *Page
}
func (ps *PageSession) Close() {
ps.lastUsed = time.Time{}
ps.Page.Close()
}
type Logger interface {
Log(json *njson.JSON)
}
type PageSessionManager struct {
routePath string
maxIdle time.Duration
idleCheckInterval time.Duration
onAddRoute OnPage
theme *styled.Theme
Creator PageCreator
sessions map[string]PageSession
doFunc chan func()
canceler context.CancelFunc
ctx context.Context
waiter sync.WaitGroup
starter sync.Once
ender sync.Once
rl sync.Mutex
routes map[string]bool
}
func NewPageSessionManager(
ctx context.Context,
routePath string,
maxIdle time.Duration,
idleCheckInterval time.Duration,
creator PageCreator,
theme *styled.Theme,
onAddRoute OnPage,
) *PageSessionManager {
var newCtx, canceler = context.WithCancel(ctx)
return &PageSessionManager{
routePath: routePath,
ctx: newCtx,
canceler: canceler,
maxIdle: maxIdle,
Creator: creator,
theme: theme,
onAddRoute: onAddRoute,
idleCheckInterval: idleCheckInterval,
doFunc: make(chan func(), 0),
routes: map[string]bool{},
sessions: map[string]PageSession{},
}
}
func (psm *PageSessionManager) GetName() string |
func (psm *PageSessionManager) Wait() {
psm.waiter.Wait()
}
func (psm *PageSessionManager) Stop() {
psm.starter.Do(func() {
psm.canceler()
psm.waiter.Wait()
})
}
func (psm *PageSessionManager) Start() {
psm.starter.Do(func() {
psm.waiter.Add(1)
go psm.manage()
})
}
type SessionStat struct {
PageName string
TotalSessions int
Sessions map[string]time.Time
}
func (psm *PageSessionManager) Stat() (SessionStat, error) {
var session = make(chan SessionStat, 1)
psm.doFunc <- func() {
var stat SessionStat
stat.PageName = psm.routePath
stat.Sessions = map[string]time.Time{}
stat.TotalSessions = len(psm.sessions)
for _, ss := range psm.sessions {
stat.Sessions[ss.Id.String()] = ss.lastUsed
}
session <- stat
}
select {
case <-psm.ctx.Done():
return SessionStat{}, nerror.WrapOnly(psm.ctx.Err())
case ss := <-session:
return ss, nil
}
}
// Retire returns closes a specific session using giving id.
func (psm *PageSessionManager) Retire(sessionId string) error {
var session = make(chan error, 1)
psm.doFunc <- func() {
if ss, hasSession := psm.sessions[sessionId]; hasSession {
delete(psm.sessions, sessionId)
ss.Page.Close()
}
session <- nil
}
select {
case <-psm.ctx.Done():
return nerror.WrapOnly(psm.ctx.Err())
case <-session:
return nil
}
}
// Session returns a giving page for a giving sessionId.
func (psm *PageSessionManager) Session(sessionId string) (*Page, error) {
var session = make(chan *Page, 1)
var errs = make(chan error, 1)
psm.doFunc <- func() {
var ss, hasSession = psm.sessions[sessionId]
if !hasSession {
errs <- nil
return
}
session <- ss.Page
}
select {
case <-psm.ctx.Done():
return nil, nerror.WrapOnly(psm.ctx.Err())
case err := <-errs:
return nil, nerror.WrapOnly(err)
case page := <-session:
return page, nil
}
}
// NewSession returns a new session page and session id.
func (psm *PageSessionManager) NewSession(t sabuhp.Transport) (*Page, string, error) {
var session = make(chan PageSession, 1)
psm.doFunc <- func() {
var ps PageSession
ps.Id = nxid.New()
ps.lastUsed = time.Now()
ps.Page = psm.Creator(psm.routePath, psm.theme, t)
psm.sessions[ps.Id.String()] = ps
ps.Page.OnPageAdd(psm.manageAddPageRoute)
session <- ps
}
select {
case <-psm.ctx.Done():
return nil, "", nerror.WrapOnly(psm.ctx.Err())
case ss := <-session:
return ss.Page, ss.Id.String(), nil
}
}
func (psm *PageSessionManager) manageAddPageRoute(pageRoute string, p *Page) {
psm.rl.Lock()
if _, hasRoute := psm.routes[pageRoute]; hasRoute {
psm.rl.Unlock()
return
}
psm.routes[pageRoute] = true
psm.rl.Unlock()
psm.onAddRoute(pageRoute, p)
}
func (psm *PageSessionManager) manage() {
defer psm.waiter.Done()
var ticker = time.NewTicker(psm.idleCheckInterval)
defer ticker.Stop()
doLoop:
for {
select {
case <-psm.ctx.Done():
return
case doFn := <-psm.doFunc:
doFn()
case <-ticker.C:
// clean house
var nowTime = time.Now()
for key, session := range psm.sessions {
if nowTime.Sub(session.lastUsed) < psm.maxIdle {
continue doLoop
}
delete(psm.sessions, key)
session.Close()
}
}
}
}
var _ sabuhp.TransportResponse = (*Pages)(nil)
type OnPages func(route string, p *Pages)
// Pages exists to provider an organization
// around sessions and pages.
//
// It implements the http.Handler interface.
type Pages struct {
logger Logger
prefix string
theme *styled.Theme
router *mixer.Mux
maxIdle time.Duration
idleCheck time.Duration
ctx context.Context
tr sabuhp.Transport
sl sync.RWMutex
waiter sync.WaitGroup
managers map[string]*PageSessionManager
onNewPage *PageNotification
}
func WithPages(
ctx context.Context,
logger Logger,
prefix string,
theme *styled.Theme,
transport sabuhp.Transport,
notFound Handler,
) *Pages {
return NewPages(
ctx,
logger,
prefix,
DefaultMaxPageIdleness,
DefaultPageIdlenessChecksInterval,
theme,
transport,
notFound,
)
}
func NewPages(
ctx context.Context,
logger Logger,
prefix string,
maxIdle time.Duration,
idleCheck time.Duration,
theme *styled.Theme,
transport sabuhp.Transport,
notFound Handler,
) *Pages {
if notFound == nil {
notFound = DefaultNotFound{}
}
return &Pages{
theme: theme,
tr: transport,
prefix: prefix,
ctx: ctx,
logger: logger,
maxIdle: maxIdle,
idleCheck: idleCheck,
onNewPage: NewPageNotification(),
managers: map[string]*PageSessionManager{},
router: mixer.NewMux(mixer.MuxConfig{
RootPath: prefix,
NotFound: mixer.HandlerFunc(func(message *sabuhp.Message) (*sabuhp.Message, error) {
var d Data
d.Message = message
d.Path = message.Path
if sessionId, sessionIdErr := getSessionId(message); sessionIdErr == nil {
d.SessionId = sessionId
}
var payload, contentType, err = writeNode(message.ContentType, notFound.Handle(d))
if err != nil {
return nil, nerror.WrapOnly(err)
}
return &sabuhp.Message{
MessageMeta: sabuhp.MessageMeta{
Path: message.Path,
ContentType: contentType,
SuggestedStatusCode: 200,
Headers: sabuhp.Header{
HeaderSessionIdName: []string{d.SessionId},
},
Cookies: []sabuhp.Cookie{
{
Name: QueryAndCookieSessionIdName,
Value: d.SessionId,
},
},
},
FromAddr: prefix,
ID: nxid.ID{},
Topic: message.Path,
Delivery: message.Delivery,
Payload: payload,
Metadata: sabuhp.Params{},
Params: sabuhp.Params{},
}, nil
}),
}),
}
}
func (p *Pages) Wait() {
p.waiter.Wait()
}
type PagesStat struct {
TotalPages int
PageSessions map[string]SessionStat
}
// Stats returns current states of existing pages, creators.
func (p *Pages) Stats() PagesStat {
var totalPages int
var stats = map[string]SessionStat{}
var stack = njson.Log(p.logger)
p.sl.RLock()
totalPages = len(p.managers)
for page, manager := range p.managers {
stat, err := manager.Stat()
if err != nil {
stack.New().LError().Error("error", err).
Message("failed to get stat for page").
String("page", page).
End()
continue
}
stats[page] = stat
}
p.sl.RUnlock()
var stat PagesStat
stat.PageSessions = stats
stat.TotalPages = totalPages
return stat
}
// GetManager returns page creator registered for a giving page page
func (p *Pages) Get(pageName string) (*PageSessionManager, error) {
var pageHandle = cleanAllSlashes(handlePath(pageName))
var prefixPage = cleanAllSlashes(handlePath(p.prefix, pageName))
p.sl.RLock()
var manager, exists = p.managers[prefixPage]
if !exists {
manager, exists = p.managers[pageHandle]
}
p.sl.RUnlock()
if !exists {
return nil, nerror.New("not found")
}
return manager, nil
}
// HasPage returns true/false if a giving page exists.
func (p *Pages) Has(pageName string) bool {
var pageHandle = cleanAllSlashes(handlePath(pageName))
var prefixPage = cleanAllSlashes(handlePath(p.prefix, pageName))
p.sl.RLock()
if _, exists := p.managers[prefixPage]; exists {
p.sl.RUnlock()
return true
}
if _, exists := p.managers[pageHandle]; exists {
p.sl.RUnlock()
return true
}
p.sl.RUnlock()
return false
}
// AddCreator adds a new PageCreator for a giving page routePath.
// It returns true/false based on whether the routePath and creator was registered or if there was routePath conflict.
func (p *Pages) Add(pageName string, creatorFunc PageCreator) error {
var prefixPage = cleanAllSlashes(handlePath(p.prefix, pageName))
var routerPath = cleanAllSlashes(handlePath(pageName))
var routerPathForMore = cleanAllSlashes(handlePath(p.prefix, pageName, "*path"))
if p.Has(prefixPage) {
return nerror.New("already exists")
}
var manager = NewPageSessionManager(p.ctx, prefixPage, p.maxIdle, p.idleCheck, creatorFunc, p.theme, p.onNewPage.Emit)
manager.Start()
p.waiter.Add(1)
go func() {
defer p.waiter.Done()
manager.Wait()
p.sl.Lock()
delete(p.managers, prefixPage)
p.sl.Unlock()
}()
p.sl.Lock()
p.managers[prefixPage] = manager
p.sl.Unlock()
var handler = createHandler(prefixPage, manager, p.tr)
p.router.Serve(routerPath, handler)
p.router.Serve(routerPathForMore, handler)
p.onNewPage.Emit(prefixPage, nil)
return nil
}
func (p *Pages) AddOnPageRoute(cb OnPages) {
p.onNewPage.Add(func(route string, _ *Page) {
cb(route, p)
})
}
func (p *Pages) Handle(message *sabuhp.Message, tr sabuhp.Transport) sabuhp.MessageErr {
var reply, err = p.router.ServeRoute(message)
if err != nil {
return sabuhp.WrapErr(err, false)
}
var sendErr error
if reply.Delivery == sabuhp.SendToAll {
sendErr = tr.SendToAll(reply, -1)
} else {
sendErr = tr.SendToOne(reply, -1)
}
if sendErr != nil {
return sabuhp.WrapErr(sendErr, false)
}
return nil
}
func createHandler(pagePath string, manager *PageSessionManager, tr sabuhp.Transport) mixer.Handler {
return mixer.HandlerFunc(func(message *sabuhp.Message) (*sabuhp.Message, error) {
var d Data
d.Message = message
d.Path = message.Path
if sessionId, sessionIdErr := getSessionId(message); sessionIdErr == nil {
d.SessionId = sessionId
}
var page *Page
var sessionErr error
page, sessionErr = manager.Session(d.SessionId)
if sessionErr != nil {
page, d.SessionId, sessionErr = manager.NewSession(tr)
}
if sessionErr != nil {
return nil, nerror.WrapOnly(sessionErr)
}
var renderNode = page.Render(d)
var payload, contentType, writeErr = writeNode(message.ContentType, renderNode)
if writeErr != nil {
return nil, nerror.WrapOnly(writeErr)
}
return &sabuhp.Message{
MessageMeta: sabuhp.MessageMeta{
Path: message.Path,
ContentType: contentType,
SuggestedStatusCode: 200,
Headers: sabuhp.Header{
HeaderSessionIdName: []string{d.SessionId},
},
Cookies: []sabuhp.Cookie{
{
Name: QueryAndCookieSessionIdName,
Value: d.SessionId,
},
},
},
ID: nxid.ID{},
Topic: message.Path,
FromAddr: pagePath,
Delivery: message.Delivery,
Payload: payload,
Metadata: sabuhp.Params{},
Params: sabuhp.Params{},
}, nil
})
}
func writeNode(contentType string, renderNode *domu.Node) ([]byte, string, error) {
var content string
var renderedOutput = bytes.NewBuffer(make([]byte, 0, 512))
switch contentType {
case VoidHTMLDiff:
content = VoidHTMLDiff
if renderErr := RenderVoidHTMLDiff(renderNode, renderedOutput); renderErr != nil {
return nil, "", nerror.WrapOnly(renderErr)
}
case VoidJSON:
content = VoidJSON
if renderErr := RenderVoidJSON(renderNode, renderedOutput); renderErr != nil {
return nil, "", nerror.WrapOnly(renderErr)
}
case VoidJSONStream:
content = VoidJSONStream
if renderErr := RenderVoidJSONStream(renderNode, renderedOutput); renderErr != nil {
return nil, "", nerror.WrapOnly(renderErr)
}
case VoidHTML:
fallthrough
default:
content = PlainHTML
if renderErr := RenderVoidHTML(renderNode, renderedOutput); renderErr != nil {
return nil, "", nerror.WrapOnly(renderErr)
}
}
return renderedOutput.Bytes(), content, nil
}
func getSessionId(message *sabuhp.Message) (string, error) {
var sessionIdFromQuery = strings.TrimSpace(message.Query.Get(QueryAndCookieSessionIdName))
var sessionIdFromHeader = strings.TrimSpace(message.Headers.Get(HeaderSessionIdName))
var isASession = len(sessionIdFromHeader) != 0 || len(sessionIdFromQuery) != 0
if !isASession {
return "", nerror.New("not a session")
}
if len(sessionIdFromQuery) != 0 {
return sessionIdFromQuery, nil
}
return sessionIdFromHeader, nil
}
| {
return psm.routePath
} | identifier_body |
pages.go | package peji
import (
"bytes"
"context"
"strings"
"sync"
"time"
"github.com/influx6/npkg/njson"
"github.com/influx6/sabuhp"
"github.com/influx6/sabuhp/mixer"
"github.com/influx6/npkg/nerror"
"github.com/influx6/npkg/nxid"
"github.com/influx6/groundlayer/pkg/domu"
"github.com/influx6/groundlayer/pkg/styled"
)
const (
DefaultMaxPageIdleness = 5 * time.Minute
DefaultPageIdlenessChecksInterval = 2 * time.Minute
HeaderSessionIdName = "X-Void-Id"
QueryAndCookieSessionIdName = "_groundlayer_id"
)
type PageCreator func(name string, theme *styled.Theme, pubsub sabuhp.Transport) *Page
type PageSession struct {
lastUsed time.Time
Id nxid.ID
Page *Page
}
func (ps *PageSession) Close() {
ps.lastUsed = time.Time{}
ps.Page.Close()
}
type Logger interface {
Log(json *njson.JSON)
}
type PageSessionManager struct {
routePath string
maxIdle time.Duration
idleCheckInterval time.Duration
onAddRoute OnPage
theme *styled.Theme
Creator PageCreator
sessions map[string]PageSession
doFunc chan func()
canceler context.CancelFunc
ctx context.Context
waiter sync.WaitGroup
starter sync.Once
ender sync.Once
rl sync.Mutex
routes map[string]bool
}
func NewPageSessionManager(
ctx context.Context,
routePath string,
maxIdle time.Duration,
idleCheckInterval time.Duration,
creator PageCreator,
theme *styled.Theme,
onAddRoute OnPage,
) *PageSessionManager {
var newCtx, canceler = context.WithCancel(ctx)
return &PageSessionManager{
routePath: routePath,
ctx: newCtx,
canceler: canceler,
maxIdle: maxIdle,
Creator: creator,
theme: theme,
onAddRoute: onAddRoute,
idleCheckInterval: idleCheckInterval,
doFunc: make(chan func(), 0),
routes: map[string]bool{},
sessions: map[string]PageSession{},
}
}
func (psm *PageSessionManager) GetName() string {
return psm.routePath
}
func (psm *PageSessionManager) Wait() {
psm.waiter.Wait()
}
func (psm *PageSessionManager) Stop() {
psm.starter.Do(func() {
psm.canceler()
psm.waiter.Wait()
})
}
func (psm *PageSessionManager) Start() {
psm.starter.Do(func() {
psm.waiter.Add(1)
go psm.manage()
})
}
type SessionStat struct {
PageName string
TotalSessions int
Sessions map[string]time.Time
}
func (psm *PageSessionManager) Stat() (SessionStat, error) {
var session = make(chan SessionStat, 1)
psm.doFunc <- func() {
var stat SessionStat
stat.PageName = psm.routePath
stat.Sessions = map[string]time.Time{}
stat.TotalSessions = len(psm.sessions)
for _, ss := range psm.sessions {
stat.Sessions[ss.Id.String()] = ss.lastUsed
}
session <- stat
}
select {
case <-psm.ctx.Done():
return SessionStat{}, nerror.WrapOnly(psm.ctx.Err())
case ss := <-session:
return ss, nil
}
}
// Retire returns closes a specific session using giving id.
func (psm *PageSessionManager) Retire(sessionId string) error {
var session = make(chan error, 1)
psm.doFunc <- func() {
if ss, hasSession := psm.sessions[sessionId]; hasSession {
delete(psm.sessions, sessionId)
ss.Page.Close()
}
session <- nil
}
select {
case <-psm.ctx.Done():
return nerror.WrapOnly(psm.ctx.Err())
case <-session:
return nil
}
}
// Session returns a giving page for a giving sessionId.
func (psm *PageSessionManager) Session(sessionId string) (*Page, error) {
var session = make(chan *Page, 1)
var errs = make(chan error, 1)
psm.doFunc <- func() {
var ss, hasSession = psm.sessions[sessionId]
if !hasSession {
errs <- nil
return
}
session <- ss.Page
}
select {
case <-psm.ctx.Done():
return nil, nerror.WrapOnly(psm.ctx.Err())
case err := <-errs:
return nil, nerror.WrapOnly(err)
case page := <-session:
return page, nil
}
}
// NewSession returns a new session page and session id.
func (psm *PageSessionManager) NewSession(t sabuhp.Transport) (*Page, string, error) {
var session = make(chan PageSession, 1)
psm.doFunc <- func() {
var ps PageSession
ps.Id = nxid.New()
ps.lastUsed = time.Now()
ps.Page = psm.Creator(psm.routePath, psm.theme, t)
psm.sessions[ps.Id.String()] = ps
ps.Page.OnPageAdd(psm.manageAddPageRoute)
session <- ps
}
select {
case <-psm.ctx.Done():
return nil, "", nerror.WrapOnly(psm.ctx.Err())
case ss := <-session:
return ss.Page, ss.Id.String(), nil
}
}
func (psm *PageSessionManager) manageAddPageRoute(pageRoute string, p *Page) {
psm.rl.Lock()
if _, hasRoute := psm.routes[pageRoute]; hasRoute {
psm.rl.Unlock()
return
}
psm.routes[pageRoute] = true
psm.rl.Unlock()
psm.onAddRoute(pageRoute, p)
}
func (psm *PageSessionManager) manage() {
defer psm.waiter.Done()
var ticker = time.NewTicker(psm.idleCheckInterval)
defer ticker.Stop()
doLoop:
for {
select {
case <-psm.ctx.Done():
return
case doFn := <-psm.doFunc:
doFn()
case <-ticker.C:
// clean house
var nowTime = time.Now()
for key, session := range psm.sessions {
if nowTime.Sub(session.lastUsed) < psm.maxIdle {
continue doLoop
}
delete(psm.sessions, key)
session.Close()
}
}
}
}
var _ sabuhp.TransportResponse = (*Pages)(nil)
type OnPages func(route string, p *Pages)
// Pages exists to provider an organization
// around sessions and pages.
//
// It implements the http.Handler interface.
type Pages struct {
logger Logger
prefix string
theme *styled.Theme
router *mixer.Mux
maxIdle time.Duration
idleCheck time.Duration
ctx context.Context
tr sabuhp.Transport
sl sync.RWMutex
waiter sync.WaitGroup
managers map[string]*PageSessionManager | onNewPage *PageNotification
}
func WithPages(
ctx context.Context,
logger Logger,
prefix string,
theme *styled.Theme,
transport sabuhp.Transport,
notFound Handler,
) *Pages {
return NewPages(
ctx,
logger,
prefix,
DefaultMaxPageIdleness,
DefaultPageIdlenessChecksInterval,
theme,
transport,
notFound,
)
}
func NewPages(
ctx context.Context,
logger Logger,
prefix string,
maxIdle time.Duration,
idleCheck time.Duration,
theme *styled.Theme,
transport sabuhp.Transport,
notFound Handler,
) *Pages {
if notFound == nil {
notFound = DefaultNotFound{}
}
return &Pages{
theme: theme,
tr: transport,
prefix: prefix,
ctx: ctx,
logger: logger,
maxIdle: maxIdle,
idleCheck: idleCheck,
onNewPage: NewPageNotification(),
managers: map[string]*PageSessionManager{},
router: mixer.NewMux(mixer.MuxConfig{
RootPath: prefix,
NotFound: mixer.HandlerFunc(func(message *sabuhp.Message) (*sabuhp.Message, error) {
var d Data
d.Message = message
d.Path = message.Path
if sessionId, sessionIdErr := getSessionId(message); sessionIdErr == nil {
d.SessionId = sessionId
}
var payload, contentType, err = writeNode(message.ContentType, notFound.Handle(d))
if err != nil {
return nil, nerror.WrapOnly(err)
}
return &sabuhp.Message{
MessageMeta: sabuhp.MessageMeta{
Path: message.Path,
ContentType: contentType,
SuggestedStatusCode: 200,
Headers: sabuhp.Header{
HeaderSessionIdName: []string{d.SessionId},
},
Cookies: []sabuhp.Cookie{
{
Name: QueryAndCookieSessionIdName,
Value: d.SessionId,
},
},
},
FromAddr: prefix,
ID: nxid.ID{},
Topic: message.Path,
Delivery: message.Delivery,
Payload: payload,
Metadata: sabuhp.Params{},
Params: sabuhp.Params{},
}, nil
}),
}),
}
}
func (p *Pages) Wait() {
p.waiter.Wait()
}
type PagesStat struct {
TotalPages int
PageSessions map[string]SessionStat
}
// Stats returns current states of existing pages, creators.
func (p *Pages) Stats() PagesStat {
var totalPages int
var stats = map[string]SessionStat{}
var stack = njson.Log(p.logger)
p.sl.RLock()
totalPages = len(p.managers)
for page, manager := range p.managers {
stat, err := manager.Stat()
if err != nil {
stack.New().LError().Error("error", err).
Message("failed to get stat for page").
String("page", page).
End()
continue
}
stats[page] = stat
}
p.sl.RUnlock()
var stat PagesStat
stat.PageSessions = stats
stat.TotalPages = totalPages
return stat
}
// GetManager returns page creator registered for a giving page page
func (p *Pages) Get(pageName string) (*PageSessionManager, error) {
var pageHandle = cleanAllSlashes(handlePath(pageName))
var prefixPage = cleanAllSlashes(handlePath(p.prefix, pageName))
p.sl.RLock()
var manager, exists = p.managers[prefixPage]
if !exists {
manager, exists = p.managers[pageHandle]
}
p.sl.RUnlock()
if !exists {
return nil, nerror.New("not found")
}
return manager, nil
}
// HasPage returns true/false if a giving page exists.
func (p *Pages) Has(pageName string) bool {
var pageHandle = cleanAllSlashes(handlePath(pageName))
var prefixPage = cleanAllSlashes(handlePath(p.prefix, pageName))
p.sl.RLock()
if _, exists := p.managers[prefixPage]; exists {
p.sl.RUnlock()
return true
}
if _, exists := p.managers[pageHandle]; exists {
p.sl.RUnlock()
return true
}
p.sl.RUnlock()
return false
}
// AddCreator adds a new PageCreator for a giving page routePath.
// It returns true/false based on whether the routePath and creator was registered or if there was routePath conflict.
func (p *Pages) Add(pageName string, creatorFunc PageCreator) error {
var prefixPage = cleanAllSlashes(handlePath(p.prefix, pageName))
var routerPath = cleanAllSlashes(handlePath(pageName))
var routerPathForMore = cleanAllSlashes(handlePath(p.prefix, pageName, "*path"))
if p.Has(prefixPage) {
return nerror.New("already exists")
}
var manager = NewPageSessionManager(p.ctx, prefixPage, p.maxIdle, p.idleCheck, creatorFunc, p.theme, p.onNewPage.Emit)
manager.Start()
p.waiter.Add(1)
go func() {
defer p.waiter.Done()
manager.Wait()
p.sl.Lock()
delete(p.managers, prefixPage)
p.sl.Unlock()
}()
p.sl.Lock()
p.managers[prefixPage] = manager
p.sl.Unlock()
var handler = createHandler(prefixPage, manager, p.tr)
p.router.Serve(routerPath, handler)
p.router.Serve(routerPathForMore, handler)
p.onNewPage.Emit(prefixPage, nil)
return nil
}
func (p *Pages) AddOnPageRoute(cb OnPages) {
p.onNewPage.Add(func(route string, _ *Page) {
cb(route, p)
})
}
func (p *Pages) Handle(message *sabuhp.Message, tr sabuhp.Transport) sabuhp.MessageErr {
var reply, err = p.router.ServeRoute(message)
if err != nil {
return sabuhp.WrapErr(err, false)
}
var sendErr error
if reply.Delivery == sabuhp.SendToAll {
sendErr = tr.SendToAll(reply, -1)
} else {
sendErr = tr.SendToOne(reply, -1)
}
if sendErr != nil {
return sabuhp.WrapErr(sendErr, false)
}
return nil
}
func createHandler(pagePath string, manager *PageSessionManager, tr sabuhp.Transport) mixer.Handler {
return mixer.HandlerFunc(func(message *sabuhp.Message) (*sabuhp.Message, error) {
var d Data
d.Message = message
d.Path = message.Path
if sessionId, sessionIdErr := getSessionId(message); sessionIdErr == nil {
d.SessionId = sessionId
}
var page *Page
var sessionErr error
page, sessionErr = manager.Session(d.SessionId)
if sessionErr != nil {
page, d.SessionId, sessionErr = manager.NewSession(tr)
}
if sessionErr != nil {
return nil, nerror.WrapOnly(sessionErr)
}
var renderNode = page.Render(d)
var payload, contentType, writeErr = writeNode(message.ContentType, renderNode)
if writeErr != nil {
return nil, nerror.WrapOnly(writeErr)
}
return &sabuhp.Message{
MessageMeta: sabuhp.MessageMeta{
Path: message.Path,
ContentType: contentType,
SuggestedStatusCode: 200,
Headers: sabuhp.Header{
HeaderSessionIdName: []string{d.SessionId},
},
Cookies: []sabuhp.Cookie{
{
Name: QueryAndCookieSessionIdName,
Value: d.SessionId,
},
},
},
ID: nxid.ID{},
Topic: message.Path,
FromAddr: pagePath,
Delivery: message.Delivery,
Payload: payload,
Metadata: sabuhp.Params{},
Params: sabuhp.Params{},
}, nil
})
}
func writeNode(contentType string, renderNode *domu.Node) ([]byte, string, error) {
var content string
var renderedOutput = bytes.NewBuffer(make([]byte, 0, 512))
switch contentType {
case VoidHTMLDiff:
content = VoidHTMLDiff
if renderErr := RenderVoidHTMLDiff(renderNode, renderedOutput); renderErr != nil {
return nil, "", nerror.WrapOnly(renderErr)
}
case VoidJSON:
content = VoidJSON
if renderErr := RenderVoidJSON(renderNode, renderedOutput); renderErr != nil {
return nil, "", nerror.WrapOnly(renderErr)
}
case VoidJSONStream:
content = VoidJSONStream
if renderErr := RenderVoidJSONStream(renderNode, renderedOutput); renderErr != nil {
return nil, "", nerror.WrapOnly(renderErr)
}
case VoidHTML:
fallthrough
default:
content = PlainHTML
if renderErr := RenderVoidHTML(renderNode, renderedOutput); renderErr != nil {
return nil, "", nerror.WrapOnly(renderErr)
}
}
return renderedOutput.Bytes(), content, nil
}
func getSessionId(message *sabuhp.Message) (string, error) {
var sessionIdFromQuery = strings.TrimSpace(message.Query.Get(QueryAndCookieSessionIdName))
var sessionIdFromHeader = strings.TrimSpace(message.Headers.Get(HeaderSessionIdName))
var isASession = len(sessionIdFromHeader) != 0 || len(sessionIdFromQuery) != 0
if !isASession {
return "", nerror.New("not a session")
}
if len(sessionIdFromQuery) != 0 {
return sessionIdFromQuery, nil
}
return sessionIdFromHeader, nil
} | random_line_split | |
helpers.py | import os
import time
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
from torch.utils.data.sampler import BatchSampler, SubsetRandomSampler
from config.utils import *
import lp.db_semisuper as db_semisuper
import lp.db_eval as db_eval
from models import *
import itertools
import torch.backends.cudnn as cudnn
import torchvision
class StreamBatchSampler(Sampler):
def __init__(self, primary_indices, batch_size):
self.primary_indices = primary_indices
self.primary_batch_size = batch_size
def __iter__(self):
primary_iter = iterate_eternally(self.primary_indices)
return (primary_batch for (primary_batch)
in grouper(primary_iter, self.primary_batch_size)
)
def __len__(self):
return len(self.primary_indices) // self.primary_batch_size
def iterate_eternally(indices):
def infinite_shuffles():
while True:
yield np.random.permutation(indices)
return itertools.chain.from_iterable(infinite_shuffles())
def grouper(iterable, n):
"Collect data into fixed-length chunks or blocks"
# grouper('ABCDEFG', 3) --> ABC DEF"
args = [iter(iterable)] * n
return zip(*args)
def create_data_loaders_simple(weak_transformation,strong_transformation,
eval_transformation,
datadir,
args):
traindir = os.path.join(datadir, args.train_subdir)
evaldir = os.path.join(datadir, args.eval_subdir)
with open(args.labels) as f:
labels = dict(line.split(' ') for line in f.read().splitlines())
dataset = db_semisuper.DBSS(traindir, labels , False , args.aug_num , eval_transformation,weak_transformation,strong_transformation)
sampler = SubsetRandomSampler(dataset.labeled_idx)
batch_sampler = BatchSampler(sampler, args.batch_size, drop_last=True)
train_loader = torch.utils.data.DataLoader(dataset,batch_sampler=batch_sampler,num_workers=args.workers,pin_memory=True)
train_loader_noshuff = torch.utils.data.DataLoader(dataset,
batch_size=args.batch_size,
shuffle=False,
num_workers=args.workers,
pin_memory=True,
drop_last=False)
eval_dataset = db_eval.DBE(evaldir, False, eval_transformation)
eval_loader = torch.utils.data.DataLoader(
eval_dataset,
batch_size=args.batch_size,
shuffle=False,
num_workers=args.workers,
pin_memory=True,
drop_last=False)
batch_sampler_l = StreamBatchSampler(dataset.labeled_idx, batch_size=args.labeled_batch_size)
batch_sampler_u = BatchSampler(SubsetRandomSampler(dataset.unlabeled_idx), batch_size=args.batch_size - args.labeled_batch_size, drop_last=True)
train_loader_l = DataLoader(dataset, batch_sampler=batch_sampler_l,
num_workers=args.workers,
pin_memory=True)
train_loader_u = DataLoader(dataset, batch_sampler=batch_sampler_u,
num_workers=args.workers,
pin_memory=True)
return train_loader, eval_loader, train_loader_noshuff , train_loader_l , train_loader_u , dataset
#### Create Model
def create_model(num_classes,args):
model_choice = args.model
if model_choice == "resnet18":
model = resnet18(num_classes)
elif model_choice == "resnet50":
model = resnet50(num_classes)
elif model_choice == "wrn-28-2":
model = build_wideresnet(28,2,0,num_classes)
elif model_choice == "wrn-28-8":
model = build_wideresnet(28,8,0,num_classes)
elif model_choice == "cifarcnn":
model = cifar_cnn(num_classes)
model = nn.DataParallel(model)
model.to(args.device)
cudnn.benchmark = True
return model
def hellinger(p,q):
return np.sqrt(np.sum((np.sqrt(p)-np.sqrt(q))**2))/np.sqrt(2)
def mixup_data(x_1 , index , lam):
mixed_x_1 = lam * x_1 + (1 - lam) * x_1[index, :]
return mixed_x_1
def mixup_criterion(pred, y_a, y_b, lam):
criterion = nn.CrossEntropyLoss(reduction='none').cuda()
return lam * criterion(pred, y_a) + (1 - lam) * criterion(pred, y_b)
def train_sup(train_loader, model, optimizer, epoch, global_step, args, ema_model = None):
# switch to train mode
model.train()
for i, (aug_images , target) in enumerate(train_loader):
target = target.to(args.device)
#Create the mix
alpha = args.alpha
index = torch.randperm(args.batch_size,device=args.device)
lam = np.random.beta(alpha, alpha)
target_a, target_b = target, target[index]
optimizer.zero_grad()
adjust_learning_rate(optimizer, epoch, i, len(train_loader), args)
# Loop over the batches
count = 0
for batch in aug_images:
batch = batch.to(args.device)
m_batch = mixup_data(batch,index,lam)
class_logit , _ = model(m_batch)
if count == 0:
loss_sum = mixup_criterion(class_logit.double(), target_a , target_b , lam).mean()
else:
loss_sum += mixup_criterion(class_logit.double(), target_a , target_b , lam).mean()
count += 1
loss = loss_sum / (args.aug_num)
loss.backward()
optimizer.step()
global_step += 1
return global_step
def train_semi(train_loader_l, train_loader_u , model, optimizer, epoch, global_step, args, ema_model = None):
# switch to train mode
model.train()
lr_length = len(train_loader_u)
train_loader_l = iter(train_loader_l)
if args.progress == True:
from tqdm import tqdm
from torchnet import meter
tk0 = tqdm(train_loader_u,desc="Semi Supervised Learning Epoch " + str(epoch) + "/" +str(args.epochs),unit="batch")
loss_meter = meter.AverageValueMeter()
else:
tk0 = train_loader_u
for i, (aug_images_u,target_u) in enumerate(tk0):
|
return global_step
def validate(eval_loader, model, args, global_step, epoch, num_classes =10):
meters = AverageMeterSet()
with torch.no_grad():
for batch_idx, (inputs, targets) in enumerate(eval_loader):
batch_size = targets.size(0)
model.eval()
inputs = inputs.to(args.device)
targets = targets.to(args.device)
outputs,_ = model(inputs)
prec1, prec5 = accuracy(outputs, targets, topk=(1, 5))
# measure accuracy and record loss
prec1, prec5 = accuracy(outputs, targets, topk=(1, 5))
meters.update('top1', prec1.item(), batch_size)
meters.update('error1', 100.0 - prec1.item(), batch_size)
meters.update('top5', prec5.item(), batch_size)
meters.update('error5', 100.0 - prec5.item(), batch_size)
print(' * Prec@1 {top1.avg:.3f}\tPrec@5 {top5.avg:.3f}'
.format(top1=meters['top1'], top5=meters['top5']))
return meters['top1'].avg, meters['top5'].avg
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.reshape(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].reshape(-1).float().sum(0)
res.append(correct_k.mul_(100.0 / batch_size))
return res
def cosine_rampdown(current, rampdown_length):
"""Cosine rampdown from https://arxiv.org/abs/1608.03983"""
assert 0 <= current <= rampdown_length
return float(.5 * (np.cos(np.pi * current / rampdown_length) + 1))
def adjust_learning_rate(optimizer, epoch, step_in_epoch, total_steps_in_epoch, args):
lr = args.lr
epoch = epoch + step_in_epoch / total_steps_in_epoch
# Cosine LR rampdown from https://arxiv.org/abs/1608.03983 (but one cycle only)
if args.lr_rampdown_epochs:
assert args.lr_rampdown_epochs >= args.epochs
lr *= cosine_rampdown(epoch, args.lr_rampdown_epochs)
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def extract_features_simp(train_loader,model,args):
model.eval()
embeddings_all = []
with torch.no_grad():
for i, (batch_input) in enumerate(train_loader):
X_n = batch_input[0].to(args.device)
_ , feats = model(X_n)
embeddings_all.append(feats.data.cpu())
embeddings_all = np.asarray(torch.cat(embeddings_all).numpy())
return embeddings_all
def load_args(args):
args.workers = 4 * torch.cuda.device_count()
label_dir = 'data-local/'
if int(args.label_split) < 10:
args.label_split = args.label_split.zfill(2)
if args.dataset == "cifar100":
args.test_batch_size = args.batch_size
args.labels = '%s/labels/%s/%d_balanced_labels/%s.txt' % (label_dir,args.dataset,args.num_labeled,args.label_split)
elif args.dataset == "cifar10":
args.test_batch_size = args.batch_size
args.labels = '%s/labels/%s/%d_balanced_labels/%s.txt' % (label_dir,args.dataset,args.num_labeled,args.label_split)
elif args.dataset == "miniimagenet":
args.train_subdir = 'train'
args.test_batch_size = args.batch_size
args.labels = '%s/labels/%s/%d_balanced_labels/%s.txt' % (label_dir,args.dataset,args.num_labeled,args.label_split)
else:
sys.exit('Undefined dataset!')
return args
| aug_images_l,target_l = next(train_loader_l)
target_l = target_l.to(args.device)
target_u = target_u.to(args.device)
target = torch.cat((target_l,target_u),0)
#Create the mix
alpha = args.alpha
index = torch.randperm(args.batch_size,device=args.device)
lam = np.random.beta(alpha, alpha)
target_a, target_b = target, target[index]
optimizer.zero_grad()
adjust_learning_rate(optimizer, epoch, i, lr_length, args)
count = 0
for batch_l , batch_u in zip(aug_images_l ,aug_images_u):
batch_l = batch_l.to(args.device)
batch_u = batch_u.to(args.device)
batch = torch.cat((batch_l,batch_u),0)
m_batch = mixup_data(batch,index,lam)
class_logit , _ = model(m_batch)
if count == 0:
loss_sum = mixup_criterion(class_logit.double() , target_a , target_b , lam).mean()
else:
loss_sum += mixup_criterion(class_logit.double() , target_a , target_b , lam).mean()
count += 1
loss = loss_sum / (args.aug_num)
loss.backward()
optimizer.step()
if args.progress == True:
loss_meter.add(loss.item())
tk0.set_postfix(loss=loss_meter.mean)
global_step += 1 | conditional_block |
helpers.py | import os
import time
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
from torch.utils.data.sampler import BatchSampler, SubsetRandomSampler
from config.utils import *
import lp.db_semisuper as db_semisuper
import lp.db_eval as db_eval
from models import *
import itertools
import torch.backends.cudnn as cudnn
import torchvision
class StreamBatchSampler(Sampler):
def __init__(self, primary_indices, batch_size):
self.primary_indices = primary_indices
self.primary_batch_size = batch_size
def __iter__(self):
primary_iter = iterate_eternally(self.primary_indices)
return (primary_batch for (primary_batch)
in grouper(primary_iter, self.primary_batch_size)
)
def __len__(self):
return len(self.primary_indices) // self.primary_batch_size
def iterate_eternally(indices):
def infinite_shuffles():
while True:
yield np.random.permutation(indices)
return itertools.chain.from_iterable(infinite_shuffles())
def grouper(iterable, n):
"Collect data into fixed-length chunks or blocks"
# grouper('ABCDEFG', 3) --> ABC DEF"
args = [iter(iterable)] * n
return zip(*args)
def create_data_loaders_simple(weak_transformation,strong_transformation,
eval_transformation,
datadir,
args):
traindir = os.path.join(datadir, args.train_subdir)
evaldir = os.path.join(datadir, args.eval_subdir)
with open(args.labels) as f:
labels = dict(line.split(' ') for line in f.read().splitlines())
dataset = db_semisuper.DBSS(traindir, labels , False , args.aug_num , eval_transformation,weak_transformation,strong_transformation)
sampler = SubsetRandomSampler(dataset.labeled_idx)
batch_sampler = BatchSampler(sampler, args.batch_size, drop_last=True)
train_loader = torch.utils.data.DataLoader(dataset,batch_sampler=batch_sampler,num_workers=args.workers,pin_memory=True)
train_loader_noshuff = torch.utils.data.DataLoader(dataset,
batch_size=args.batch_size,
shuffle=False,
num_workers=args.workers,
pin_memory=True,
drop_last=False)
eval_dataset = db_eval.DBE(evaldir, False, eval_transformation)
eval_loader = torch.utils.data.DataLoader(
eval_dataset,
batch_size=args.batch_size,
shuffle=False,
num_workers=args.workers,
pin_memory=True,
drop_last=False)
batch_sampler_l = StreamBatchSampler(dataset.labeled_idx, batch_size=args.labeled_batch_size)
batch_sampler_u = BatchSampler(SubsetRandomSampler(dataset.unlabeled_idx), batch_size=args.batch_size - args.labeled_batch_size, drop_last=True)
train_loader_l = DataLoader(dataset, batch_sampler=batch_sampler_l,
num_workers=args.workers,
pin_memory=True)
train_loader_u = DataLoader(dataset, batch_sampler=batch_sampler_u,
num_workers=args.workers,
pin_memory=True)
return train_loader, eval_loader, train_loader_noshuff , train_loader_l , train_loader_u , dataset
#### Create Model
def create_model(num_classes,args):
model_choice = args.model
if model_choice == "resnet18":
model = resnet18(num_classes)
elif model_choice == "resnet50":
model = resnet50(num_classes)
elif model_choice == "wrn-28-2":
model = build_wideresnet(28,2,0,num_classes)
elif model_choice == "wrn-28-8":
model = build_wideresnet(28,8,0,num_classes)
elif model_choice == "cifarcnn":
model = cifar_cnn(num_classes)
model = nn.DataParallel(model)
model.to(args.device)
cudnn.benchmark = True
return model
def hellinger(p,q):
return np.sqrt(np.sum((np.sqrt(p)-np.sqrt(q))**2))/np.sqrt(2)
def mixup_data(x_1 , index , lam):
mixed_x_1 = lam * x_1 + (1 - lam) * x_1[index, :]
return mixed_x_1
def mixup_criterion(pred, y_a, y_b, lam):
criterion = nn.CrossEntropyLoss(reduction='none').cuda()
return lam * criterion(pred, y_a) + (1 - lam) * criterion(pred, y_b)
def train_sup(train_loader, model, optimizer, epoch, global_step, args, ema_model = None):
# switch to train mode
model.train()
for i, (aug_images , target) in enumerate(train_loader):
target = target.to(args.device)
#Create the mix
alpha = args.alpha
index = torch.randperm(args.batch_size,device=args.device)
lam = np.random.beta(alpha, alpha)
target_a, target_b = target, target[index]
optimizer.zero_grad()
adjust_learning_rate(optimizer, epoch, i, len(train_loader), args)
# Loop over the batches
count = 0
for batch in aug_images:
batch = batch.to(args.device)
m_batch = mixup_data(batch,index,lam)
class_logit , _ = model(m_batch)
if count == 0:
loss_sum = mixup_criterion(class_logit.double(), target_a , target_b , lam).mean()
else:
loss_sum += mixup_criterion(class_logit.double(), target_a , target_b , lam).mean()
count += 1
loss = loss_sum / (args.aug_num)
loss.backward()
optimizer.step()
global_step += 1
return global_step
def train_semi(train_loader_l, train_loader_u , model, optimizer, epoch, global_step, args, ema_model = None):
# switch to train mode
model.train()
lr_length = len(train_loader_u)
train_loader_l = iter(train_loader_l)
if args.progress == True:
from tqdm import tqdm
from torchnet import meter
tk0 = tqdm(train_loader_u,desc="Semi Supervised Learning Epoch " + str(epoch) + "/" +str(args.epochs),unit="batch")
loss_meter = meter.AverageValueMeter()
else:
tk0 = train_loader_u
for i, (aug_images_u,target_u) in enumerate(tk0):
aug_images_l,target_l = next(train_loader_l)
target_l = target_l.to(args.device)
target_u = target_u.to(args.device)
target = torch.cat((target_l,target_u),0)
#Create the mix
alpha = args.alpha
index = torch.randperm(args.batch_size,device=args.device)
lam = np.random.beta(alpha, alpha)
target_a, target_b = target, target[index]
optimizer.zero_grad()
adjust_learning_rate(optimizer, epoch, i, lr_length, args)
count = 0
for batch_l , batch_u in zip(aug_images_l ,aug_images_u):
batch_l = batch_l.to(args.device)
batch_u = batch_u.to(args.device)
batch = torch.cat((batch_l,batch_u),0)
m_batch = mixup_data(batch,index,lam)
class_logit , _ = model(m_batch)
if count == 0:
loss_sum = mixup_criterion(class_logit.double() , target_a , target_b , lam).mean()
else:
loss_sum += mixup_criterion(class_logit.double() , target_a , target_b , lam).mean()
count += 1
loss = loss_sum / (args.aug_num)
loss.backward()
optimizer.step()
if args.progress == True:
loss_meter.add(loss.item())
tk0.set_postfix(loss=loss_meter.mean)
global_step += 1
return global_step
def validate(eval_loader, model, args, global_step, epoch, num_classes =10):
meters = AverageMeterSet()
with torch.no_grad():
for batch_idx, (inputs, targets) in enumerate(eval_loader):
batch_size = targets.size(0)
model.eval()
inputs = inputs.to(args.device)
targets = targets.to(args.device) | outputs,_ = model(inputs)
prec1, prec5 = accuracy(outputs, targets, topk=(1, 5))
# measure accuracy and record loss
prec1, prec5 = accuracy(outputs, targets, topk=(1, 5))
meters.update('top1', prec1.item(), batch_size)
meters.update('error1', 100.0 - prec1.item(), batch_size)
meters.update('top5', prec5.item(), batch_size)
meters.update('error5', 100.0 - prec5.item(), batch_size)
print(' * Prec@1 {top1.avg:.3f}\tPrec@5 {top5.avg:.3f}'
.format(top1=meters['top1'], top5=meters['top5']))
return meters['top1'].avg, meters['top5'].avg
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.reshape(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].reshape(-1).float().sum(0)
res.append(correct_k.mul_(100.0 / batch_size))
return res
def cosine_rampdown(current, rampdown_length):
"""Cosine rampdown from https://arxiv.org/abs/1608.03983"""
assert 0 <= current <= rampdown_length
return float(.5 * (np.cos(np.pi * current / rampdown_length) + 1))
def adjust_learning_rate(optimizer, epoch, step_in_epoch, total_steps_in_epoch, args):
lr = args.lr
epoch = epoch + step_in_epoch / total_steps_in_epoch
# Cosine LR rampdown from https://arxiv.org/abs/1608.03983 (but one cycle only)
if args.lr_rampdown_epochs:
assert args.lr_rampdown_epochs >= args.epochs
lr *= cosine_rampdown(epoch, args.lr_rampdown_epochs)
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def extract_features_simp(train_loader,model,args):
model.eval()
embeddings_all = []
with torch.no_grad():
for i, (batch_input) in enumerate(train_loader):
X_n = batch_input[0].to(args.device)
_ , feats = model(X_n)
embeddings_all.append(feats.data.cpu())
embeddings_all = np.asarray(torch.cat(embeddings_all).numpy())
return embeddings_all
def load_args(args):
args.workers = 4 * torch.cuda.device_count()
label_dir = 'data-local/'
if int(args.label_split) < 10:
args.label_split = args.label_split.zfill(2)
if args.dataset == "cifar100":
args.test_batch_size = args.batch_size
args.labels = '%s/labels/%s/%d_balanced_labels/%s.txt' % (label_dir,args.dataset,args.num_labeled,args.label_split)
elif args.dataset == "cifar10":
args.test_batch_size = args.batch_size
args.labels = '%s/labels/%s/%d_balanced_labels/%s.txt' % (label_dir,args.dataset,args.num_labeled,args.label_split)
elif args.dataset == "miniimagenet":
args.train_subdir = 'train'
args.test_batch_size = args.batch_size
args.labels = '%s/labels/%s/%d_balanced_labels/%s.txt' % (label_dir,args.dataset,args.num_labeled,args.label_split)
else:
sys.exit('Undefined dataset!')
return args | random_line_split | |
helpers.py | import os
import time
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
from torch.utils.data.sampler import BatchSampler, SubsetRandomSampler
from config.utils import *
import lp.db_semisuper as db_semisuper
import lp.db_eval as db_eval
from models import *
import itertools
import torch.backends.cudnn as cudnn
import torchvision
class StreamBatchSampler(Sampler):
def __init__(self, primary_indices, batch_size):
self.primary_indices = primary_indices
self.primary_batch_size = batch_size
def __iter__(self):
primary_iter = iterate_eternally(self.primary_indices)
return (primary_batch for (primary_batch)
in grouper(primary_iter, self.primary_batch_size)
)
def __len__(self):
return len(self.primary_indices) // self.primary_batch_size
def iterate_eternally(indices):
def infinite_shuffles():
while True:
yield np.random.permutation(indices)
return itertools.chain.from_iterable(infinite_shuffles())
def grouper(iterable, n):
"Collect data into fixed-length chunks or blocks"
# grouper('ABCDEFG', 3) --> ABC DEF"
args = [iter(iterable)] * n
return zip(*args)
def create_data_loaders_simple(weak_transformation,strong_transformation,
eval_transformation,
datadir,
args):
traindir = os.path.join(datadir, args.train_subdir)
evaldir = os.path.join(datadir, args.eval_subdir)
with open(args.labels) as f:
labels = dict(line.split(' ') for line in f.read().splitlines())
dataset = db_semisuper.DBSS(traindir, labels , False , args.aug_num , eval_transformation,weak_transformation,strong_transformation)
sampler = SubsetRandomSampler(dataset.labeled_idx)
batch_sampler = BatchSampler(sampler, args.batch_size, drop_last=True)
train_loader = torch.utils.data.DataLoader(dataset,batch_sampler=batch_sampler,num_workers=args.workers,pin_memory=True)
train_loader_noshuff = torch.utils.data.DataLoader(dataset,
batch_size=args.batch_size,
shuffle=False,
num_workers=args.workers,
pin_memory=True,
drop_last=False)
eval_dataset = db_eval.DBE(evaldir, False, eval_transformation)
eval_loader = torch.utils.data.DataLoader(
eval_dataset,
batch_size=args.batch_size,
shuffle=False,
num_workers=args.workers,
pin_memory=True,
drop_last=False)
batch_sampler_l = StreamBatchSampler(dataset.labeled_idx, batch_size=args.labeled_batch_size)
batch_sampler_u = BatchSampler(SubsetRandomSampler(dataset.unlabeled_idx), batch_size=args.batch_size - args.labeled_batch_size, drop_last=True)
train_loader_l = DataLoader(dataset, batch_sampler=batch_sampler_l,
num_workers=args.workers,
pin_memory=True)
train_loader_u = DataLoader(dataset, batch_sampler=batch_sampler_u,
num_workers=args.workers,
pin_memory=True)
return train_loader, eval_loader, train_loader_noshuff , train_loader_l , train_loader_u , dataset
#### Create Model
def create_model(num_classes,args):
model_choice = args.model
if model_choice == "resnet18":
model = resnet18(num_classes)
elif model_choice == "resnet50":
model = resnet50(num_classes)
elif model_choice == "wrn-28-2":
model = build_wideresnet(28,2,0,num_classes)
elif model_choice == "wrn-28-8":
model = build_wideresnet(28,8,0,num_classes)
elif model_choice == "cifarcnn":
model = cifar_cnn(num_classes)
model = nn.DataParallel(model)
model.to(args.device)
cudnn.benchmark = True
return model
def hellinger(p,q):
return np.sqrt(np.sum((np.sqrt(p)-np.sqrt(q))**2))/np.sqrt(2)
def mixup_data(x_1 , index , lam):
mixed_x_1 = lam * x_1 + (1 - lam) * x_1[index, :]
return mixed_x_1
def mixup_criterion(pred, y_a, y_b, lam):
|
def train_sup(train_loader, model, optimizer, epoch, global_step, args, ema_model = None):
# switch to train mode
model.train()
for i, (aug_images , target) in enumerate(train_loader):
target = target.to(args.device)
#Create the mix
alpha = args.alpha
index = torch.randperm(args.batch_size,device=args.device)
lam = np.random.beta(alpha, alpha)
target_a, target_b = target, target[index]
optimizer.zero_grad()
adjust_learning_rate(optimizer, epoch, i, len(train_loader), args)
# Loop over the batches
count = 0
for batch in aug_images:
batch = batch.to(args.device)
m_batch = mixup_data(batch,index,lam)
class_logit , _ = model(m_batch)
if count == 0:
loss_sum = mixup_criterion(class_logit.double(), target_a , target_b , lam).mean()
else:
loss_sum += mixup_criterion(class_logit.double(), target_a , target_b , lam).mean()
count += 1
loss = loss_sum / (args.aug_num)
loss.backward()
optimizer.step()
global_step += 1
return global_step
def train_semi(train_loader_l, train_loader_u , model, optimizer, epoch, global_step, args, ema_model = None):
# switch to train mode
model.train()
lr_length = len(train_loader_u)
train_loader_l = iter(train_loader_l)
if args.progress == True:
from tqdm import tqdm
from torchnet import meter
tk0 = tqdm(train_loader_u,desc="Semi Supervised Learning Epoch " + str(epoch) + "/" +str(args.epochs),unit="batch")
loss_meter = meter.AverageValueMeter()
else:
tk0 = train_loader_u
for i, (aug_images_u,target_u) in enumerate(tk0):
aug_images_l,target_l = next(train_loader_l)
target_l = target_l.to(args.device)
target_u = target_u.to(args.device)
target = torch.cat((target_l,target_u),0)
#Create the mix
alpha = args.alpha
index = torch.randperm(args.batch_size,device=args.device)
lam = np.random.beta(alpha, alpha)
target_a, target_b = target, target[index]
optimizer.zero_grad()
adjust_learning_rate(optimizer, epoch, i, lr_length, args)
count = 0
for batch_l , batch_u in zip(aug_images_l ,aug_images_u):
batch_l = batch_l.to(args.device)
batch_u = batch_u.to(args.device)
batch = torch.cat((batch_l,batch_u),0)
m_batch = mixup_data(batch,index,lam)
class_logit , _ = model(m_batch)
if count == 0:
loss_sum = mixup_criterion(class_logit.double() , target_a , target_b , lam).mean()
else:
loss_sum += mixup_criterion(class_logit.double() , target_a , target_b , lam).mean()
count += 1
loss = loss_sum / (args.aug_num)
loss.backward()
optimizer.step()
if args.progress == True:
loss_meter.add(loss.item())
tk0.set_postfix(loss=loss_meter.mean)
global_step += 1
return global_step
def validate(eval_loader, model, args, global_step, epoch, num_classes =10):
meters = AverageMeterSet()
with torch.no_grad():
for batch_idx, (inputs, targets) in enumerate(eval_loader):
batch_size = targets.size(0)
model.eval()
inputs = inputs.to(args.device)
targets = targets.to(args.device)
outputs,_ = model(inputs)
prec1, prec5 = accuracy(outputs, targets, topk=(1, 5))
# measure accuracy and record loss
prec1, prec5 = accuracy(outputs, targets, topk=(1, 5))
meters.update('top1', prec1.item(), batch_size)
meters.update('error1', 100.0 - prec1.item(), batch_size)
meters.update('top5', prec5.item(), batch_size)
meters.update('error5', 100.0 - prec5.item(), batch_size)
print(' * Prec@1 {top1.avg:.3f}\tPrec@5 {top5.avg:.3f}'
.format(top1=meters['top1'], top5=meters['top5']))
return meters['top1'].avg, meters['top5'].avg
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.reshape(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].reshape(-1).float().sum(0)
res.append(correct_k.mul_(100.0 / batch_size))
return res
def cosine_rampdown(current, rampdown_length):
"""Cosine rampdown from https://arxiv.org/abs/1608.03983"""
assert 0 <= current <= rampdown_length
return float(.5 * (np.cos(np.pi * current / rampdown_length) + 1))
def adjust_learning_rate(optimizer, epoch, step_in_epoch, total_steps_in_epoch, args):
lr = args.lr
epoch = epoch + step_in_epoch / total_steps_in_epoch
# Cosine LR rampdown from https://arxiv.org/abs/1608.03983 (but one cycle only)
if args.lr_rampdown_epochs:
assert args.lr_rampdown_epochs >= args.epochs
lr *= cosine_rampdown(epoch, args.lr_rampdown_epochs)
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def extract_features_simp(train_loader,model,args):
model.eval()
embeddings_all = []
with torch.no_grad():
for i, (batch_input) in enumerate(train_loader):
X_n = batch_input[0].to(args.device)
_ , feats = model(X_n)
embeddings_all.append(feats.data.cpu())
embeddings_all = np.asarray(torch.cat(embeddings_all).numpy())
return embeddings_all
def load_args(args):
args.workers = 4 * torch.cuda.device_count()
label_dir = 'data-local/'
if int(args.label_split) < 10:
args.label_split = args.label_split.zfill(2)
if args.dataset == "cifar100":
args.test_batch_size = args.batch_size
args.labels = '%s/labels/%s/%d_balanced_labels/%s.txt' % (label_dir,args.dataset,args.num_labeled,args.label_split)
elif args.dataset == "cifar10":
args.test_batch_size = args.batch_size
args.labels = '%s/labels/%s/%d_balanced_labels/%s.txt' % (label_dir,args.dataset,args.num_labeled,args.label_split)
elif args.dataset == "miniimagenet":
args.train_subdir = 'train'
args.test_batch_size = args.batch_size
args.labels = '%s/labels/%s/%d_balanced_labels/%s.txt' % (label_dir,args.dataset,args.num_labeled,args.label_split)
else:
sys.exit('Undefined dataset!')
return args
| criterion = nn.CrossEntropyLoss(reduction='none').cuda()
return lam * criterion(pred, y_a) + (1 - lam) * criterion(pred, y_b) | identifier_body |
helpers.py | import os
import time
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
from torch.utils.data.sampler import BatchSampler, SubsetRandomSampler
from config.utils import *
import lp.db_semisuper as db_semisuper
import lp.db_eval as db_eval
from models import *
import itertools
import torch.backends.cudnn as cudnn
import torchvision
class StreamBatchSampler(Sampler):
    """Infinite batch sampler over a fixed pool of indices.

    Streams batches of size `batch_size` drawn from `primary_indices`,
    reshuffling the pool each time it is exhausted (via iterate_eternally),
    so iteration never stops at an epoch boundary by itself.
    NOTE(review): `Sampler` is not among this file's explicit imports;
    presumably it arrives via a star import (e.g. torch.utils.data) — confirm.
    """
    def __init__(self, primary_indices, batch_size):
        # Pool of indices to draw from, and the fixed batch size.
        self.primary_indices = primary_indices
        self.primary_batch_size = batch_size
    def __iter__(self):
        # Endless shuffled stream of indices, grouped into fixed-size
        # batches; indices that do not fill a complete batch are dropped
        # by grouper and reappear after the next reshuffle.
        primary_iter = iterate_eternally(self.primary_indices)
        return (primary_batch for (primary_batch)
            in grouper(primary_iter, self.primary_batch_size)
            )
    def __len__(self):
        # Number of full batches per pass over the pool.
        return len(self.primary_indices) // self.primary_batch_size
def iterate_eternally(indices):
    """Yield elements of `indices` forever, reshuffling the whole pool
    (with numpy's global RNG) each time it has been fully consumed."""
    def _stream():
        while True:
            for idx in np.random.permutation(indices):
                yield idx
    return _stream()
def grouper(iterable, n):
    """Chunk `iterable` into tuples of length n, e.g.
    grouper('ABCDEFG', 3) --> ('A','B','C') ('D','E','F').
    Trailing elements that do not fill a chunk are dropped (zip semantics)."""
    shared_iter = iter(iterable)
    return zip(*([shared_iter] * n))
def create_data_loaders_simple(weak_transformation,strong_transformation,
                               eval_transformation,
                               datadir,
                               args):
    """Build all data loaders for semi-supervised training.

    Returns a 6-tuple:
      train_loader          - shuffled batches over labeled indices only
      eval_loader           - non-shuffled loader over the eval split
      train_loader_noshuff  - non-shuffled loader over the full train set
                              (used for feature extraction)
      train_loader_l        - endless stream of labeled mini-batches
      train_loader_u        - shuffled unlabeled mini-batches; together with
                              train_loader_l a combined batch has
                              args.batch_size samples
      dataset               - the underlying DBSS dataset object
    NOTE(review): transformations and dataset classes come from project
    modules (db_semisuper/db_eval); their exact contracts are not visible here.
    """
    traindir = os.path.join(datadir, args.train_subdir)
    evaldir = os.path.join(datadir, args.eval_subdir)
    # Labels file: one "filename label" pair per line.
    with open(args.labels) as f:
        labels = dict(line.split(' ') for line in f.read().splitlines())
    dataset = db_semisuper.DBSS(traindir, labels , False , args.aug_num , eval_transformation,weak_transformation,strong_transformation)
    # Supervised loader: labeled indices only, full batches (drop_last=True).
    sampler = SubsetRandomSampler(dataset.labeled_idx)
    batch_sampler = BatchSampler(sampler, args.batch_size, drop_last=True)
    train_loader = torch.utils.data.DataLoader(dataset,batch_sampler=batch_sampler,num_workers=args.workers,pin_memory=True)
    # Deterministic pass over every training sample (no shuffling).
    train_loader_noshuff = torch.utils.data.DataLoader(dataset,
                                batch_size=args.batch_size,
                                shuffle=False,
                                num_workers=args.workers,
                                pin_memory=True,
                                drop_last=False)
    eval_dataset = db_eval.DBE(evaldir, False, eval_transformation)
    eval_loader = torch.utils.data.DataLoader(
        eval_dataset,
        batch_size=args.batch_size,
        shuffle=False,
        num_workers=args.workers,
        pin_memory=True,
        drop_last=False)
    # Semi-supervised pair: labeled stream never exhausts (StreamBatchSampler),
    # unlabeled batches fill the remainder of each combined batch.
    batch_sampler_l = StreamBatchSampler(dataset.labeled_idx, batch_size=args.labeled_batch_size)
    batch_sampler_u = BatchSampler(SubsetRandomSampler(dataset.unlabeled_idx), batch_size=args.batch_size - args.labeled_batch_size, drop_last=True)
    train_loader_l = DataLoader(dataset, batch_sampler=batch_sampler_l,
                                num_workers=args.workers,
                                pin_memory=True)
    train_loader_u = DataLoader(dataset, batch_sampler=batch_sampler_u,
                                num_workers=args.workers,
                                pin_memory=True)
    return train_loader, eval_loader, train_loader_noshuff , train_loader_l , train_loader_u , dataset
#### Create Model
def create_model(num_classes,args):
    """Instantiate the backbone named by args.model and prepare it for training.

    The model is wrapped in nn.DataParallel, moved to args.device, and
    cudnn autotuning is enabled.

    Raises:
        ValueError: if args.model names an unknown architecture. Previously
        an unknown name fell through every branch and crashed later with an
        unrelated NameError on `model`.
    """
    model_choice = args.model
    if model_choice == "resnet18":
        model = resnet18(num_classes)
    elif model_choice == "resnet50":
        model = resnet50(num_classes)
    elif model_choice == "wrn-28-2":
        model = build_wideresnet(28,2,0,num_classes)
    elif model_choice == "wrn-28-8":
        model = build_wideresnet(28,8,0,num_classes)
    elif model_choice == "cifarcnn":
        model = cifar_cnn(num_classes)
    else:
        raise ValueError('Undefined model: %s' % model_choice)
    model = nn.DataParallel(model)
    model.to(args.device)
    cudnn.benchmark = True
    return model
def hellinger(p,q):
    """Hellinger distance between two discrete distributions p and q:
    ||sqrt(p) - sqrt(q)||_2 / sqrt(2), which lies in [0, 1]."""
    sqrt_diff = np.sqrt(p) - np.sqrt(q)
    return np.linalg.norm(sqrt_diff) / np.sqrt(2)
def mixup_data(x_1 , index , lam):
    """Mixup: convex combination of a batch with a row-permuted copy of
    itself, weighted lam : (1 - lam)."""
    permuted = x_1[index, :]
    return lam * x_1 + (1 - lam) * permuted
def | (pred, y_a, y_b, lam):
criterion = nn.CrossEntropyLoss(reduction='none').cuda()
return lam * criterion(pred, y_a) + (1 - lam) * criterion(pred, y_b)
def train_sup(train_loader, model, optimizer, epoch, global_step, args, ema_model = None):
    """One epoch of supervised training with mixup over augmented views.

    For every batch, each of the args.aug_num augmented views is mixed with
    a permuted copy of itself (same permutation and lambda for all views),
    the mixup losses are averaged over the views, and one optimizer step is
    taken. Returns the updated global_step counter. `ema_model` is accepted
    but unused here.
    """
    # switch to train mode
    model.train()
    for i, (aug_images , target) in enumerate(train_loader):
        target = target.to(args.device)
        #Create the mix
        alpha = args.alpha
        # NOTE(review): assumes each batch holds exactly args.batch_size
        # samples (the loader uses drop_last=True) — confirm if reused.
        index = torch.randperm(args.batch_size,device=args.device)
        lam = np.random.beta(alpha, alpha)
        target_a, target_b = target, target[index]
        optimizer.zero_grad()
        # Per-step cosine LR schedule, updated before the backward pass.
        adjust_learning_rate(optimizer, epoch, i, len(train_loader), args)
        # Loop over the batches
        count = 0
        for batch in aug_images:
            batch = batch.to(args.device)
            m_batch = mixup_data(batch,index,lam)
            class_logit , _ = model(m_batch)
            if count == 0:
                loss_sum = mixup_criterion(class_logit.double(), target_a , target_b , lam).mean()
            else:
                loss_sum += mixup_criterion(class_logit.double(), target_a , target_b , lam).mean()
            count += 1
        # Average the mixup loss over the augmented views.
        loss = loss_sum / (args.aug_num)
        loss.backward()
        optimizer.step()
        global_step += 1
    return global_step
def train_semi(train_loader_l, train_loader_u , model, optimizer, epoch, global_step, args, ema_model = None):
    """One epoch of semi-supervised training with mixup.

    Each step concatenates one labeled mini-batch (endless stream) with one
    unlabeled mini-batch (with pseudo/propagated targets), applies mixup to
    the combined batch for every augmented view, averages the losses, and
    takes one optimizer step. Returns the updated global_step. `ema_model`
    is accepted but unused here.
    """
    # switch to train mode
    model.train()
    # The unlabeled loader defines the epoch length.
    lr_length = len(train_loader_u)
    train_loader_l = iter(train_loader_l)
    if args.progress == True:
        # Optional progress bar + running loss meter (extra deps imported lazily).
        from tqdm import tqdm
        from torchnet import meter
        tk0 = tqdm(train_loader_u,desc="Semi Supervised Learning Epoch " + str(epoch) + "/" +str(args.epochs),unit="batch")
        loss_meter = meter.AverageValueMeter()
    else:
        tk0 = train_loader_u
    for i, (aug_images_u,target_u) in enumerate(tk0):
        # Labeled stream never exhausts (StreamBatchSampler), so next() is safe.
        aug_images_l,target_l = next(train_loader_l)
        target_l = target_l.to(args.device)
        target_u = target_u.to(args.device)
        # Combined targets: labeled first, then unlabeled.
        target = torch.cat((target_l,target_u),0)
        #Create the mix
        alpha = args.alpha
        # NOTE(review): assumes the combined batch is exactly args.batch_size
        # (labeled_batch_size + unlabeled remainder, drop_last=True) — confirm.
        index = torch.randperm(args.batch_size,device=args.device)
        lam = np.random.beta(alpha, alpha)
        target_a, target_b = target, target[index]
        optimizer.zero_grad()
        adjust_learning_rate(optimizer, epoch, i, lr_length, args)
        count = 0
        for batch_l , batch_u in zip(aug_images_l ,aug_images_u):
            batch_l = batch_l.to(args.device)
            batch_u = batch_u.to(args.device)
            # Same ordering as the targets: labeled rows, then unlabeled rows.
            batch = torch.cat((batch_l,batch_u),0)
            m_batch = mixup_data(batch,index,lam)
            class_logit , _ = model(m_batch)
            if count == 0:
                loss_sum = mixup_criterion(class_logit.double() , target_a , target_b , lam).mean()
            else:
                loss_sum += mixup_criterion(class_logit.double() , target_a , target_b , lam).mean()
            count += 1
        # Average the mixup loss over the augmented views.
        loss = loss_sum / (args.aug_num)
        loss.backward()
        optimizer.step()
        if args.progress == True:
            loss_meter.add(loss.item())
            tk0.set_postfix(loss=loss_meter.mean)
        global_step += 1
    return global_step
def validate(eval_loader, model, args, global_step, epoch, num_classes =10):
    """Evaluate `model` on `eval_loader` and print/return top-1 and top-5
    accuracy averages (in percent), weighted by batch size.

    Returns:
        (top1_avg, top5_avg)

    Fixes applied: accuracy() was previously called twice per batch with
    identical arguments (the second call was a verbatim duplicate), and
    model.eval() was redundantly re-invoked on every iteration; it is now
    set once before the loop. `global_step`, `epoch`, and `num_classes`
    are accepted for interface compatibility but unused here.
    """
    meters = AverageMeterSet()
    model.eval()  # disable dropout / freeze batch-norm statistics once
    with torch.no_grad():
        for batch_idx, (inputs, targets) in enumerate(eval_loader):
            batch_size = targets.size(0)
            inputs = inputs.to(args.device)
            targets = targets.to(args.device)
            outputs, _ = model(inputs)
            # measure accuracy and record it (computed exactly once per batch)
            prec1, prec5 = accuracy(outputs, targets, topk=(1, 5))
            meters.update('top1', prec1.item(), batch_size)
            meters.update('error1', 100.0 - prec1.item(), batch_size)
            meters.update('top5', prec5.item(), batch_size)
            meters.update('error5', 100.0 - prec5.item(), batch_size)
    print(' * Prec@1 {top1.avg:.3f}\tPrec@5 {top5.avg:.3f}'
          .format(top1=meters['top1'], top5=meters['top5']))
    return meters['top1'].avg, meters['top5'].avg
def accuracy(output, target, topk=(1,)):
    """Precision@k for each k in `topk`, as percentages of the batch.

    `output` is a (batch, classes) score tensor; `target` holds the true
    class indices. Returns a list of scalar tensors, one per k.
    """
    k_max = max(topk)
    n = target.size(0)
    # Indices of the k_max highest-scoring classes, transposed so that
    # row r holds everyone's rank-r prediction.
    _, ranked = output.topk(k_max, 1, True, True)
    ranked = ranked.t()
    hits = ranked.eq(target.reshape(1, -1).expand_as(ranked))
    results = []
    for k in topk:
        # A sample counts as correct@k if the target appears in its top k.
        hit_count = hits[:k].reshape(-1).float().sum(0)
        results.append(hit_count.mul_(100.0 / n))
    return results
def cosine_rampdown(current, rampdown_length):
    """Cosine rampdown from https://arxiv.org/abs/1608.03983: decays
    smoothly from 1 (current == 0) to 0 (current == rampdown_length)."""
    assert 0 <= current <= rampdown_length
    cos_term = np.cos(np.pi * current / rampdown_length)
    return float((cos_term + 1) * .5)
def adjust_learning_rate(optimizer, epoch, step_in_epoch, total_steps_in_epoch, args):
    """Set every param group's learning rate for the current step.

    The base rate args.lr is optionally scaled by a single-cycle cosine
    rampdown (https://arxiv.org/abs/1608.03983) evaluated at the fractional
    epoch position.
    """
    fractional_epoch = epoch + step_in_epoch / total_steps_in_epoch
    lr = args.lr
    # Cosine LR rampdown (one cycle only); disabled when the setting is falsy.
    if args.lr_rampdown_epochs:
        assert args.lr_rampdown_epochs >= args.epochs
        lr *= cosine_rampdown(fractional_epoch, args.lr_rampdown_epochs)
    for group in optimizer.param_groups:
        group['lr'] = lr
def extract_features_simp(train_loader,model,args):
    """Run `model` over every batch of `train_loader` (in eval mode, no
    gradients) and return the concatenated feature vectors as a numpy array
    of shape (num_samples, feature_dim)."""
    model.eval()
    embeddings_all = []
    with torch.no_grad():
        for i, (batch_input) in enumerate(train_loader):
            # batch_input[0] holds the images; remaining fields are ignored.
            X_n = batch_input[0].to(args.device)
            # model returns (logits, features); only the features are kept.
            _ , feats = model(X_n)
            embeddings_all.append(feats.data.cpu())
    embeddings_all = np.asarray(torch.cat(embeddings_all).numpy())
    return embeddings_all
def load_args(args):
    """Fill in derived/dataset-dependent fields of `args` and return it.

    Sets args.workers from the GPU count, zero-pads single-digit label
    splits, and derives args.test_batch_size and the labels-file path for
    the supported datasets (cifar10, cifar100, miniimagenet); exits with an
    error message for anything else.

    Fixes applied: the three dataset branches were verbatim duplicates and
    are collapsed into one path; `sys` is imported locally because it is not
    among this module's explicit imports.
    """
    import sys
    args.workers = 4 * torch.cuda.device_count()
    label_dir = 'data-local/'
    # Zero-pad single-digit splits so they match the on-disk file names.
    if int(args.label_split) < 10:
        args.label_split = args.label_split.zfill(2)
    if args.dataset not in ("cifar10", "cifar100", "miniimagenet"):
        sys.exit('Undefined dataset!')
    if args.dataset == "miniimagenet":
        args.train_subdir = 'train'
    args.test_batch_size = args.batch_size
    args.labels = '%s/labels/%s/%d_balanced_labels/%s.txt' % (label_dir,args.dataset,args.num_labeled,args.label_split)
    return args
| mixup_criterion | identifier_name |
scriptgenerator.py | #!/usr/bin/python3 -i
#
# Copyright 2013-2023 The Khronos Group Inc.
#
# SPDX-License-Identifier: Apache-2.0
from generator import OutputGenerator, enquote, noneStr
def mostOfficial(api, newapi):
    """Return the 'most official' of two related names, api and newapi.

    KHR is more official than EXT, which is more official than any other
    suffix. Ties (including no recognized suffix on either name) go to api.
    """
    for suffix in ('KHR', 'EXT'):
        if api[-3:] == suffix:
            return api
        if newapi[-3:] == suffix:
            return newapi
    return api
class ScriptOutputGenerator(OutputGenerator):
"""ScriptOutputGenerator - subclass of OutputGenerator.
Base class to Generate script (Python/Ruby/etc.) data structures
describing API names and relationships.
Similar to DocOutputGenerator, but writes a single file."""
def apiName(self, name):
"""Return True if name is in the reserved API namespace.
Delegates to the conventions object. """
return self.genOpts.conventions.is_api_name(name)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Track features being generated
self.features = []
# Reverse map from interface names to features requiring them
self.apimap = {}
# Reverse map from unsupported APIs in this build to aliases which
# are supported
self.nonexistent = {}
def beginFile(self, genOpts):
OutputGenerator.beginFile(self, genOpts)
#
# Dictionaries are keyed by the name of the entity (e.g.
# self.structs is keyed by structure names). Values are
# the names of related entities (e.g. structs contain
# a list of type names of members, enums contain a list
# of enumerants belong to the enumerated type, etc.), or
# just None if there are no directly related entities.
#
# Collect the mappings, then emit the Python script in endFile
self.basetypes = {}
self.consts = {}
self.enums = {}
self.flags = {}
self.funcpointers = {}
self.protos = {}
self.structs = {}
self.handles = {}
self.defines = {}
self.alias = {}
# Dictionary containing the type of a type name
# (e.g. the string name of the dictionary with its contents).
self.typeCategory = {}
self.mapDict = {}
def addInterfaceMapping(self, api, feature, required):
"""Add a reverse mapping in self.apimap from an API to a feature
requiring that API.
- api - name of the API
- feature - name of the feature requiring it
- required - None, or an additional feature dependency within
'feature' """
# Each entry in self.apimap contains one or more
# ( feature, required ) tuples.
deps = ( feature, required )
if api in self.apimap:
self.apimap[api].append(deps)
else:
self.apimap[api] = [ deps ]
def mapInterfaceKeys(self, feature, key):
"""Construct reverse mapping of APIs to features requiring them in
self.apimap.
- feature - name of the feature being generated
- key - API category - 'define', 'basetype', etc."""
dict = self.featureDictionary[feature][key]
if dict:
# Not clear why handling of command vs. type APIs is different -
# see interfacedocgenerator.py, which this was based on.
if key == 'command':
for required in dict:
for api in dict[required]:
self.addInterfaceMapping(api, feature, required)
else:
for required in dict:
for parent in dict[required]:
for api in dict[required][parent]:
self.addInterfaceMapping(api, feature, required)
def mapInterfaces(self, feature):
"""Construct reverse mapping of APIs to features requiring them in
self.apimap.
- feature - name of the feature being generated"""
# Map each category of interface
self.mapInterfaceKeys(feature, 'basetype')
self.mapInterfaceKeys(feature, 'bitmask')
self.mapInterfaceKeys(feature, 'command')
self.mapInterfaceKeys(feature, 'define')
self.mapInterfaceKeys(feature, 'enum')
self.mapInterfaceKeys(feature, 'enumconstant')
self.mapInterfaceKeys(feature, 'funcpointer')
self.mapInterfaceKeys(feature, 'handle')
self.mapInterfaceKeys(feature, 'include')
self.mapInterfaceKeys(feature, 'struct')
self.mapInterfaceKeys(feature, 'union')
def endFile(self):
super().endFile()
def beginFeature(self, interface, emit):
# Start processing in superclass
OutputGenerator.beginFeature(self, interface, emit)
# Add this feature to the list being tracked
self.features.append( self.featureName )
def endFeature(self):
# Finish processing in superclass
OutputGenerator.endFeature(self)
def addName(self, dict, name, value):
"""Add a string entry to the dictionary, quoting it so it gets
printed out correctly in self.endFile()."""
dict[name] = value
def addMapping(self, baseType, refType):
"""Add a mapping between types to mapDict.
Only include API types, so we do not end up with a lot of useless
uint32_t and void types."""
if not self.apiName(baseType) or not self.apiName(refType):
self.logMsg('diag', 'ScriptOutputGenerator::addMapping: IGNORE map from', baseType, '<->', refType)
return
self.logMsg('diag', 'ScriptOutputGenerator::addMapping: map from',
baseType, '<->', refType)
if baseType not in self.mapDict:
baseDict = {}
self.mapDict[baseType] = baseDict
else:
baseDict = self.mapDict[baseType]
if refType not in self.mapDict:
refDict = {}
self.mapDict[refType] = refDict
else:
refDict = self.mapDict[refType]
baseDict[refType] = None
refDict[baseType] = None
def breakCheck(self, procname, name):
"""Debugging aid - call from procname to break on API 'name' if it
matches logic in this call."""
pat = 'VkExternalFenceFeatureFlagBits'
if name[0:len(pat)] == pat:
print('{}(name = {}) matches {}'.format(procname, name, pat))
import pdb
pdb.set_trace()
def genType(self, typeinfo, name, alias):
"""Generate type.
- For 'struct' or 'union' types, defer to genStruct() to
add to the dictionary.
- For 'bitmask' types, add the type name to the 'flags' dictionary,
with the value being the corresponding 'enums' name defining
the acceptable flag bits.
- For 'enum' types, add the type name to the 'enums' dictionary,
with the value being '@STOPHERE@' (because this case seems
never to happen).
- For 'funcpointer' types, add the type name to the 'funcpointers'
dictionary.
- For 'handle' and 'define' types, add the handle or #define name
to the 'struct' dictionary, because that is how the spec sources
tag these types even though they are not structs."""
OutputGenerator.genType(self, typeinfo, name, alias)
typeElem = typeinfo.elem
# If the type is a struct type, traverse the embedded <member> tags
# generating a structure. Otherwise, emit the tag text.
category = typeElem.get('category')
# Add a typeCategory{} entry for the category of this type.
self.addName(self.typeCategory, name, category)
if category in ('struct', 'union'):
self.genStruct(typeinfo, name, alias)
else:
if alias:
# Add name -> alias mapping
self.addName(self.alias, name, alias)
# Always emit an alias (?!)
count = 1
# May want to only emit full type definition when not an alias?
else:
# Extract the type name
# (from self.genOpts). Copy other text through unchanged.
# If the resulting text is an empty string, do not emit it.
count = len(noneStr(typeElem.text))
for elem in typeElem:
count += len(noneStr(elem.text)) + len(noneStr(elem.tail))
if count > 0:
if category == 'bitmask':
requiredEnum = typeElem.get('requires')
self.addName(self.flags, name, requiredEnum)
# This happens when the Flags type is defined, but no
# FlagBits are defined yet.
if requiredEnum is not None:
self.addMapping(name, requiredEnum)
elif category == 'enum':
# This case does not seem to come up. It nominally would
# result from
# <type name="Something" category="enum"/>,
# but the output generator does not emit them directly.
self.logMsg('warn', 'ScriptOutputGenerator::genType: invalid \'enum\' category for name:', name)
elif category == 'funcpointer':
self.funcpointers[name] = None
elif category == 'handle':
self.handles[name] = None
elif category == 'define':
self.defines[name] = None
elif category == 'basetype':
# Do not add an entry for base types that are not API types
# e.g. an API Bool type gets an entry, uint32_t does not
if self.apiName(name):
self.basetypes[name] = None
self.addName(self.typeCategory, name, 'basetype')
else:
self.logMsg('diag', 'ScriptOutputGenerator::genType: unprocessed type:', name, 'category:', category)
else:
self.logMsg('diag', 'ScriptOutputGenerator::genType: unprocessed type:', name)
def genStruct(self, typeinfo, typeName, alias):
"""Generate struct (e.g. C "struct" type).
Add the struct name to the 'structs' dictionary, with the
value being an ordered list of the struct member names."""
OutputGenerator.genStruct(self, typeinfo, typeName, alias)
if alias:
# Add name -> alias mapping
self.addName(self.alias, typeName, alias)
else:
# May want to only emit definition on this branch
True
members = [member.text for member in typeinfo.elem.findall('.//member/name')]
self.structs[typeName] = members
memberTypes = [member.text for member in typeinfo.elem.findall('.//member/type')]
for member_type in memberTypes:
self.addMapping(typeName, member_type)
def genGroup(self, groupinfo, groupName, alias):
"""Generate group (e.g. C "enum" type).
These are concatenated together with other types.
- Add the enum type name to the 'enums' dictionary, with
the value being an ordered list of the enumerant names.
- Add each enumerant name to the 'consts' dictionary, with
the value being the enum type the enumerant is part of."""
OutputGenerator.genGroup(self, groupinfo, groupName, alias)
groupElem = groupinfo.elem
# Add a typeCategory{} entry for the category of this type.
self.addName(self.typeCategory, groupName, 'group')
if alias:
# Add name -> alias mapping
self.addName(self.alias, groupName, alias)
else:
# May want to only emit definition on this branch
True
# Add each nested 'enum' tag
enumerants = [elem.get('name') for elem in groupElem.findall('enum')]
for name in enumerants:
self.addName(self.consts, name, groupName)
# Sort enums for output stability, since their order is irrelevant
self.enums[groupName] = sorted(enumerants)
def genEnum(self, enuminfo, name, alias):
"""Generate enumerant (compile-time constants).
- Add the constant name to the 'consts' dictionary, with the
value being None to indicate that the constant is not
an enumeration value."""
OutputGenerator.genEnum(self, enuminfo, name, alias)
if name not in self.consts:
# Add a typeCategory{} entry for the category of this type.
self.addName(self.typeCategory, name, 'consts')
self.consts[name] = None
if alias:
# Add name -> alias mapping
self.addName(self.alias, name, alias)
else:
# May want to only emit definition on this branch
True
# Otherwise, do not add it to the consts dictionary because it is
# already present. This happens due to the generator 'reparentEnums'
# parameter being False, so each extension enum appears in both the
# <enums> type and in the <extension> or <feature> it originally
# came from.
def genCmd(self, cmdinfo, name, alias):
"""Generate command.
- Add the command name to the 'protos' dictionary, with the
value being an ordered list of the parameter names."""
OutputGenerator.genCmd(self, cmdinfo, name, alias)
# Add a typeCategory{} entry for the category of this type.
self.addName(self.typeCategory, name, 'protos')
if alias:
# Add name -> alias mapping
self.addName(self.alias, name, alias)
else:
# May want to only emit definition on this branch
True
params = [param.text for param in cmdinfo.elem.findall('param/name')]
self.protos[name] = params
paramTypes = [param.text for param in cmdinfo.elem.findall('param/type')]
for param_type in paramTypes:
self.addMapping(name, param_type)
def createInverseMap(self):
| """This creates the inverse mapping of nonexistent APIs in this
build to their aliases which are supported. Must be called by
language-specific subclasses before emitting that mapping."""
# Map from APIs not supported in this build to aliases that are.
# When there are multiple valid choices for remapping, choose the
# most-official suffixed one (KHR > EXT > vendor).
for key in self.alias:
# If the API key is aliased to something which does not exist,
# then add the thing that does not exist to the nonexistent map.
# This is used in spec macros to make promoted extension links
# in specs built without the promoted interface refer to the
# older interface instead.
invkey = self.alias[key]
if invkey not in self.typeCategory:
if invkey in self.nonexistent:
# Potentially remap existing mapping to a more official
# alias.
self.nonexistent[invkey] = mostOfficial(self.nonexistent[invkey], key)
else:
# Create remapping to an alias
self.nonexistent[invkey] = key | identifier_body | |
scriptgenerator.py | #!/usr/bin/python3 -i
#
# Copyright 2013-2023 The Khronos Group Inc.
#
# SPDX-License-Identifier: Apache-2.0
from generator import OutputGenerator, enquote, noneStr
def | (api, newapi):
"""Return the 'most official' of two related names, api and newapi.
KHR is more official than EXT is more official than everything else.
If there is ambiguity, return api."""
if api[-3:] == 'KHR':
return api
if newapi[-3:] == 'KHR':
return newapi;
if api[-3:] == 'EXT':
return api
if newapi[-3:] == 'EXT':
return newapi;
return api
class ScriptOutputGenerator(OutputGenerator):
"""ScriptOutputGenerator - subclass of OutputGenerator.
Base class to Generate script (Python/Ruby/etc.) data structures
describing API names and relationships.
Similar to DocOutputGenerator, but writes a single file."""
def apiName(self, name):
"""Return True if name is in the reserved API namespace.
Delegates to the conventions object. """
return self.genOpts.conventions.is_api_name(name)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Track features being generated
self.features = []
# Reverse map from interface names to features requiring them
self.apimap = {}
# Reverse map from unsupported APIs in this build to aliases which
# are supported
self.nonexistent = {}
def beginFile(self, genOpts):
OutputGenerator.beginFile(self, genOpts)
#
# Dictionaries are keyed by the name of the entity (e.g.
# self.structs is keyed by structure names). Values are
# the names of related entities (e.g. structs contain
# a list of type names of members, enums contain a list
# of enumerants belong to the enumerated type, etc.), or
# just None if there are no directly related entities.
#
# Collect the mappings, then emit the Python script in endFile
self.basetypes = {}
self.consts = {}
self.enums = {}
self.flags = {}
self.funcpointers = {}
self.protos = {}
self.structs = {}
self.handles = {}
self.defines = {}
self.alias = {}
# Dictionary containing the type of a type name
# (e.g. the string name of the dictionary with its contents).
self.typeCategory = {}
self.mapDict = {}
def addInterfaceMapping(self, api, feature, required):
"""Add a reverse mapping in self.apimap from an API to a feature
requiring that API.
- api - name of the API
- feature - name of the feature requiring it
- required - None, or an additional feature dependency within
'feature' """
# Each entry in self.apimap contains one or more
# ( feature, required ) tuples.
deps = ( feature, required )
if api in self.apimap:
self.apimap[api].append(deps)
else:
self.apimap[api] = [ deps ]
def mapInterfaceKeys(self, feature, key):
"""Construct reverse mapping of APIs to features requiring them in
self.apimap.
- feature - name of the feature being generated
- key - API category - 'define', 'basetype', etc."""
dict = self.featureDictionary[feature][key]
if dict:
# Not clear why handling of command vs. type APIs is different -
# see interfacedocgenerator.py, which this was based on.
if key == 'command':
for required in dict:
for api in dict[required]:
self.addInterfaceMapping(api, feature, required)
else:
for required in dict:
for parent in dict[required]:
for api in dict[required][parent]:
self.addInterfaceMapping(api, feature, required)
def mapInterfaces(self, feature):
"""Construct reverse mapping of APIs to features requiring them in
self.apimap.
- feature - name of the feature being generated"""
# Map each category of interface
self.mapInterfaceKeys(feature, 'basetype')
self.mapInterfaceKeys(feature, 'bitmask')
self.mapInterfaceKeys(feature, 'command')
self.mapInterfaceKeys(feature, 'define')
self.mapInterfaceKeys(feature, 'enum')
self.mapInterfaceKeys(feature, 'enumconstant')
self.mapInterfaceKeys(feature, 'funcpointer')
self.mapInterfaceKeys(feature, 'handle')
self.mapInterfaceKeys(feature, 'include')
self.mapInterfaceKeys(feature, 'struct')
self.mapInterfaceKeys(feature, 'union')
def endFile(self):
super().endFile()
def beginFeature(self, interface, emit):
# Start processing in superclass
OutputGenerator.beginFeature(self, interface, emit)
# Add this feature to the list being tracked
self.features.append( self.featureName )
def endFeature(self):
# Finish processing in superclass
OutputGenerator.endFeature(self)
def addName(self, dict, name, value):
"""Add a string entry to the dictionary, quoting it so it gets
printed out correctly in self.endFile()."""
dict[name] = value
def addMapping(self, baseType, refType):
"""Add a mapping between types to mapDict.
Only include API types, so we do not end up with a lot of useless
uint32_t and void types."""
if not self.apiName(baseType) or not self.apiName(refType):
self.logMsg('diag', 'ScriptOutputGenerator::addMapping: IGNORE map from', baseType, '<->', refType)
return
self.logMsg('diag', 'ScriptOutputGenerator::addMapping: map from',
baseType, '<->', refType)
if baseType not in self.mapDict:
baseDict = {}
self.mapDict[baseType] = baseDict
else:
baseDict = self.mapDict[baseType]
if refType not in self.mapDict:
refDict = {}
self.mapDict[refType] = refDict
else:
refDict = self.mapDict[refType]
baseDict[refType] = None
refDict[baseType] = None
def breakCheck(self, procname, name):
"""Debugging aid - call from procname to break on API 'name' if it
matches logic in this call."""
pat = 'VkExternalFenceFeatureFlagBits'
if name[0:len(pat)] == pat:
print('{}(name = {}) matches {}'.format(procname, name, pat))
import pdb
pdb.set_trace()
def genType(self, typeinfo, name, alias):
"""Generate type.
- For 'struct' or 'union' types, defer to genStruct() to
add to the dictionary.
- For 'bitmask' types, add the type name to the 'flags' dictionary,
with the value being the corresponding 'enums' name defining
the acceptable flag bits.
- For 'enum' types, add the type name to the 'enums' dictionary,
with the value being '@STOPHERE@' (because this case seems
never to happen).
- For 'funcpointer' types, add the type name to the 'funcpointers'
dictionary.
- For 'handle' and 'define' types, add the handle or #define name
to the 'struct' dictionary, because that is how the spec sources
tag these types even though they are not structs."""
OutputGenerator.genType(self, typeinfo, name, alias)
typeElem = typeinfo.elem
# If the type is a struct type, traverse the embedded <member> tags
# generating a structure. Otherwise, emit the tag text.
category = typeElem.get('category')
# Add a typeCategory{} entry for the category of this type.
self.addName(self.typeCategory, name, category)
if category in ('struct', 'union'):
self.genStruct(typeinfo, name, alias)
else:
if alias:
# Add name -> alias mapping
self.addName(self.alias, name, alias)
# Always emit an alias (?!)
count = 1
# May want to only emit full type definition when not an alias?
else:
# Extract the type name
# (from self.genOpts). Copy other text through unchanged.
# If the resulting text is an empty string, do not emit it.
count = len(noneStr(typeElem.text))
for elem in typeElem:
count += len(noneStr(elem.text)) + len(noneStr(elem.tail))
if count > 0:
if category == 'bitmask':
requiredEnum = typeElem.get('requires')
self.addName(self.flags, name, requiredEnum)
# This happens when the Flags type is defined, but no
# FlagBits are defined yet.
if requiredEnum is not None:
self.addMapping(name, requiredEnum)
elif category == 'enum':
# This case does not seem to come up. It nominally would
# result from
# <type name="Something" category="enum"/>,
# but the output generator does not emit them directly.
self.logMsg('warn', 'ScriptOutputGenerator::genType: invalid \'enum\' category for name:', name)
elif category == 'funcpointer':
self.funcpointers[name] = None
elif category == 'handle':
self.handles[name] = None
elif category == 'define':
self.defines[name] = None
elif category == 'basetype':
# Do not add an entry for base types that are not API types
# e.g. an API Bool type gets an entry, uint32_t does not
if self.apiName(name):
self.basetypes[name] = None
self.addName(self.typeCategory, name, 'basetype')
else:
self.logMsg('diag', 'ScriptOutputGenerator::genType: unprocessed type:', name, 'category:', category)
else:
self.logMsg('diag', 'ScriptOutputGenerator::genType: unprocessed type:', name)
def genStruct(self, typeinfo, typeName, alias):
"""Generate struct (e.g. C "struct" type).
Add the struct name to the 'structs' dictionary, with the
value being an ordered list of the struct member names."""
OutputGenerator.genStruct(self, typeinfo, typeName, alias)
if alias:
# Add name -> alias mapping
self.addName(self.alias, typeName, alias)
else:
# May want to only emit definition on this branch
True
members = [member.text for member in typeinfo.elem.findall('.//member/name')]
self.structs[typeName] = members
memberTypes = [member.text for member in typeinfo.elem.findall('.//member/type')]
for member_type in memberTypes:
self.addMapping(typeName, member_type)
def genGroup(self, groupinfo, groupName, alias):
"""Generate group (e.g. C "enum" type).
These are concatenated together with other types.
- Add the enum type name to the 'enums' dictionary, with
the value being an ordered list of the enumerant names.
- Add each enumerant name to the 'consts' dictionary, with
the value being the enum type the enumerant is part of."""
OutputGenerator.genGroup(self, groupinfo, groupName, alias)
groupElem = groupinfo.elem
# Add a typeCategory{} entry for the category of this type.
self.addName(self.typeCategory, groupName, 'group')
if alias:
# Add name -> alias mapping
self.addName(self.alias, groupName, alias)
else:
# May want to only emit definition on this branch
True
# Add each nested 'enum' tag
enumerants = [elem.get('name') for elem in groupElem.findall('enum')]
for name in enumerants:
self.addName(self.consts, name, groupName)
# Sort enums for output stability, since their order is irrelevant
self.enums[groupName] = sorted(enumerants)
def genEnum(self, enuminfo, name, alias):
"""Generate enumerant (compile-time constants).
- Add the constant name to the 'consts' dictionary, with the
value being None to indicate that the constant is not
an enumeration value."""
OutputGenerator.genEnum(self, enuminfo, name, alias)
if name not in self.consts:
# Add a typeCategory{} entry for the category of this type.
self.addName(self.typeCategory, name, 'consts')
self.consts[name] = None
if alias:
# Add name -> alias mapping
self.addName(self.alias, name, alias)
else:
# May want to only emit definition on this branch
True
# Otherwise, do not add it to the consts dictionary because it is
# already present. This happens due to the generator 'reparentEnums'
# parameter being False, so each extension enum appears in both the
# <enums> type and in the <extension> or <feature> it originally
# came from.
def genCmd(self, cmdinfo, name, alias):
"""Generate command.
- Add the command name to the 'protos' dictionary, with the
value being an ordered list of the parameter names."""
OutputGenerator.genCmd(self, cmdinfo, name, alias)
# Add a typeCategory{} entry for the category of this type.
self.addName(self.typeCategory, name, 'protos')
if alias:
# Add name -> alias mapping
self.addName(self.alias, name, alias)
else:
# May want to only emit definition on this branch
True
params = [param.text for param in cmdinfo.elem.findall('param/name')]
self.protos[name] = params
paramTypes = [param.text for param in cmdinfo.elem.findall('param/type')]
for param_type in paramTypes:
self.addMapping(name, param_type)
def createInverseMap(self):
"""This creates the inverse mapping of nonexistent APIs in this
build to their aliases which are supported. Must be called by
language-specific subclasses before emitting that mapping."""
# Map from APIs not supported in this build to aliases that are.
# When there are multiple valid choices for remapping, choose the
# most-official suffixed one (KHR > EXT > vendor).
for key in self.alias:
# If the API key is aliased to something which does not exist,
# then add the thing that does not exist to the nonexistent map.
# This is used in spec macros to make promoted extension links
# in specs built without the promoted interface refer to the
# older interface instead.
invkey = self.alias[key]
if invkey not in self.typeCategory:
if invkey in self.nonexistent:
# Potentially remap existing mapping to a more official
# alias.
self.nonexistent[invkey] = mostOfficial(self.nonexistent[invkey], key)
else:
# Create remapping to an alias
self.nonexistent[invkey] = key
| mostOfficial | identifier_name |
scriptgenerator.py | #!/usr/bin/python3 -i
#
# Copyright 2013-2023 The Khronos Group Inc.
#
# SPDX-License-Identifier: Apache-2.0
from generator import OutputGenerator, enquote, noneStr
def mostOfficial(api, newapi):
    """Return the 'most official' of two related names, api and newapi.

    KHR is more official than EXT, which is more official than everything
    else (vendor suffixes). If there is ambiguity — neither name carries a
    ranked suffix, or both carry the same rank — return api.

    - api - existing name
    - newapi - candidate name to compare against api"""
    # str.endswith is clearer than slicing the last three characters, and
    # also behaves correctly for names shorter than the suffix.
    if api.endswith('KHR'):
        return api
    if newapi.endswith('KHR'):
        return newapi
    if api.endswith('EXT'):
        return api
    if newapi.endswith('EXT'):
        return newapi
    return api
class ScriptOutputGenerator(OutputGenerator):
"""ScriptOutputGenerator - subclass of OutputGenerator.
Base class to Generate script (Python/Ruby/etc.) data structures
describing API names and relationships.
Similar to DocOutputGenerator, but writes a single file."""
def apiName(self, name):
"""Return True if name is in the reserved API namespace.
Delegates to the conventions object. """
return self.genOpts.conventions.is_api_name(name)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Track features being generated
self.features = []
# Reverse map from interface names to features requiring them
self.apimap = {}
# Reverse map from unsupported APIs in this build to aliases which
# are supported
self.nonexistent = {}
def beginFile(self, genOpts):
OutputGenerator.beginFile(self, genOpts)
#
# Dictionaries are keyed by the name of the entity (e.g.
# self.structs is keyed by structure names). Values are
# the names of related entities (e.g. structs contain
# a list of type names of members, enums contain a list
# of enumerants belong to the enumerated type, etc.), or
# just None if there are no directly related entities.
#
# Collect the mappings, then emit the Python script in endFile
self.basetypes = {}
self.consts = {}
self.enums = {}
self.flags = {}
self.funcpointers = {}
self.protos = {}
self.structs = {}
self.handles = {}
self.defines = {}
self.alias = {}
# Dictionary containing the type of a type name
# (e.g. the string name of the dictionary with its contents).
self.typeCategory = {}
self.mapDict = {}
def addInterfaceMapping(self, api, feature, required):
    """Add a reverse mapping in self.apimap from an API to a feature
    requiring that API.

    - api - name of the API
    - feature - name of the feature requiring it
    - required - None, or an additional feature dependency within
      'feature'"""
    # Each entry in self.apimap contains one or more (feature, required)
    # tuples. setdefault creates the list on first use, replacing the
    # manual membership check.
    self.apimap.setdefault(api, []).append((feature, required))
def mapInterfaceKeys(self, feature, key):
    """Construct reverse mapping of APIs to features requiring them in
    self.apimap.

    - feature - name of the feature being generated
    - key - API category - 'define', 'basetype', etc."""
    # Renamed local from 'dict' to avoid shadowing the builtin type.
    requiredApis = self.featureDictionary[feature][key]
    if requiredApis:
        # Not clear why handling of command vs. type APIs is different -
        # see interfacedocgenerator.py, which this was based on.
        if key == 'command':
            for required in requiredApis:
                for api in requiredApis[required]:
                    self.addInterfaceMapping(api, feature, required)
        else:
            for required in requiredApis:
                for parent in requiredApis[required]:
                    for api in requiredApis[required][parent]:
                        self.addInterfaceMapping(api, feature, required)
def mapInterfaces(self, feature):
    """Construct reverse mapping of APIs to features requiring them in
    self.apimap.

    - feature - name of the feature being generated"""
    # Map each category of interface, in the same order as before.
    for category in ('basetype', 'bitmask', 'command', 'define', 'enum',
                     'enumconstant', 'funcpointer', 'handle', 'include',
                     'struct', 'union'):
        self.mapInterfaceKeys(feature, category)
def endFile(self):
super().endFile()
def beginFeature(self, interface, emit):
# Start processing in superclass
OutputGenerator.beginFeature(self, interface, emit)
# Add this feature to the list being tracked
self.features.append( self.featureName )
def endFeature(self):
# Finish processing in superclass
OutputGenerator.endFeature(self)
def addName(self, dict, name, value):
"""Add a string entry to the dictionary, quoting it so it gets
printed out correctly in self.endFile()."""
dict[name] = value
def addMapping(self, baseType, refType):
    """Add a bidirectional mapping between two types to mapDict.

    Only include API types, so we do not end up with a lot of useless
    uint32_t and void types.

    - baseType - name of the type being defined
    - refType - name of a type it references"""
    if not self.apiName(baseType) or not self.apiName(refType):
        self.logMsg('diag', 'ScriptOutputGenerator::addMapping: IGNORE map from', baseType, '<->', refType)
        return
    self.logMsg('diag', 'ScriptOutputGenerator::addMapping: map from',
                baseType, '<->', refType)
    # setdefault replaces the explicit membership checks; each type maps
    # to a dict of related type names (values are always None — the dict
    # is used as an ordered set).
    baseDict = self.mapDict.setdefault(baseType, {})
    refDict = self.mapDict.setdefault(refType, {})
    baseDict[refType] = None
    refDict[baseType] = None
def breakCheck(self, procname, name):
    """Debugging aid - call from procname to break on API 'name' if it
    matches logic in this call.

    - procname - name of the calling procedure, for the diagnostic message
    - name - API name to test against the hardwired pattern"""
    pat = 'VkExternalFenceFeatureFlagBits'
    # str.startswith is the idiomatic form of the prefix comparison.
    if name.startswith(pat):
        print('{}(name = {}) matches {}'.format(procname, name, pat))
        import pdb
        pdb.set_trace()
def genType(self, typeinfo, name, alias):
"""Generate type.
- For 'struct' or 'union' types, defer to genStruct() to
add to the dictionary.
- For 'bitmask' types, add the type name to the 'flags' dictionary,
with the value being the corresponding 'enums' name defining
the acceptable flag bits.
- For 'enum' types, add the type name to the 'enums' dictionary,
with the value being '@STOPHERE@' (because this case seems
never to happen).
- For 'funcpointer' types, add the type name to the 'funcpointers'
dictionary.
- For 'handle' and 'define' types, add the handle or #define name
to the 'struct' dictionary, because that is how the spec sources
tag these types even though they are not structs."""
OutputGenerator.genType(self, typeinfo, name, alias)
typeElem = typeinfo.elem
# If the type is a struct type, traverse the embedded <member> tags
# generating a structure. Otherwise, emit the tag text.
category = typeElem.get('category')
# Add a typeCategory{} entry for the category of this type.
self.addName(self.typeCategory, name, category)
if category in ('struct', 'union'):
self.genStruct(typeinfo, name, alias)
else:
if alias:
# Add name -> alias mapping
self.addName(self.alias, name, alias)
# Always emit an alias (?!)
count = 1
# May want to only emit full type definition when not an alias?
else:
# Extract the type name
# (from self.genOpts). Copy other text through unchanged.
# If the resulting text is an empty string, do not emit it.
count = len(noneStr(typeElem.text))
for elem in typeElem:
count += len(noneStr(elem.text)) + len(noneStr(elem.tail))
if count > 0:
if category == 'bitmask':
requiredEnum = typeElem.get('requires')
self.addName(self.flags, name, requiredEnum)
# This happens when the Flags type is defined, but no
# FlagBits are defined yet.
if requiredEnum is not None:
self.addMapping(name, requiredEnum)
elif category == 'enum':
# This case does not seem to come up. It nominally would
# result from
# <type name="Something" category="enum"/>,
# but the output generator does not emit them directly.
self.logMsg('warn', 'ScriptOutputGenerator::genType: invalid \'enum\' category for name:', name)
elif category == 'funcpointer': | elif category == 'define':
self.defines[name] = None
elif category == 'basetype':
# Do not add an entry for base types that are not API types
# e.g. an API Bool type gets an entry, uint32_t does not
if self.apiName(name):
self.basetypes[name] = None
self.addName(self.typeCategory, name, 'basetype')
else:
self.logMsg('diag', 'ScriptOutputGenerator::genType: unprocessed type:', name, 'category:', category)
else:
self.logMsg('diag', 'ScriptOutputGenerator::genType: unprocessed type:', name)
def genStruct(self, typeinfo, typeName, alias):
"""Generate struct (e.g. C "struct" type).
Add the struct name to the 'structs' dictionary, with the
value being an ordered list of the struct member names."""
OutputGenerator.genStruct(self, typeinfo, typeName, alias)
if alias:
# Add name -> alias mapping
self.addName(self.alias, typeName, alias)
else:
# May want to only emit definition on this branch
True
members = [member.text for member in typeinfo.elem.findall('.//member/name')]
self.structs[typeName] = members
memberTypes = [member.text for member in typeinfo.elem.findall('.//member/type')]
for member_type in memberTypes:
self.addMapping(typeName, member_type)
def genGroup(self, groupinfo, groupName, alias):
"""Generate group (e.g. C "enum" type).
These are concatenated together with other types.
- Add the enum type name to the 'enums' dictionary, with
the value being an ordered list of the enumerant names.
- Add each enumerant name to the 'consts' dictionary, with
the value being the enum type the enumerant is part of."""
OutputGenerator.genGroup(self, groupinfo, groupName, alias)
groupElem = groupinfo.elem
# Add a typeCategory{} entry for the category of this type.
self.addName(self.typeCategory, groupName, 'group')
if alias:
# Add name -> alias mapping
self.addName(self.alias, groupName, alias)
else:
# May want to only emit definition on this branch
True
# Add each nested 'enum' tag
enumerants = [elem.get('name') for elem in groupElem.findall('enum')]
for name in enumerants:
self.addName(self.consts, name, groupName)
# Sort enums for output stability, since their order is irrelevant
self.enums[groupName] = sorted(enumerants)
def genEnum(self, enuminfo, name, alias):
"""Generate enumerant (compile-time constants).
- Add the constant name to the 'consts' dictionary, with the
value being None to indicate that the constant is not
an enumeration value."""
OutputGenerator.genEnum(self, enuminfo, name, alias)
if name not in self.consts:
# Add a typeCategory{} entry for the category of this type.
self.addName(self.typeCategory, name, 'consts')
self.consts[name] = None
if alias:
# Add name -> alias mapping
self.addName(self.alias, name, alias)
else:
# May want to only emit definition on this branch
True
# Otherwise, do not add it to the consts dictionary because it is
# already present. This happens due to the generator 'reparentEnums'
# parameter being False, so each extension enum appears in both the
# <enums> type and in the <extension> or <feature> it originally
# came from.
def genCmd(self, cmdinfo, name, alias):
"""Generate command.
- Add the command name to the 'protos' dictionary, with the
value being an ordered list of the parameter names."""
OutputGenerator.genCmd(self, cmdinfo, name, alias)
# Add a typeCategory{} entry for the category of this type.
self.addName(self.typeCategory, name, 'protos')
if alias:
# Add name -> alias mapping
self.addName(self.alias, name, alias)
else:
# May want to only emit definition on this branch
True
params = [param.text for param in cmdinfo.elem.findall('param/name')]
self.protos[name] = params
paramTypes = [param.text for param in cmdinfo.elem.findall('param/type')]
for param_type in paramTypes:
self.addMapping(name, param_type)
def createInverseMap(self):
"""This creates the inverse mapping of nonexistent APIs in this
build to their aliases which are supported. Must be called by
language-specific subclasses before emitting that mapping."""
# Map from APIs not supported in this build to aliases that are.
# When there are multiple valid choices for remapping, choose the
# most-official suffixed one (KHR > EXT > vendor).
for key in self.alias:
# If the API key is aliased to something which does not exist,
# then add the thing that does not exist to the nonexistent map.
# This is used in spec macros to make promoted extension links
# in specs built without the promoted interface refer to the
# older interface instead.
invkey = self.alias[key]
if invkey not in self.typeCategory:
if invkey in self.nonexistent:
# Potentially remap existing mapping to a more official
# alias.
self.nonexistent[invkey] = mostOfficial(self.nonexistent[invkey], key)
else:
# Create remapping to an alias
self.nonexistent[invkey] = key | self.funcpointers[name] = None
elif category == 'handle':
self.handles[name] = None | random_line_split |
scriptgenerator.py | #!/usr/bin/python3 -i
#
# Copyright 2013-2023 The Khronos Group Inc.
#
# SPDX-License-Identifier: Apache-2.0
from generator import OutputGenerator, enquote, noneStr
def mostOfficial(api, newapi):
"""Return the 'most official' of two related names, api and newapi.
KHR is more official than EXT is more official than everything else.
If there is ambiguity, return api."""
if api[-3:] == 'KHR':
return api
if newapi[-3:] == 'KHR':
return newapi;
if api[-3:] == 'EXT':
return api
if newapi[-3:] == 'EXT':
return newapi;
return api
class ScriptOutputGenerator(OutputGenerator):
"""ScriptOutputGenerator - subclass of OutputGenerator.
Base class to Generate script (Python/Ruby/etc.) data structures
describing API names and relationships.
Similar to DocOutputGenerator, but writes a single file."""
def apiName(self, name):
"""Return True if name is in the reserved API namespace.
Delegates to the conventions object. """
return self.genOpts.conventions.is_api_name(name)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Track features being generated
self.features = []
# Reverse map from interface names to features requiring them
self.apimap = {}
# Reverse map from unsupported APIs in this build to aliases which
# are supported
self.nonexistent = {}
def beginFile(self, genOpts):
OutputGenerator.beginFile(self, genOpts)
#
# Dictionaries are keyed by the name of the entity (e.g.
# self.structs is keyed by structure names). Values are
# the names of related entities (e.g. structs contain
# a list of type names of members, enums contain a list
# of enumerants belong to the enumerated type, etc.), or
# just None if there are no directly related entities.
#
# Collect the mappings, then emit the Python script in endFile
self.basetypes = {}
self.consts = {}
self.enums = {}
self.flags = {}
self.funcpointers = {}
self.protos = {}
self.structs = {}
self.handles = {}
self.defines = {}
self.alias = {}
# Dictionary containing the type of a type name
# (e.g. the string name of the dictionary with its contents).
self.typeCategory = {}
self.mapDict = {}
def addInterfaceMapping(self, api, feature, required):
"""Add a reverse mapping in self.apimap from an API to a feature
requiring that API.
- api - name of the API
- feature - name of the feature requiring it
- required - None, or an additional feature dependency within
'feature' """
# Each entry in self.apimap contains one or more
# ( feature, required ) tuples.
deps = ( feature, required )
if api in self.apimap:
self.apimap[api].append(deps)
else:
self.apimap[api] = [ deps ]
def mapInterfaceKeys(self, feature, key):
"""Construct reverse mapping of APIs to features requiring them in
self.apimap.
- feature - name of the feature being generated
- key - API category - 'define', 'basetype', etc."""
dict = self.featureDictionary[feature][key]
if dict:
# Not clear why handling of command vs. type APIs is different -
# see interfacedocgenerator.py, which this was based on.
if key == 'command':
for required in dict:
for api in dict[required]:
self.addInterfaceMapping(api, feature, required)
else:
for required in dict:
for parent in dict[required]:
for api in dict[required][parent]:
self.addInterfaceMapping(api, feature, required)
def mapInterfaces(self, feature):
"""Construct reverse mapping of APIs to features requiring them in
self.apimap.
- feature - name of the feature being generated"""
# Map each category of interface
self.mapInterfaceKeys(feature, 'basetype')
self.mapInterfaceKeys(feature, 'bitmask')
self.mapInterfaceKeys(feature, 'command')
self.mapInterfaceKeys(feature, 'define')
self.mapInterfaceKeys(feature, 'enum')
self.mapInterfaceKeys(feature, 'enumconstant')
self.mapInterfaceKeys(feature, 'funcpointer')
self.mapInterfaceKeys(feature, 'handle')
self.mapInterfaceKeys(feature, 'include')
self.mapInterfaceKeys(feature, 'struct')
self.mapInterfaceKeys(feature, 'union')
def endFile(self):
super().endFile()
def beginFeature(self, interface, emit):
# Start processing in superclass
OutputGenerator.beginFeature(self, interface, emit)
# Add this feature to the list being tracked
self.features.append( self.featureName )
def endFeature(self):
# Finish processing in superclass
OutputGenerator.endFeature(self)
def addName(self, dict, name, value):
"""Add a string entry to the dictionary, quoting it so it gets
printed out correctly in self.endFile()."""
dict[name] = value
def addMapping(self, baseType, refType):
"""Add a mapping between types to mapDict.
Only include API types, so we do not end up with a lot of useless
uint32_t and void types."""
if not self.apiName(baseType) or not self.apiName(refType):
self.logMsg('diag', 'ScriptOutputGenerator::addMapping: IGNORE map from', baseType, '<->', refType)
return
self.logMsg('diag', 'ScriptOutputGenerator::addMapping: map from',
baseType, '<->', refType)
if baseType not in self.mapDict:
baseDict = {}
self.mapDict[baseType] = baseDict
else:
baseDict = self.mapDict[baseType]
if refType not in self.mapDict:
refDict = {}
self.mapDict[refType] = refDict
else:
refDict = self.mapDict[refType]
baseDict[refType] = None
refDict[baseType] = None
def breakCheck(self, procname, name):
"""Debugging aid - call from procname to break on API 'name' if it
matches logic in this call."""
pat = 'VkExternalFenceFeatureFlagBits'
if name[0:len(pat)] == pat:
print('{}(name = {}) matches {}'.format(procname, name, pat))
import pdb
pdb.set_trace()
def genType(self, typeinfo, name, alias):
"""Generate type.
- For 'struct' or 'union' types, defer to genStruct() to
add to the dictionary.
- For 'bitmask' types, add the type name to the 'flags' dictionary,
with the value being the corresponding 'enums' name defining
the acceptable flag bits.
- For 'enum' types, add the type name to the 'enums' dictionary,
with the value being '@STOPHERE@' (because this case seems
never to happen).
- For 'funcpointer' types, add the type name to the 'funcpointers'
dictionary.
- For 'handle' and 'define' types, add the handle or #define name
to the 'struct' dictionary, because that is how the spec sources
tag these types even though they are not structs."""
OutputGenerator.genType(self, typeinfo, name, alias)
typeElem = typeinfo.elem
# If the type is a struct type, traverse the embedded <member> tags
# generating a structure. Otherwise, emit the tag text.
category = typeElem.get('category')
# Add a typeCategory{} entry for the category of this type.
self.addName(self.typeCategory, name, category)
if category in ('struct', 'union'):
self.genStruct(typeinfo, name, alias)
else:
if alias:
# Add name -> alias mapping
self.addName(self.alias, name, alias)
# Always emit an alias (?!)
count = 1
# May want to only emit full type definition when not an alias?
else:
# Extract the type name
# (from self.genOpts). Copy other text through unchanged.
# If the resulting text is an empty string, do not emit it.
count = len(noneStr(typeElem.text))
for elem in typeElem:
count += len(noneStr(elem.text)) + len(noneStr(elem.tail))
if count > 0:
if category == 'bitmask':
requiredEnum = typeElem.get('requires')
self.addName(self.flags, name, requiredEnum)
# This happens when the Flags type is defined, but no
# FlagBits are defined yet.
if requiredEnum is not None:
self.addMapping(name, requiredEnum)
elif category == 'enum':
# This case does not seem to come up. It nominally would
# result from
# <type name="Something" category="enum"/>,
# but the output generator does not emit them directly.
self.logMsg('warn', 'ScriptOutputGenerator::genType: invalid \'enum\' category for name:', name)
elif category == 'funcpointer':
self.funcpointers[name] = None
elif category == 'handle':
self.handles[name] = None
elif category == 'define':
self.defines[name] = None
elif category == 'basetype':
# Do not add an entry for base types that are not API types
# e.g. an API Bool type gets an entry, uint32_t does not
if self.apiName(name):
self.basetypes[name] = None
self.addName(self.typeCategory, name, 'basetype')
else:
self.logMsg('diag', 'ScriptOutputGenerator::genType: unprocessed type:', name, 'category:', category)
else:
self.logMsg('diag', 'ScriptOutputGenerator::genType: unprocessed type:', name)
def genStruct(self, typeinfo, typeName, alias):
"""Generate struct (e.g. C "struct" type).
Add the struct name to the 'structs' dictionary, with the
value being an ordered list of the struct member names."""
OutputGenerator.genStruct(self, typeinfo, typeName, alias)
if alias:
# Add name -> alias mapping
self.addName(self.alias, typeName, alias)
else:
# May want to only emit definition on this branch
True
members = [member.text for member in typeinfo.elem.findall('.//member/name')]
self.structs[typeName] = members
memberTypes = [member.text for member in typeinfo.elem.findall('.//member/type')]
for member_type in memberTypes:
self.addMapping(typeName, member_type)
def genGroup(self, groupinfo, groupName, alias):
"""Generate group (e.g. C "enum" type).
These are concatenated together with other types.
- Add the enum type name to the 'enums' dictionary, with
the value being an ordered list of the enumerant names.
- Add each enumerant name to the 'consts' dictionary, with
the value being the enum type the enumerant is part of."""
OutputGenerator.genGroup(self, groupinfo, groupName, alias)
groupElem = groupinfo.elem
# Add a typeCategory{} entry for the category of this type.
self.addName(self.typeCategory, groupName, 'group')
if alias:
# Add name -> alias mapping
self.addName(self.alias, groupName, alias)
else:
# May want to only emit definition on this branch
True
# Add each nested 'enum' tag
enumerants = [elem.get('name') for elem in groupElem.findall('enum')]
for name in enumerants:
self.addName(self.consts, name, groupName)
# Sort enums for output stability, since their order is irrelevant
self.enums[groupName] = sorted(enumerants)
def genEnum(self, enuminfo, name, alias):
"""Generate enumerant (compile-time constants).
- Add the constant name to the 'consts' dictionary, with the
value being None to indicate that the constant is not
an enumeration value."""
OutputGenerator.genEnum(self, enuminfo, name, alias)
if name not in self.consts:
# Add a typeCategory{} entry for the category of this type.
|
if alias:
# Add name -> alias mapping
self.addName(self.alias, name, alias)
else:
# May want to only emit definition on this branch
True
# Otherwise, do not add it to the consts dictionary because it is
# already present. This happens due to the generator 'reparentEnums'
# parameter being False, so each extension enum appears in both the
# <enums> type and in the <extension> or <feature> it originally
# came from.
def genCmd(self, cmdinfo, name, alias):
"""Generate command.
- Add the command name to the 'protos' dictionary, with the
value being an ordered list of the parameter names."""
OutputGenerator.genCmd(self, cmdinfo, name, alias)
# Add a typeCategory{} entry for the category of this type.
self.addName(self.typeCategory, name, 'protos')
if alias:
# Add name -> alias mapping
self.addName(self.alias, name, alias)
else:
# May want to only emit definition on this branch
True
params = [param.text for param in cmdinfo.elem.findall('param/name')]
self.protos[name] = params
paramTypes = [param.text for param in cmdinfo.elem.findall('param/type')]
for param_type in paramTypes:
self.addMapping(name, param_type)
def createInverseMap(self):
"""This creates the inverse mapping of nonexistent APIs in this
build to their aliases which are supported. Must be called by
language-specific subclasses before emitting that mapping."""
# Map from APIs not supported in this build to aliases that are.
# When there are multiple valid choices for remapping, choose the
# most-official suffixed one (KHR > EXT > vendor).
for key in self.alias:
# If the API key is aliased to something which does not exist,
# then add the thing that does not exist to the nonexistent map.
# This is used in spec macros to make promoted extension links
# in specs built without the promoted interface refer to the
# older interface instead.
invkey = self.alias[key]
if invkey not in self.typeCategory:
if invkey in self.nonexistent:
# Potentially remap existing mapping to a more official
# alias.
self.nonexistent[invkey] = mostOfficial(self.nonexistent[invkey], key)
else:
# Create remapping to an alias
self.nonexistent[invkey] = key
| self.addName(self.typeCategory, name, 'consts')
self.consts[name] = None | conditional_block |
process.go | // Copyright 2015 The LUCI Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tumble
import (
"bytes"
"context"
"fmt"
"math"
"sync/atomic"
"time"
"go.chromium.org/luci/appengine/memlock"
"go.chromium.org/luci/common/clock"
"go.chromium.org/luci/common/data/stringset"
"go.chromium.org/luci/common/errors"
"go.chromium.org/luci/common/logging"
"go.chromium.org/luci/common/retry/transient"
"go.chromium.org/luci/common/sync/parallel"
"go.chromium.org/luci/common/tsmon/field"
"go.chromium.org/luci/common/tsmon/metric"
"go.chromium.org/gae/filter/txnBuf"
ds "go.chromium.org/gae/service/datastore"
"go.chromium.org/gae/service/datastore/serialize"
"go.chromium.org/gae/service/info"
mc "go.chromium.org/gae/service/memcache"
)
const (
// minNoWorkDelay is the minimum amount of time to sleep in between rounds if
// there was no work done in that round.
minNoWorkDelay = time.Second
)
var metricCompleted = metric.NewCounter(
"luci/tumble/mutations/completed",
"The number of mutations completed by tumble, but not necessarily deleted.",
nil,
field.String("namespace"),
)
var metricFailed = metric.NewCounter(
"luci/tumble/mutations/failed",
"The number of mutations attempted in tumble, but failed to complete.",
nil,
field.String("namespace"),
)
var metricDeleted = metric.NewCounter(
"luci/tumble/mutations/deleted",
"The number of mutations deleted by tumble.",
nil,
field.String("namespace"),
)
// expandedShardBounds returns the boundary of the expandedShard order that
// currently corresponds to this shard number. If shard >= NumShards (the
// currently configured number of shards), this will return low > high.
// Otherwise low < high.
func expandedShardBounds(c context.Context, cfg *Config, shard uint64) (low, high int64) {
	totalShards := cfg.TotalShardCount(info.GetNamespace(c))
	// shard is unsigned, so "shard < 0" (present in the original) could
	// never be true; only the upper bound needs checking.
	if shard >= totalShards {
		logging.Warningf(c, "Invalid shard: %d", shard)
		// Return inverted bounds to signal an invalid shard.
		return 0, -1
	}

	// Divide the full int64 keyspace evenly among the shards; the last
	// shard absorbs any remainder up to MaxInt64.
	expandedShardsPerShard := int64(math.MaxUint64 / totalShards)
	low = math.MinInt64 + (int64(shard) * expandedShardsPerShard)
	if shard == totalShards-1 {
		high = math.MaxInt64
	} else {
		high = low + expandedShardsPerShard
	}
	return low, high
}
// processShardQuery returns a datastore query selecting the distinct
// TargetRoot keys of tumble.Mutation entities whose ExpandedShard falls
// within the given shard's keyspace range. It returns nil if the shard
// number is out of range for the current configuration.
func processShardQuery(c context.Context, cfg *Config, shard uint64) *ds.Query {
	low, high := expandedShardBounds(c, cfg, shard)
	if low > high {
		// expandedShardBounds signals an invalid shard with inverted bounds.
		return nil
	}
	q := ds.NewQuery("tumble.Mutation").
		Gte("ExpandedShard", low).Lte("ExpandedShard", high).
		Project("TargetRoot").Distinct(true)
	return q
}
// processShard is the tumble backend endpoint. This accepts a shard number
// which is expected to be < GlobalConfig.NumShards.
func processShard(c context.Context, cfg *Config, timestamp time.Time, shard uint64, loop bool) error {
	logging.Fields{
		"shard": shard,
	}.Infof(c, "Processing tumble shard.")
	q := processShardQuery(c, cfg, shard)
	if q == nil {
		// processShardQuery returns nil when the shard number is out of
		// range for the current configuration.
		logging.Warningf(c, "dead shard, quitting")
		return nil
	}
	// Calculate our end time. If we're not looping or we have a <= 0 duration,
	// we will perform a single loop.
	var endTime time.Time
	if cfg.ProcessLoopDuration > 0 {
		endTime = clock.Now(c).Add(time.Duration(cfg.ProcessLoopDuration))
		logging.Debugf(c, "Process loop is configured to exit after [%s] at %s",
			cfg.ProcessLoopDuration.String(), endTime)
	}
	// Lock around the shard that we are trying to modify.
	//
	// Since memcache is namespaced, we don't need to include the namespace in our
	// lock name.
	task := makeProcessTask(timestamp, endTime, shard, loop)
	lockKey := fmt.Sprintf("%s.%d.lock", baseName, shard)
	clientID := fmt.Sprintf("%d_%d_%s", timestamp.Unix(), shard, info.RequestID(c))
	err := memlock.TryWithLock(c, lockKey, clientID, func(c context.Context) error {
		return task.process(c, cfg, q)
	})
	if err == memlock.ErrFailedToLock {
		// Another processor already holds this shard's lock; that is a
		// normal outcome, not an error.
		logging.Infof(c, "Couldn't obtain lock (giving up): %s", err)
		return nil
	}
	return err
}
// processTask is a stateful processing task for a single tumble shard.
type processTask struct {
	// timestamp is the dispatch time of this task; used to early-exit if
	// another shard processor has already advanced past it.
	timestamp time.Time
	// endTime is when the processing loop should give up; a zero value
	// means a single processing round.
	endTime time.Time
	// lastKey is the memcache key recording the last-processed timestamp
	// for this shard.
	lastKey string
	// banSets maps an encoded root key to a set of keys to skip for that
	// root — presumably mutation keys already handled; confirm against
	// processRoot.
	banSets map[string]stringset.Set
	// loop records whether this task was invoked in looping mode.
	loop bool
}
// makeProcessTask creates a processTask for the given shard.
//
// timestamp is the dispatch time of the task, endTime is when the
// processing loop should stop (zero for a single round), and loop
// indicates whether the task was invoked in looping mode.
func makeProcessTask(timestamp, endTime time.Time, shard uint64, loop bool) *processTask {
	return &processTask{
		timestamp: timestamp,
		endTime:   endTime,
		lastKey:   fmt.Sprintf("%s.%d.last", baseName, shard),
		banSets:   make(map[string]stringset.Set),
		loop:      loop,
	}
}
func (t *processTask) process(c context.Context, cfg *Config, q *ds.Query) error {
// this last key allows buffered tasks to early exit if some other shard
// processor has already processed past this task's target timestamp.
lastItm, err := mc.GetKey(c, t.lastKey)
if err != nil {
if err != mc.ErrCacheMiss {
logging.Warningf(c, "couldn't obtain last timestamp: %s", err)
}
} else {
val := lastItm.Value()
last, err := serialize.ReadTime(bytes.NewBuffer(val))
if err != nil {
logging.Warningf(c, "could not decode timestamp %v: %s", val, err)
} else {
last = last.Add(time.Duration(cfg.TemporalRoundFactor))
if last.After(t.timestamp) {
logging.Infof(c, "early exit, %s > %s", last, t.timestamp)
return nil
}
}
}
err = nil
// Loop until our shard processing session expires.
prd := processRoundDelay{
cfg: cfg,
}
prd.reset()
for {
var numProcessed, errCount, transientErrCount counter
// Run our query against a work pool.
//
// NO work pool methods will return errors, so there is no need to collect
// the result. Rather, any error that is encountered will atomically update
// the "errCount" counter (for non-transient errors) or "transientErrCount"
// counter (for transient errors).
_ = parallel.WorkPool(int(cfg.NumGoroutines), func(ch chan<- func() error) {
err := ds.Run(c, q, func(pm ds.PropertyMap) error {
root := pm.Slice("TargetRoot")[0].Value().(*ds.Key)
encRoot := root.Encode()
// TODO(riannucci): make banSets remove keys from the banSet which
// weren't hit. Once they stop showing up, they'll never show up
// again.
bs := t.banSets[encRoot]
if bs == nil {
bs = stringset.New(0)
t.banSets[encRoot] = bs
}
ch <- func() error {
switch err := processRoot(c, cfg, root, bs, &numProcessed); err {
case nil:
return nil
case ds.ErrConcurrentTransaction:
logging.Fields{
logging.ErrorKey: err,
"root": root,
}.Warningf(c, "Transient error encountered processing root.")
transientErrCount.inc()
return nil
default:
logging.Fields{
logging.ErrorKey: err,
"root": root,
}.Errorf(c, "Failed to process root.")
errCount.inc()
return nil
}
}
if err := c.Err(); err != nil {
logging.WithError(err).Warningf(c, "Context canceled (lost lock?).")
return ds.Stop
}
return nil
})
if err != nil {
var qstr string
if fq, err := q.Finalize(); err == nil {
qstr = fq.String()
}
logging.Fields{
logging.ErrorKey: err,
"query": qstr,
}.Errorf(c, "Failure to run shard query.")
errCount.inc()
}
})
logging.Infof(c, "cumulatively processed %d items with %d errors(s) and %d transient error(s)",
numProcessed, errCount, transientErrCount)
switch {
case transientErrCount > 0:
return errors.New("transient error during shard processing", transient.Tag)
case errCount > 0:
return errors.New("encountered non-transient error during shard processing")
}
now := clock.Now(c)
didWork := numProcessed > 0
if didWork {
// Set our last key value for next round.
err = mc.Set(c, mc.NewItem(c, t.lastKey).SetValue(serialize.ToBytes(now.UTC())))
if err != nil {
logging.Warningf(c, "could not update last process memcache key %s: %s", t.lastKey, err)
}
} else if t.endTime.IsZero() || !t.loop {
// We didn't do any work this round, and we're configured for a single
// loop, so we're done.
logging.Debugf(c, "Configured for single loop.")
return nil
}
// If we're past our end time, then we're done.
if !t.endTime.IsZero() && now.After(t.endTime) {
logging.Debugf(c, "Exceeded our process loop time by [%s]; terminating loop.", now.Sub(t.endTime))
return nil
}
// Either we are looping, we did work last round, or both. Sleep in between
// processing rounds for a duration based on whether or not we did work.
delay := prd.next(didWork)
if delay > 0 {
// If we have an end time, and this delay would exceed that end time, then
// don't bother sleeping; we're done.
if !t.endTime.IsZero() && now.Add(delay).After(t.endTime) {
logging.Debugf(c, "Delay (%s) exceeds process loop time (%s); terminating loop.",
delay, t.endTime)
return nil
}
logging.Debugf(c, "Sleeping %s in between rounds...", delay)
if err := clock.Sleep(c, delay).Err; err != nil {
logging.WithError(err).Warningf(c, "Sleep interrupted, terminating loop.")
return nil
}
}
}
}
func getBatchByRoot(c context.Context, cfg *Config, root *ds.Key, banSet stringset.Set) ([]*realMutation, error) {
q := ds.NewQuery("tumble.Mutation").Eq("TargetRoot", root)
if cfg.DelayedMutations {
q = q.Lte("ProcessAfter", clock.Now(c).UTC())
}
fetchAllocSize := cfg.ProcessMaxBatchSize
if fetchAllocSize < 0 {
fetchAllocSize = 0
}
toFetch := make([]*realMutation, 0, fetchAllocSize)
err := ds.Run(c, q, func(k *ds.Key) error {
if !banSet.Has(k.Encode()) {
toFetch = append(toFetch, &realMutation{
ID: k.StringID(),
Parent: k.Parent(),
})
}
if len(toFetch) < cap(toFetch) {
return nil
}
return ds.Stop
})
return toFetch, err
}
func loadFilteredMutations(c context.Context, rms []*realMutation) ([]*ds.Key, []Mutation, error) {
mutKeys := make([]*ds.Key, 0, len(rms))
muts := make([]Mutation, 0, len(rms))
err := ds.Get(c, rms)
me, ok := err.(errors.MultiError)
if !ok && err != nil {
return nil, nil, err
}
for i, rm := range rms {
err = nil
if me != nil {
err = me[i]
}
if err == nil {
if rm.Version != getAppVersion(c) {
logging.Fields{
"mut_version": rm.Version,
"cur_version": getAppVersion(c),
}.Warningf(c, "loading mutation with different code version")
}
m, err := rm.GetMutation()
if err != nil {
logging.Errorf(c, "couldn't load mutation: %s", err)
continue
}
muts = append(muts, m)
mutKeys = append(mutKeys, ds.KeyForObj(c, rm))
} else if err != ds.ErrNoSuchEntity {
return nil, nil, me
}
}
| type overrideRoot struct {
Mutation
root *ds.Key
}
func (o overrideRoot) Root(context.Context) *ds.Key {
return o.root
}
func processRoot(c context.Context, cfg *Config, root *ds.Key, banSet stringset.Set, cnt *counter) error {
l := logging.Get(c)
toFetch, err := getBatchByRoot(c, cfg, root, banSet)
switch {
case err != nil:
l.Errorf("Failed to get batch for root [%s]: %s", root, err)
return err
case len(toFetch) == 0:
return nil
}
mutKeys, muts, err := loadFilteredMutations(c, toFetch)
if err != nil {
return err
}
if c.Err() != nil {
l.Warningf("Lost lock during processRoot")
return nil
}
allShards := map[taskShard]struct{}{}
toDel := make([]*ds.Key, 0, len(muts))
var numMuts, deletedMuts, processedMuts int
err = ds.RunInTransaction(txnBuf.FilterRDS(c), func(c context.Context) error {
toDel = toDel[:0]
numMuts = 0
deletedMuts = 0
processedMuts = 0
iterMuts := muts
iterMutKeys := mutKeys
for i := 0; i < len(iterMuts); i++ {
m := iterMuts[i]
s := clock.Now(c)
logging.Fields{"m": m}.Infof(c, "running RollForward")
shards, newMuts, newMutKeys, err := enterTransactionMutation(c, cfg, overrideRoot{m, root}, uint64(i))
logging.Fields{"m": m}.Infof(c, "done RollForward, took %s", clock.Now(c).Sub(s))
if err != nil {
l.Errorf("Executing decoded gob(%T) failed: %q: %+v", m, err, m)
continue
}
processedMuts++
for j, nm := range newMuts {
if nm.Root(c).HasAncestor(root) {
runNow := !cfg.DelayedMutations
if !runNow {
dm, isDelayedMutation := nm.(DelayedMutation)
runNow = !isDelayedMutation || clock.Now(c).UTC().After(dm.ProcessAfter())
}
if runNow {
iterMuts = append(iterMuts, nm)
iterMutKeys = append(iterMutKeys, newMutKeys[j])
}
}
}
// Finished processing this Mutation.
key := iterMutKeys[i]
switch {
case key.HasAncestor(root):
// try to delete it as part of the same transaction.
if err := ds.Delete(c, key); err == nil {
deletedMuts++
break
}
fallthrough // Failed to delete, try again outside of the transaction.
default:
toDel = append(toDel, key)
}
numMuts += len(newMuts)
for shard := range shards {
allShards[shard] = struct{}{}
}
}
return nil
}, nil)
if err != nil {
l.Errorf("failed running transaction: %s", err)
metricFailed.Add(c, int64(numMuts), root.Namespace())
return err
}
fireTasks(c, cfg, allShards, true)
l.Debugf("successfully processed %d mutations (%d tail-call), delta %d",
processedMuts, deletedMuts, (numMuts - deletedMuts))
metricCompleted.Add(c, int64(processedMuts), root.Namespace())
// This is for the mutations deleted in a transaction.
metricDeleted.Add(c, int64(deletedMuts), root.Namespace())
cnt.add(processedMuts)
if len(toDel) > 0 {
for _, k := range toDel {
banSet.Add(k.Encode())
}
if err := ds.Delete(c, toDel); err != nil {
// This is categorized as failed because it's going to get retried again.
metricFailed.Add(c, int64(len(toDel)), root.Namespace())
l.Warningf("error deleting finished mutations: %s", err)
} else {
// This is for mutations deleted outside of the transaction,
// because they failed to delete the first time we tried to do it
// inside the transaction.
metricDeleted.Add(c, int64(len(toDel)), root.Namespace())
}
}
return nil
}
// counter is an atomic integer counter.
//
// When concurrent access is possible, a counter must only be manipulated with
// its "inc" and "add" methods, and must not be read.
//
// We use an int32 because that is atomically safe across all architectures.
type counter int32
func (c *counter) inc() int { return c.add(1) }
func (c *counter) add(n int) int { return int(atomic.AddInt32((*int32)(c), int32(n))) }
// processRoundDelay calculates the delay to impose in between processing
// rounds.
type processRoundDelay struct {
cfg *Config
nextDelay time.Duration
}
func (prd *processRoundDelay) reset() {
// Reset our delay to DustSettleTimeout.
prd.nextDelay = time.Duration(prd.cfg.DustSettleTimeout)
}
func (prd *processRoundDelay) next(didWork bool) time.Duration {
if didWork {
// Reset our delay to DustSettleTimeout.
prd.reset()
return prd.nextDelay
}
delay := prd.nextDelay
if growth := prd.cfg.NoWorkDelayGrowth; growth > 1 {
prd.nextDelay *= time.Duration(growth)
}
if max := time.Duration(prd.cfg.MaxNoWorkDelay); max > 0 && delay > max {
delay = max
// Cap our "next delay" so it doesn't grow unbounded in the background.
prd.nextDelay = delay
}
// Enforce a no work lower bound.
if delay < minNoWorkDelay {
delay = minNoWorkDelay
}
return delay
} | return mutKeys, muts, nil
}
| random_line_split |
process.go | // Copyright 2015 The LUCI Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tumble
import (
"bytes"
"context"
"fmt"
"math"
"sync/atomic"
"time"
"go.chromium.org/luci/appengine/memlock"
"go.chromium.org/luci/common/clock"
"go.chromium.org/luci/common/data/stringset"
"go.chromium.org/luci/common/errors"
"go.chromium.org/luci/common/logging"
"go.chromium.org/luci/common/retry/transient"
"go.chromium.org/luci/common/sync/parallel"
"go.chromium.org/luci/common/tsmon/field"
"go.chromium.org/luci/common/tsmon/metric"
"go.chromium.org/gae/filter/txnBuf"
ds "go.chromium.org/gae/service/datastore"
"go.chromium.org/gae/service/datastore/serialize"
"go.chromium.org/gae/service/info"
mc "go.chromium.org/gae/service/memcache"
)
const (
// minNoWorkDelay is the minimum amount of time to sleep in between rounds if
// there was no work done in that round.
minNoWorkDelay = time.Second
)
var metricCompleted = metric.NewCounter(
"luci/tumble/mutations/completed",
"The number of mutations completed by tumble, but not necessarily deleted.",
nil,
field.String("namespace"),
)
var metricFailed = metric.NewCounter(
"luci/tumble/mutations/failed",
"The number of mutations attempted in tumble, but failed to complete.",
nil,
field.String("namespace"),
)
var metricDeleted = metric.NewCounter(
"luci/tumble/mutations/deleted",
"The number of mutations deleted by tumble.",
nil,
field.String("namespace"),
)
// expandedShardBounds returns the boundary of the expandedShard order that
// currently corresponds to this shard number. If Shard is < 0 or > NumShards
// (the currently configured number of shards), this will return a low > high.
// Otherwise low < high.
func expandedShardBounds(c context.Context, cfg *Config, shard uint64) (low, high int64) {
totalShards := cfg.TotalShardCount(info.GetNamespace(c))
if shard < 0 || uint64(shard) >= totalShards {
logging.Warningf(c, "Invalid shard: %d", shard)
// return inverted bounds
return 0, -1
}
expandedShardsPerShard := int64(math.MaxUint64 / totalShards)
low = math.MinInt64 + (int64(shard) * expandedShardsPerShard)
if uint64(shard) == totalShards-1 {
high = math.MaxInt64
} else {
high = low + expandedShardsPerShard
}
return
}
func processShardQuery(c context.Context, cfg *Config, shard uint64) *ds.Query {
low, high := expandedShardBounds(c, cfg, shard)
if low > high {
return nil
}
q := ds.NewQuery("tumble.Mutation").
Gte("ExpandedShard", low).Lte("ExpandedShard", high).
Project("TargetRoot").Distinct(true)
return q
}
// processShard is the tumble backend endpoint. This accepts a shard number
// which is expected to be < GlobalConfig.NumShards.
func processShard(c context.Context, cfg *Config, timestamp time.Time, shard uint64, loop bool) error {
logging.Fields{
"shard": shard,
}.Infof(c, "Processing tumble shard.")
q := processShardQuery(c, cfg, shard)
if q == nil {
logging.Warningf(c, "dead shard, quitting")
return nil
}
// Calculate our end itme. If we're not looping or we have a <= 0 duration,
// we will perform a single loop.
var endTime time.Time
if cfg.ProcessLoopDuration > 0 {
endTime = clock.Now(c).Add(time.Duration(cfg.ProcessLoopDuration))
logging.Debugf(c, "Process loop is configured to exit after [%s] at %s",
cfg.ProcessLoopDuration.String(), endTime)
}
// Lock around the shard that we are trying to modify.
//
// Since memcache is namespaced, we don't need to include the namespace in our
// lock name.
task := makeProcessTask(timestamp, endTime, shard, loop)
lockKey := fmt.Sprintf("%s.%d.lock", baseName, shard)
clientID := fmt.Sprintf("%d_%d_%s", timestamp.Unix(), shard, info.RequestID(c))
err := memlock.TryWithLock(c, lockKey, clientID, func(c context.Context) error {
return task.process(c, cfg, q)
})
if err == memlock.ErrFailedToLock {
logging.Infof(c, "Couldn't obtain lock (giving up): %s", err)
return nil
}
return err
}
// processTask is a stateful processing task.
type processTask struct {
timestamp time.Time
endTime time.Time
lastKey string
banSets map[string]stringset.Set
loop bool
}
func makeProcessTask(timestamp, endTime time.Time, shard uint64, loop bool) *processTask {
return &processTask{
timestamp: timestamp,
endTime: endTime,
lastKey: fmt.Sprintf("%s.%d.last", baseName, shard),
banSets: make(map[string]stringset.Set),
loop: loop,
}
}
func (t *processTask) process(c context.Context, cfg *Config, q *ds.Query) error {
// this last key allows buffered tasks to early exit if some other shard
// processor has already processed past this task's target timestamp.
lastItm, err := mc.GetKey(c, t.lastKey)
if err != nil {
if err != mc.ErrCacheMiss {
logging.Warningf(c, "couldn't obtain last timestamp: %s", err)
}
} else {
val := lastItm.Value()
last, err := serialize.ReadTime(bytes.NewBuffer(val))
if err != nil {
logging.Warningf(c, "could not decode timestamp %v: %s", val, err)
} else {
last = last.Add(time.Duration(cfg.TemporalRoundFactor))
if last.After(t.timestamp) {
logging.Infof(c, "early exit, %s > %s", last, t.timestamp)
return nil
}
}
}
err = nil
// Loop until our shard processing session expires.
prd := processRoundDelay{
cfg: cfg,
}
prd.reset()
for {
var numProcessed, errCount, transientErrCount counter
// Run our query against a work pool.
//
// NO work pool methods will return errors, so there is no need to collect
// the result. Rather, any error that is encountered will atomically update
// the "errCount" counter (for non-transient errors) or "transientErrCount"
// counter (for transient errors).
_ = parallel.WorkPool(int(cfg.NumGoroutines), func(ch chan<- func() error) {
err := ds.Run(c, q, func(pm ds.PropertyMap) error {
root := pm.Slice("TargetRoot")[0].Value().(*ds.Key)
encRoot := root.Encode()
// TODO(riannucci): make banSets remove keys from the banSet which
// weren't hit. Once they stop showing up, they'll never show up
// again.
bs := t.banSets[encRoot]
if bs == nil {
bs = stringset.New(0)
t.banSets[encRoot] = bs
}
ch <- func() error {
switch err := processRoot(c, cfg, root, bs, &numProcessed); err {
case nil:
return nil
case ds.ErrConcurrentTransaction:
logging.Fields{
logging.ErrorKey: err,
"root": root,
}.Warningf(c, "Transient error encountered processing root.")
transientErrCount.inc()
return nil
default:
logging.Fields{
logging.ErrorKey: err,
"root": root,
}.Errorf(c, "Failed to process root.")
errCount.inc()
return nil
}
}
if err := c.Err(); err != nil {
logging.WithError(err).Warningf(c, "Context canceled (lost lock?).")
return ds.Stop
}
return nil
})
if err != nil {
var qstr string
if fq, err := q.Finalize(); err == nil {
qstr = fq.String()
}
logging.Fields{
logging.ErrorKey: err,
"query": qstr,
}.Errorf(c, "Failure to run shard query.")
errCount.inc()
}
})
logging.Infof(c, "cumulatively processed %d items with %d errors(s) and %d transient error(s)",
numProcessed, errCount, transientErrCount)
switch {
case transientErrCount > 0:
return errors.New("transient error during shard processing", transient.Tag)
case errCount > 0:
return errors.New("encountered non-transient error during shard processing")
}
now := clock.Now(c)
didWork := numProcessed > 0
if didWork {
// Set our last key value for next round.
err = mc.Set(c, mc.NewItem(c, t.lastKey).SetValue(serialize.ToBytes(now.UTC())))
if err != nil {
logging.Warningf(c, "could not update last process memcache key %s: %s", t.lastKey, err)
}
} else if t.endTime.IsZero() || !t.loop {
// We didn't do any work this round, and we're configured for a single
// loop, so we're done.
logging.Debugf(c, "Configured for single loop.")
return nil
}
// If we're past our end time, then we're done.
if !t.endTime.IsZero() && now.After(t.endTime) {
logging.Debugf(c, "Exceeded our process loop time by [%s]; terminating loop.", now.Sub(t.endTime))
return nil
}
// Either we are looping, we did work last round, or both. Sleep in between
// processing rounds for a duration based on whether or not we did work.
delay := prd.next(didWork)
if delay > 0 {
// If we have an end time, and this delay would exceed that end time, then
// don't bother sleeping; we're done.
if !t.endTime.IsZero() && now.Add(delay).After(t.endTime) {
logging.Debugf(c, "Delay (%s) exceeds process loop time (%s); terminating loop.",
delay, t.endTime)
return nil
}
logging.Debugf(c, "Sleeping %s in between rounds...", delay)
if err := clock.Sleep(c, delay).Err; err != nil {
logging.WithError(err).Warningf(c, "Sleep interrupted, terminating loop.")
return nil
}
}
}
}
func getBatchByRoot(c context.Context, cfg *Config, root *ds.Key, banSet stringset.Set) ([]*realMutation, error) {
q := ds.NewQuery("tumble.Mutation").Eq("TargetRoot", root)
if cfg.DelayedMutations {
q = q.Lte("ProcessAfter", clock.Now(c).UTC())
}
fetchAllocSize := cfg.ProcessMaxBatchSize
if fetchAllocSize < 0 {
fetchAllocSize = 0
}
toFetch := make([]*realMutation, 0, fetchAllocSize)
err := ds.Run(c, q, func(k *ds.Key) error {
if !banSet.Has(k.Encode()) {
toFetch = append(toFetch, &realMutation{
ID: k.StringID(),
Parent: k.Parent(),
})
}
if len(toFetch) < cap(toFetch) {
return nil
}
return ds.Stop
})
return toFetch, err
}
func loadFilteredMutations(c context.Context, rms []*realMutation) ([]*ds.Key, []Mutation, error) {
mutKeys := make([]*ds.Key, 0, len(rms))
muts := make([]Mutation, 0, len(rms))
err := ds.Get(c, rms)
me, ok := err.(errors.MultiError)
if !ok && err != nil {
return nil, nil, err
}
for i, rm := range rms {
err = nil
if me != nil {
err = me[i]
}
if err == nil {
if rm.Version != getAppVersion(c) {
logging.Fields{
"mut_version": rm.Version,
"cur_version": getAppVersion(c),
}.Warningf(c, "loading mutation with different code version")
}
m, err := rm.GetMutation()
if err != nil {
logging.Errorf(c, "couldn't load mutation: %s", err)
continue
}
muts = append(muts, m)
mutKeys = append(mutKeys, ds.KeyForObj(c, rm))
} else if err != ds.ErrNoSuchEntity |
}
return mutKeys, muts, nil
}
type overrideRoot struct {
Mutation
root *ds.Key
}
func (o overrideRoot) Root(context.Context) *ds.Key {
return o.root
}
func processRoot(c context.Context, cfg *Config, root *ds.Key, banSet stringset.Set, cnt *counter) error {
l := logging.Get(c)
toFetch, err := getBatchByRoot(c, cfg, root, banSet)
switch {
case err != nil:
l.Errorf("Failed to get batch for root [%s]: %s", root, err)
return err
case len(toFetch) == 0:
return nil
}
mutKeys, muts, err := loadFilteredMutations(c, toFetch)
if err != nil {
return err
}
if c.Err() != nil {
l.Warningf("Lost lock during processRoot")
return nil
}
allShards := map[taskShard]struct{}{}
toDel := make([]*ds.Key, 0, len(muts))
var numMuts, deletedMuts, processedMuts int
err = ds.RunInTransaction(txnBuf.FilterRDS(c), func(c context.Context) error {
toDel = toDel[:0]
numMuts = 0
deletedMuts = 0
processedMuts = 0
iterMuts := muts
iterMutKeys := mutKeys
for i := 0; i < len(iterMuts); i++ {
m := iterMuts[i]
s := clock.Now(c)
logging.Fields{"m": m}.Infof(c, "running RollForward")
shards, newMuts, newMutKeys, err := enterTransactionMutation(c, cfg, overrideRoot{m, root}, uint64(i))
logging.Fields{"m": m}.Infof(c, "done RollForward, took %s", clock.Now(c).Sub(s))
if err != nil {
l.Errorf("Executing decoded gob(%T) failed: %q: %+v", m, err, m)
continue
}
processedMuts++
for j, nm := range newMuts {
if nm.Root(c).HasAncestor(root) {
runNow := !cfg.DelayedMutations
if !runNow {
dm, isDelayedMutation := nm.(DelayedMutation)
runNow = !isDelayedMutation || clock.Now(c).UTC().After(dm.ProcessAfter())
}
if runNow {
iterMuts = append(iterMuts, nm)
iterMutKeys = append(iterMutKeys, newMutKeys[j])
}
}
}
// Finished processing this Mutation.
key := iterMutKeys[i]
switch {
case key.HasAncestor(root):
// try to delete it as part of the same transaction.
if err := ds.Delete(c, key); err == nil {
deletedMuts++
break
}
fallthrough // Failed to delete, try again outside of the transaction.
default:
toDel = append(toDel, key)
}
numMuts += len(newMuts)
for shard := range shards {
allShards[shard] = struct{}{}
}
}
return nil
}, nil)
if err != nil {
l.Errorf("failed running transaction: %s", err)
metricFailed.Add(c, int64(numMuts), root.Namespace())
return err
}
fireTasks(c, cfg, allShards, true)
l.Debugf("successfully processed %d mutations (%d tail-call), delta %d",
processedMuts, deletedMuts, (numMuts - deletedMuts))
metricCompleted.Add(c, int64(processedMuts), root.Namespace())
// This is for the mutations deleted in a transaction.
metricDeleted.Add(c, int64(deletedMuts), root.Namespace())
cnt.add(processedMuts)
if len(toDel) > 0 {
for _, k := range toDel {
banSet.Add(k.Encode())
}
if err := ds.Delete(c, toDel); err != nil {
// This is categorized as failed because it's going to get retried again.
metricFailed.Add(c, int64(len(toDel)), root.Namespace())
l.Warningf("error deleting finished mutations: %s", err)
} else {
// This is for mutations deleted outside of the transaction,
// because they failed to delete the first time we tried to do it
// inside the transaction.
metricDeleted.Add(c, int64(len(toDel)), root.Namespace())
}
}
return nil
}
// counter is an atomic integer counter.
//
// When concurrent access is possible, a counter must only be manipulated with
// its "inc" and "add" methods, and must not be read.
//
// We use an int32 because that is atomically safe across all architectures.
type counter int32
func (c *counter) inc() int { return c.add(1) }
func (c *counter) add(n int) int { return int(atomic.AddInt32((*int32)(c), int32(n))) }
// processRoundDelay calculates the delay to impose in between processing
// rounds.
type processRoundDelay struct {
cfg *Config
nextDelay time.Duration
}
func (prd *processRoundDelay) reset() {
// Reset our delay to DustSettleTimeout.
prd.nextDelay = time.Duration(prd.cfg.DustSettleTimeout)
}
func (prd *processRoundDelay) next(didWork bool) time.Duration {
if didWork {
// Reset our delay to DustSettleTimeout.
prd.reset()
return prd.nextDelay
}
delay := prd.nextDelay
if growth := prd.cfg.NoWorkDelayGrowth; growth > 1 {
prd.nextDelay *= time.Duration(growth)
}
if max := time.Duration(prd.cfg.MaxNoWorkDelay); max > 0 && delay > max {
delay = max
// Cap our "next delay" so it doesn't grow unbounded in the background.
prd.nextDelay = delay
}
// Enforce a no work lower bound.
if delay < minNoWorkDelay {
delay = minNoWorkDelay
}
return delay
}
| {
return nil, nil, me
} | conditional_block |
process.go | // Copyright 2015 The LUCI Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tumble
import (
"bytes"
"context"
"fmt"
"math"
"sync/atomic"
"time"
"go.chromium.org/luci/appengine/memlock"
"go.chromium.org/luci/common/clock"
"go.chromium.org/luci/common/data/stringset"
"go.chromium.org/luci/common/errors"
"go.chromium.org/luci/common/logging"
"go.chromium.org/luci/common/retry/transient"
"go.chromium.org/luci/common/sync/parallel"
"go.chromium.org/luci/common/tsmon/field"
"go.chromium.org/luci/common/tsmon/metric"
"go.chromium.org/gae/filter/txnBuf"
ds "go.chromium.org/gae/service/datastore"
"go.chromium.org/gae/service/datastore/serialize"
"go.chromium.org/gae/service/info"
mc "go.chromium.org/gae/service/memcache"
)
const (
// minNoWorkDelay is the minimum amount of time to sleep in between rounds if
// there was no work done in that round.
minNoWorkDelay = time.Second
)
var metricCompleted = metric.NewCounter(
"luci/tumble/mutations/completed",
"The number of mutations completed by tumble, but not necessarily deleted.",
nil,
field.String("namespace"),
)
var metricFailed = metric.NewCounter(
"luci/tumble/mutations/failed",
"The number of mutations attempted in tumble, but failed to complete.",
nil,
field.String("namespace"),
)
var metricDeleted = metric.NewCounter(
"luci/tumble/mutations/deleted",
"The number of mutations deleted by tumble.",
nil,
field.String("namespace"),
)
// expandedShardBounds returns the boundary of the expandedShard order that
// currently corresponds to this shard number. If Shard is < 0 or > NumShards
// (the currently configured number of shards), this will return a low > high.
// Otherwise low < high.
func expandedShardBounds(c context.Context, cfg *Config, shard uint64) (low, high int64) {
totalShards := cfg.TotalShardCount(info.GetNamespace(c))
if shard < 0 || uint64(shard) >= totalShards {
logging.Warningf(c, "Invalid shard: %d", shard)
// return inverted bounds
return 0, -1
}
expandedShardsPerShard := int64(math.MaxUint64 / totalShards)
low = math.MinInt64 + (int64(shard) * expandedShardsPerShard)
if uint64(shard) == totalShards-1 {
high = math.MaxInt64
} else {
high = low + expandedShardsPerShard
}
return
}
func processShardQuery(c context.Context, cfg *Config, shard uint64) *ds.Query {
low, high := expandedShardBounds(c, cfg, shard)
if low > high {
return nil
}
q := ds.NewQuery("tumble.Mutation").
Gte("ExpandedShard", low).Lte("ExpandedShard", high).
Project("TargetRoot").Distinct(true)
return q
}
// processShard is the tumble backend endpoint. This accepts a shard number
// which is expected to be < GlobalConfig.NumShards.
func processShard(c context.Context, cfg *Config, timestamp time.Time, shard uint64, loop bool) error {
logging.Fields{
"shard": shard,
}.Infof(c, "Processing tumble shard.")
q := processShardQuery(c, cfg, shard)
if q == nil {
logging.Warningf(c, "dead shard, quitting")
return nil
}
// Calculate our end itme. If we're not looping or we have a <= 0 duration,
// we will perform a single loop.
var endTime time.Time
if cfg.ProcessLoopDuration > 0 {
endTime = clock.Now(c).Add(time.Duration(cfg.ProcessLoopDuration))
logging.Debugf(c, "Process loop is configured to exit after [%s] at %s",
cfg.ProcessLoopDuration.String(), endTime)
}
// Lock around the shard that we are trying to modify.
//
// Since memcache is namespaced, we don't need to include the namespace in our
// lock name.
task := makeProcessTask(timestamp, endTime, shard, loop)
lockKey := fmt.Sprintf("%s.%d.lock", baseName, shard)
clientID := fmt.Sprintf("%d_%d_%s", timestamp.Unix(), shard, info.RequestID(c))
err := memlock.TryWithLock(c, lockKey, clientID, func(c context.Context) error {
return task.process(c, cfg, q)
})
if err == memlock.ErrFailedToLock {
logging.Infof(c, "Couldn't obtain lock (giving up): %s", err)
return nil
}
return err
}
// processTask is a stateful processing task.
type processTask struct {
timestamp time.Time
endTime time.Time
lastKey string
banSets map[string]stringset.Set
loop bool
}
func makeProcessTask(timestamp, endTime time.Time, shard uint64, loop bool) *processTask {
return &processTask{
timestamp: timestamp,
endTime: endTime,
lastKey: fmt.Sprintf("%s.%d.last", baseName, shard),
banSets: make(map[string]stringset.Set),
loop: loop,
}
}
func (t *processTask) process(c context.Context, cfg *Config, q *ds.Query) error {
// this last key allows buffered tasks to early exit if some other shard
// processor has already processed past this task's target timestamp.
lastItm, err := mc.GetKey(c, t.lastKey)
if err != nil {
if err != mc.ErrCacheMiss {
logging.Warningf(c, "couldn't obtain last timestamp: %s", err)
}
} else {
val := lastItm.Value()
last, err := serialize.ReadTime(bytes.NewBuffer(val))
if err != nil {
logging.Warningf(c, "could not decode timestamp %v: %s", val, err)
} else {
last = last.Add(time.Duration(cfg.TemporalRoundFactor))
if last.After(t.timestamp) {
logging.Infof(c, "early exit, %s > %s", last, t.timestamp)
return nil
}
}
}
err = nil
// Loop until our shard processing session expires.
prd := processRoundDelay{
cfg: cfg,
}
prd.reset()
for {
var numProcessed, errCount, transientErrCount counter
// Run our query against a work pool.
//
// NO work pool methods will return errors, so there is no need to collect
// the result. Rather, any error that is encountered will atomically update
// the "errCount" counter (for non-transient errors) or "transientErrCount"
// counter (for transient errors).
_ = parallel.WorkPool(int(cfg.NumGoroutines), func(ch chan<- func() error) {
err := ds.Run(c, q, func(pm ds.PropertyMap) error {
root := pm.Slice("TargetRoot")[0].Value().(*ds.Key)
encRoot := root.Encode()
// TODO(riannucci): make banSets remove keys from the banSet which
// weren't hit. Once they stop showing up, they'll never show up
// again.
bs := t.banSets[encRoot]
if bs == nil {
bs = stringset.New(0)
t.banSets[encRoot] = bs
}
ch <- func() error {
switch err := processRoot(c, cfg, root, bs, &numProcessed); err {
case nil:
return nil
case ds.ErrConcurrentTransaction:
logging.Fields{
logging.ErrorKey: err,
"root": root,
}.Warningf(c, "Transient error encountered processing root.")
transientErrCount.inc()
return nil
default:
logging.Fields{
logging.ErrorKey: err,
"root": root,
}.Errorf(c, "Failed to process root.")
errCount.inc()
return nil
}
}
if err := c.Err(); err != nil {
logging.WithError(err).Warningf(c, "Context canceled (lost lock?).")
return ds.Stop
}
return nil
})
if err != nil {
var qstr string
if fq, err := q.Finalize(); err == nil {
qstr = fq.String()
}
logging.Fields{
logging.ErrorKey: err,
"query": qstr,
}.Errorf(c, "Failure to run shard query.")
errCount.inc()
}
})
logging.Infof(c, "cumulatively processed %d items with %d errors(s) and %d transient error(s)",
numProcessed, errCount, transientErrCount)
switch {
case transientErrCount > 0:
return errors.New("transient error during shard processing", transient.Tag)
case errCount > 0:
return errors.New("encountered non-transient error during shard processing")
}
now := clock.Now(c)
didWork := numProcessed > 0
if didWork {
// Set our last key value for next round.
err = mc.Set(c, mc.NewItem(c, t.lastKey).SetValue(serialize.ToBytes(now.UTC())))
if err != nil {
logging.Warningf(c, "could not update last process memcache key %s: %s", t.lastKey, err)
}
} else if t.endTime.IsZero() || !t.loop {
// We didn't do any work this round, and we're configured for a single
// loop, so we're done.
logging.Debugf(c, "Configured for single loop.")
return nil
}
// If we're past our end time, then we're done.
if !t.endTime.IsZero() && now.After(t.endTime) {
logging.Debugf(c, "Exceeded our process loop time by [%s]; terminating loop.", now.Sub(t.endTime))
return nil
}
// Either we are looping, we did work last round, or both. Sleep in between
// processing rounds for a duration based on whether or not we did work.
delay := prd.next(didWork)
if delay > 0 {
// If we have an end time, and this delay would exceed that end time, then
// don't bother sleeping; we're done.
if !t.endTime.IsZero() && now.Add(delay).After(t.endTime) {
logging.Debugf(c, "Delay (%s) exceeds process loop time (%s); terminating loop.",
delay, t.endTime)
return nil
}
logging.Debugf(c, "Sleeping %s in between rounds...", delay)
if err := clock.Sleep(c, delay).Err; err != nil {
logging.WithError(err).Warningf(c, "Sleep interrupted, terminating loop.")
return nil
}
}
}
}
func getBatchByRoot(c context.Context, cfg *Config, root *ds.Key, banSet stringset.Set) ([]*realMutation, error) |
func loadFilteredMutations(c context.Context, rms []*realMutation) ([]*ds.Key, []Mutation, error) {
mutKeys := make([]*ds.Key, 0, len(rms))
muts := make([]Mutation, 0, len(rms))
err := ds.Get(c, rms)
me, ok := err.(errors.MultiError)
if !ok && err != nil {
return nil, nil, err
}
for i, rm := range rms {
err = nil
if me != nil {
err = me[i]
}
if err == nil {
if rm.Version != getAppVersion(c) {
logging.Fields{
"mut_version": rm.Version,
"cur_version": getAppVersion(c),
}.Warningf(c, "loading mutation with different code version")
}
m, err := rm.GetMutation()
if err != nil {
logging.Errorf(c, "couldn't load mutation: %s", err)
continue
}
muts = append(muts, m)
mutKeys = append(mutKeys, ds.KeyForObj(c, rm))
} else if err != ds.ErrNoSuchEntity {
return nil, nil, me
}
}
return mutKeys, muts, nil
}
type overrideRoot struct {
Mutation
root *ds.Key
}
func (o overrideRoot) Root(context.Context) *ds.Key {
return o.root
}
func processRoot(c context.Context, cfg *Config, root *ds.Key, banSet stringset.Set, cnt *counter) error {
l := logging.Get(c)
toFetch, err := getBatchByRoot(c, cfg, root, banSet)
switch {
case err != nil:
l.Errorf("Failed to get batch for root [%s]: %s", root, err)
return err
case len(toFetch) == 0:
return nil
}
mutKeys, muts, err := loadFilteredMutations(c, toFetch)
if err != nil {
return err
}
if c.Err() != nil {
l.Warningf("Lost lock during processRoot")
return nil
}
allShards := map[taskShard]struct{}{}
toDel := make([]*ds.Key, 0, len(muts))
var numMuts, deletedMuts, processedMuts int
err = ds.RunInTransaction(txnBuf.FilterRDS(c), func(c context.Context) error {
toDel = toDel[:0]
numMuts = 0
deletedMuts = 0
processedMuts = 0
iterMuts := muts
iterMutKeys := mutKeys
for i := 0; i < len(iterMuts); i++ {
m := iterMuts[i]
s := clock.Now(c)
logging.Fields{"m": m}.Infof(c, "running RollForward")
shards, newMuts, newMutKeys, err := enterTransactionMutation(c, cfg, overrideRoot{m, root}, uint64(i))
logging.Fields{"m": m}.Infof(c, "done RollForward, took %s", clock.Now(c).Sub(s))
if err != nil {
l.Errorf("Executing decoded gob(%T) failed: %q: %+v", m, err, m)
continue
}
processedMuts++
for j, nm := range newMuts {
if nm.Root(c).HasAncestor(root) {
runNow := !cfg.DelayedMutations
if !runNow {
dm, isDelayedMutation := nm.(DelayedMutation)
runNow = !isDelayedMutation || clock.Now(c).UTC().After(dm.ProcessAfter())
}
if runNow {
iterMuts = append(iterMuts, nm)
iterMutKeys = append(iterMutKeys, newMutKeys[j])
}
}
}
// Finished processing this Mutation.
key := iterMutKeys[i]
switch {
case key.HasAncestor(root):
// try to delete it as part of the same transaction.
if err := ds.Delete(c, key); err == nil {
deletedMuts++
break
}
fallthrough // Failed to delete, try again outside of the transaction.
default:
toDel = append(toDel, key)
}
numMuts += len(newMuts)
for shard := range shards {
allShards[shard] = struct{}{}
}
}
return nil
}, nil)
if err != nil {
l.Errorf("failed running transaction: %s", err)
metricFailed.Add(c, int64(numMuts), root.Namespace())
return err
}
fireTasks(c, cfg, allShards, true)
l.Debugf("successfully processed %d mutations (%d tail-call), delta %d",
processedMuts, deletedMuts, (numMuts - deletedMuts))
metricCompleted.Add(c, int64(processedMuts), root.Namespace())
// This is for the mutations deleted in a transaction.
metricDeleted.Add(c, int64(deletedMuts), root.Namespace())
cnt.add(processedMuts)
if len(toDel) > 0 {
for _, k := range toDel {
banSet.Add(k.Encode())
}
if err := ds.Delete(c, toDel); err != nil {
// This is categorized as failed because it's going to get retried again.
metricFailed.Add(c, int64(len(toDel)), root.Namespace())
l.Warningf("error deleting finished mutations: %s", err)
} else {
// This is for mutations deleted outside of the transaction,
// because they failed to delete the first time we tried to do it
// inside the transaction.
metricDeleted.Add(c, int64(len(toDel)), root.Namespace())
}
}
return nil
}
// counter is an atomic integer counter.
//
// When concurrent access is possible, a counter must only be manipulated with
// its "inc" and "add" methods, and must not be read.
//
// We use an int32 because that is atomically safe across all architectures.
type counter int32
func (c *counter) inc() int { return c.add(1) }
func (c *counter) add(n int) int { return int(atomic.AddInt32((*int32)(c), int32(n))) }
// processRoundDelay calculates the delay to impose in between processing
// rounds.
type processRoundDelay struct {
cfg *Config
nextDelay time.Duration
}
func (prd *processRoundDelay) reset() {
// Reset our delay to DustSettleTimeout.
prd.nextDelay = time.Duration(prd.cfg.DustSettleTimeout)
}
func (prd *processRoundDelay) next(didWork bool) time.Duration {
if didWork {
// Reset our delay to DustSettleTimeout.
prd.reset()
return prd.nextDelay
}
delay := prd.nextDelay
if growth := prd.cfg.NoWorkDelayGrowth; growth > 1 {
prd.nextDelay *= time.Duration(growth)
}
if max := time.Duration(prd.cfg.MaxNoWorkDelay); max > 0 && delay > max {
delay = max
// Cap our "next delay" so it doesn't grow unbounded in the background.
prd.nextDelay = delay
}
// Enforce a no work lower bound.
if delay < minNoWorkDelay {
delay = minNoWorkDelay
}
return delay
}
| {
q := ds.NewQuery("tumble.Mutation").Eq("TargetRoot", root)
if cfg.DelayedMutations {
q = q.Lte("ProcessAfter", clock.Now(c).UTC())
}
fetchAllocSize := cfg.ProcessMaxBatchSize
if fetchAllocSize < 0 {
fetchAllocSize = 0
}
toFetch := make([]*realMutation, 0, fetchAllocSize)
err := ds.Run(c, q, func(k *ds.Key) error {
if !banSet.Has(k.Encode()) {
toFetch = append(toFetch, &realMutation{
ID: k.StringID(),
Parent: k.Parent(),
})
}
if len(toFetch) < cap(toFetch) {
return nil
}
return ds.Stop
})
return toFetch, err
} | identifier_body |
process.go | // Copyright 2015 The LUCI Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tumble
import (
"bytes"
"context"
"fmt"
"math"
"sync/atomic"
"time"
"go.chromium.org/luci/appengine/memlock"
"go.chromium.org/luci/common/clock"
"go.chromium.org/luci/common/data/stringset"
"go.chromium.org/luci/common/errors"
"go.chromium.org/luci/common/logging"
"go.chromium.org/luci/common/retry/transient"
"go.chromium.org/luci/common/sync/parallel"
"go.chromium.org/luci/common/tsmon/field"
"go.chromium.org/luci/common/tsmon/metric"
"go.chromium.org/gae/filter/txnBuf"
ds "go.chromium.org/gae/service/datastore"
"go.chromium.org/gae/service/datastore/serialize"
"go.chromium.org/gae/service/info"
mc "go.chromium.org/gae/service/memcache"
)
const (
// minNoWorkDelay is the minimum amount of time to sleep in between rounds if
// there was no work done in that round.
minNoWorkDelay = time.Second
)
var metricCompleted = metric.NewCounter(
"luci/tumble/mutations/completed",
"The number of mutations completed by tumble, but not necessarily deleted.",
nil,
field.String("namespace"),
)
var metricFailed = metric.NewCounter(
"luci/tumble/mutations/failed",
"The number of mutations attempted in tumble, but failed to complete.",
nil,
field.String("namespace"),
)
var metricDeleted = metric.NewCounter(
"luci/tumble/mutations/deleted",
"The number of mutations deleted by tumble.",
nil,
field.String("namespace"),
)
// expandedShardBounds returns the boundary of the expandedShard order that
// currently corresponds to this shard number. If Shard is < 0 or > NumShards
// (the currently configured number of shards), this will return a low > high.
// Otherwise low < high.
func expandedShardBounds(c context.Context, cfg *Config, shard uint64) (low, high int64) {
totalShards := cfg.TotalShardCount(info.GetNamespace(c))
if shard < 0 || uint64(shard) >= totalShards {
logging.Warningf(c, "Invalid shard: %d", shard)
// return inverted bounds
return 0, -1
}
expandedShardsPerShard := int64(math.MaxUint64 / totalShards)
low = math.MinInt64 + (int64(shard) * expandedShardsPerShard)
if uint64(shard) == totalShards-1 {
high = math.MaxInt64
} else {
high = low + expandedShardsPerShard
}
return
}
func processShardQuery(c context.Context, cfg *Config, shard uint64) *ds.Query {
low, high := expandedShardBounds(c, cfg, shard)
if low > high {
return nil
}
q := ds.NewQuery("tumble.Mutation").
Gte("ExpandedShard", low).Lte("ExpandedShard", high).
Project("TargetRoot").Distinct(true)
return q
}
// processShard is the tumble backend endpoint. This accepts a shard number
// which is expected to be < GlobalConfig.NumShards.
func processShard(c context.Context, cfg *Config, timestamp time.Time, shard uint64, loop bool) error {
logging.Fields{
"shard": shard,
}.Infof(c, "Processing tumble shard.")
q := processShardQuery(c, cfg, shard)
if q == nil {
logging.Warningf(c, "dead shard, quitting")
return nil
}
// Calculate our end itme. If we're not looping or we have a <= 0 duration,
// we will perform a single loop.
var endTime time.Time
if cfg.ProcessLoopDuration > 0 {
endTime = clock.Now(c).Add(time.Duration(cfg.ProcessLoopDuration))
logging.Debugf(c, "Process loop is configured to exit after [%s] at %s",
cfg.ProcessLoopDuration.String(), endTime)
}
// Lock around the shard that we are trying to modify.
//
// Since memcache is namespaced, we don't need to include the namespace in our
// lock name.
task := makeProcessTask(timestamp, endTime, shard, loop)
lockKey := fmt.Sprintf("%s.%d.lock", baseName, shard)
clientID := fmt.Sprintf("%d_%d_%s", timestamp.Unix(), shard, info.RequestID(c))
err := memlock.TryWithLock(c, lockKey, clientID, func(c context.Context) error {
return task.process(c, cfg, q)
})
if err == memlock.ErrFailedToLock {
logging.Infof(c, "Couldn't obtain lock (giving up): %s", err)
return nil
}
return err
}
// processTask is a stateful processing task.
type processTask struct {
timestamp time.Time
endTime time.Time
lastKey string
banSets map[string]stringset.Set
loop bool
}
func makeProcessTask(timestamp, endTime time.Time, shard uint64, loop bool) *processTask {
return &processTask{
timestamp: timestamp,
endTime: endTime,
lastKey: fmt.Sprintf("%s.%d.last", baseName, shard),
banSets: make(map[string]stringset.Set),
loop: loop,
}
}
func (t *processTask) process(c context.Context, cfg *Config, q *ds.Query) error {
// this last key allows buffered tasks to early exit if some other shard
// processor has already processed past this task's target timestamp.
lastItm, err := mc.GetKey(c, t.lastKey)
if err != nil {
if err != mc.ErrCacheMiss {
logging.Warningf(c, "couldn't obtain last timestamp: %s", err)
}
} else {
val := lastItm.Value()
last, err := serialize.ReadTime(bytes.NewBuffer(val))
if err != nil {
logging.Warningf(c, "could not decode timestamp %v: %s", val, err)
} else {
last = last.Add(time.Duration(cfg.TemporalRoundFactor))
if last.After(t.timestamp) {
logging.Infof(c, "early exit, %s > %s", last, t.timestamp)
return nil
}
}
}
err = nil
// Loop until our shard processing session expires.
prd := processRoundDelay{
cfg: cfg,
}
prd.reset()
for {
var numProcessed, errCount, transientErrCount counter
// Run our query against a work pool.
//
// NO work pool methods will return errors, so there is no need to collect
// the result. Rather, any error that is encountered will atomically update
// the "errCount" counter (for non-transient errors) or "transientErrCount"
// counter (for transient errors).
_ = parallel.WorkPool(int(cfg.NumGoroutines), func(ch chan<- func() error) {
err := ds.Run(c, q, func(pm ds.PropertyMap) error {
root := pm.Slice("TargetRoot")[0].Value().(*ds.Key)
encRoot := root.Encode()
// TODO(riannucci): make banSets remove keys from the banSet which
// weren't hit. Once they stop showing up, they'll never show up
// again.
bs := t.banSets[encRoot]
if bs == nil {
bs = stringset.New(0)
t.banSets[encRoot] = bs
}
ch <- func() error {
switch err := processRoot(c, cfg, root, bs, &numProcessed); err {
case nil:
return nil
case ds.ErrConcurrentTransaction:
logging.Fields{
logging.ErrorKey: err,
"root": root,
}.Warningf(c, "Transient error encountered processing root.")
transientErrCount.inc()
return nil
default:
logging.Fields{
logging.ErrorKey: err,
"root": root,
}.Errorf(c, "Failed to process root.")
errCount.inc()
return nil
}
}
if err := c.Err(); err != nil {
logging.WithError(err).Warningf(c, "Context canceled (lost lock?).")
return ds.Stop
}
return nil
})
if err != nil {
var qstr string
if fq, err := q.Finalize(); err == nil {
qstr = fq.String()
}
logging.Fields{
logging.ErrorKey: err,
"query": qstr,
}.Errorf(c, "Failure to run shard query.")
errCount.inc()
}
})
logging.Infof(c, "cumulatively processed %d items with %d errors(s) and %d transient error(s)",
numProcessed, errCount, transientErrCount)
switch {
case transientErrCount > 0:
return errors.New("transient error during shard processing", transient.Tag)
case errCount > 0:
return errors.New("encountered non-transient error during shard processing")
}
now := clock.Now(c)
didWork := numProcessed > 0
if didWork {
// Set our last key value for next round.
err = mc.Set(c, mc.NewItem(c, t.lastKey).SetValue(serialize.ToBytes(now.UTC())))
if err != nil {
logging.Warningf(c, "could not update last process memcache key %s: %s", t.lastKey, err)
}
} else if t.endTime.IsZero() || !t.loop {
// We didn't do any work this round, and we're configured for a single
// loop, so we're done.
logging.Debugf(c, "Configured for single loop.")
return nil
}
// If we're past our end time, then we're done.
if !t.endTime.IsZero() && now.After(t.endTime) {
logging.Debugf(c, "Exceeded our process loop time by [%s]; terminating loop.", now.Sub(t.endTime))
return nil
}
// Either we are looping, we did work last round, or both. Sleep in between
// processing rounds for a duration based on whether or not we did work.
delay := prd.next(didWork)
if delay > 0 {
// If we have an end time, and this delay would exceed that end time, then
// don't bother sleeping; we're done.
if !t.endTime.IsZero() && now.Add(delay).After(t.endTime) {
logging.Debugf(c, "Delay (%s) exceeds process loop time (%s); terminating loop.",
delay, t.endTime)
return nil
}
logging.Debugf(c, "Sleeping %s in between rounds...", delay)
if err := clock.Sleep(c, delay).Err; err != nil {
logging.WithError(err).Warningf(c, "Sleep interrupted, terminating loop.")
return nil
}
}
}
}
func getBatchByRoot(c context.Context, cfg *Config, root *ds.Key, banSet stringset.Set) ([]*realMutation, error) {
q := ds.NewQuery("tumble.Mutation").Eq("TargetRoot", root)
if cfg.DelayedMutations {
q = q.Lte("ProcessAfter", clock.Now(c).UTC())
}
fetchAllocSize := cfg.ProcessMaxBatchSize
if fetchAllocSize < 0 {
fetchAllocSize = 0
}
toFetch := make([]*realMutation, 0, fetchAllocSize)
err := ds.Run(c, q, func(k *ds.Key) error {
if !banSet.Has(k.Encode()) {
toFetch = append(toFetch, &realMutation{
ID: k.StringID(),
Parent: k.Parent(),
})
}
if len(toFetch) < cap(toFetch) {
return nil
}
return ds.Stop
})
return toFetch, err
}
func loadFilteredMutations(c context.Context, rms []*realMutation) ([]*ds.Key, []Mutation, error) {
mutKeys := make([]*ds.Key, 0, len(rms))
muts := make([]Mutation, 0, len(rms))
err := ds.Get(c, rms)
me, ok := err.(errors.MultiError)
if !ok && err != nil {
return nil, nil, err
}
for i, rm := range rms {
err = nil
if me != nil {
err = me[i]
}
if err == nil {
if rm.Version != getAppVersion(c) {
logging.Fields{
"mut_version": rm.Version,
"cur_version": getAppVersion(c),
}.Warningf(c, "loading mutation with different code version")
}
m, err := rm.GetMutation()
if err != nil {
logging.Errorf(c, "couldn't load mutation: %s", err)
continue
}
muts = append(muts, m)
mutKeys = append(mutKeys, ds.KeyForObj(c, rm))
} else if err != ds.ErrNoSuchEntity {
return nil, nil, me
}
}
return mutKeys, muts, nil
}
type overrideRoot struct {
Mutation
root *ds.Key
}
func (o overrideRoot) Root(context.Context) *ds.Key {
return o.root
}
func processRoot(c context.Context, cfg *Config, root *ds.Key, banSet stringset.Set, cnt *counter) error {
l := logging.Get(c)
toFetch, err := getBatchByRoot(c, cfg, root, banSet)
switch {
case err != nil:
l.Errorf("Failed to get batch for root [%s]: %s", root, err)
return err
case len(toFetch) == 0:
return nil
}
mutKeys, muts, err := loadFilteredMutations(c, toFetch)
if err != nil {
return err
}
if c.Err() != nil {
l.Warningf("Lost lock during processRoot")
return nil
}
allShards := map[taskShard]struct{}{}
toDel := make([]*ds.Key, 0, len(muts))
var numMuts, deletedMuts, processedMuts int
err = ds.RunInTransaction(txnBuf.FilterRDS(c), func(c context.Context) error {
toDel = toDel[:0]
numMuts = 0
deletedMuts = 0
processedMuts = 0
iterMuts := muts
iterMutKeys := mutKeys
for i := 0; i < len(iterMuts); i++ {
m := iterMuts[i]
s := clock.Now(c)
logging.Fields{"m": m}.Infof(c, "running RollForward")
shards, newMuts, newMutKeys, err := enterTransactionMutation(c, cfg, overrideRoot{m, root}, uint64(i))
logging.Fields{"m": m}.Infof(c, "done RollForward, took %s", clock.Now(c).Sub(s))
if err != nil {
l.Errorf("Executing decoded gob(%T) failed: %q: %+v", m, err, m)
continue
}
processedMuts++
for j, nm := range newMuts {
if nm.Root(c).HasAncestor(root) {
runNow := !cfg.DelayedMutations
if !runNow {
dm, isDelayedMutation := nm.(DelayedMutation)
runNow = !isDelayedMutation || clock.Now(c).UTC().After(dm.ProcessAfter())
}
if runNow {
iterMuts = append(iterMuts, nm)
iterMutKeys = append(iterMutKeys, newMutKeys[j])
}
}
}
// Finished processing this Mutation.
key := iterMutKeys[i]
switch {
case key.HasAncestor(root):
// try to delete it as part of the same transaction.
if err := ds.Delete(c, key); err == nil {
deletedMuts++
break
}
fallthrough // Failed to delete, try again outside of the transaction.
default:
toDel = append(toDel, key)
}
numMuts += len(newMuts)
for shard := range shards {
allShards[shard] = struct{}{}
}
}
return nil
}, nil)
if err != nil {
l.Errorf("failed running transaction: %s", err)
metricFailed.Add(c, int64(numMuts), root.Namespace())
return err
}
fireTasks(c, cfg, allShards, true)
l.Debugf("successfully processed %d mutations (%d tail-call), delta %d",
processedMuts, deletedMuts, (numMuts - deletedMuts))
metricCompleted.Add(c, int64(processedMuts), root.Namespace())
// This is for the mutations deleted in a transaction.
metricDeleted.Add(c, int64(deletedMuts), root.Namespace())
cnt.add(processedMuts)
if len(toDel) > 0 {
for _, k := range toDel {
banSet.Add(k.Encode())
}
if err := ds.Delete(c, toDel); err != nil {
// This is categorized as failed because it's going to get retried again.
metricFailed.Add(c, int64(len(toDel)), root.Namespace())
l.Warningf("error deleting finished mutations: %s", err)
} else {
// This is for mutations deleted outside of the transaction,
// because they failed to delete the first time we tried to do it
// inside the transaction.
metricDeleted.Add(c, int64(len(toDel)), root.Namespace())
}
}
return nil
}
// counter is an atomic integer counter.
//
// When concurrent access is possible, a counter must only be manipulated with
// its "inc" and "add" methods, and must not be read.
//
// We use an int32 because that is atomically safe across all architectures.
type counter int32
func (c *counter) inc() int { return c.add(1) }
func (c *counter) | (n int) int { return int(atomic.AddInt32((*int32)(c), int32(n))) }
// processRoundDelay calculates the delay to impose in between processing
// rounds.
type processRoundDelay struct {
cfg *Config
nextDelay time.Duration
}
func (prd *processRoundDelay) reset() {
// Reset our delay to DustSettleTimeout.
prd.nextDelay = time.Duration(prd.cfg.DustSettleTimeout)
}
func (prd *processRoundDelay) next(didWork bool) time.Duration {
if didWork {
// Reset our delay to DustSettleTimeout.
prd.reset()
return prd.nextDelay
}
delay := prd.nextDelay
if growth := prd.cfg.NoWorkDelayGrowth; growth > 1 {
prd.nextDelay *= time.Duration(growth)
}
if max := time.Duration(prd.cfg.MaxNoWorkDelay); max > 0 && delay > max {
delay = max
// Cap our "next delay" so it doesn't grow unbounded in the background.
prd.nextDelay = delay
}
// Enforce a no work lower bound.
if delay < minNoWorkDelay {
delay = minNoWorkDelay
}
return delay
}
| add | identifier_name |
server-tcp.ts | import { MatrixService } from './../db/matrix/matrix.service';
import { Trade } from './../common/models/trade';
import { ExchangeService } from './../db/exchange/exchange.service';
import { Order } from './../common/models/order';
const net = require('toa-net');
import { Parser } from './parser';
import { OrderBookService } from './../db/orderBook/orderBook.service';
import { OrderService } from './../db/order/order.service';
import { Controller } from '@nestjs/common';
import { ClientTcp } from './client-tcp';
import { TradeService } from './../db/trade/trade.service';
import { StateTrading } from './../common/models/stateTrading';
import { ExchangeData } from './../common/models/exchangeData';
import { SERVER_CONFIG } from './../../server.constants';
import { ArbitrageBalanceService } from '../db/arbit-balance/arbit-balance.service';
import { PureTrade } from '../common/models/pureTrade';
import { setInterval } from 'timers';
import { Order5BookService } from '../db/orderBook_5/orderBook_5.service';
const auth = new net.Auth('secretxxx');
@Controller()
export class ServerTcpBot {
server: any;
parser: Parser;
clientsTcp: ClientTcp[] = [];
stateTrading: StateTrading[] = [];
startFlag = true;
// флаг который с фронта управляет возможностью совершения сделок
avalableArbitrage = false;
constructor(
private readonly orderBooksService: OrderBookService,
private readonly order5BooksService: Order5BookService,
private readonly orderService: OrderService,
private readonly tradeService: TradeService,
private readonly exchangeService: ExchangeService,
private readonly arbitrageBalanceService: ArbitrageBalanceService,
private readonly matrixService: MatrixService) {
this.parser = new Parser(this.orderBooksService, this.order5BooksService, this.exchangeService, this.arbitrageBalanceService,
this.tradeService, this.matrixService);
}
// запрещает ведение новых арбитражных сделок
stopArbitrage() {
this.avalableArbitrage = false;
console.log('Arbitrage stoped');
}
// разрешает ведение новых арбитражных сделок
startArbitrage() {
this.avalableArbitrage = true;
console.log('Arbitrage started');
}
// получаем сделки с 1 полуцикла
async getFirstTradeArbitrage(id: string) {
const arbitrage = await this.arbitrageBalanceService.findArbitrageById(id);
return [arbitrage.firstCickleSell, arbitrage.firstCickleBuy];
}
// вызов с фронта закрытия 2 - го полуцикла
async closeSecondArbitrage(trades: PureTrade[], arbitId: string) {
const orders = await this.parser.closeSecondArbitrage(trades, arbitId);
// console.log('==============object :', orders);
if (orders.length) {
console.log(' ++++++++++++++++closeSecondArbitrage call :sendOrdersToBot');
this.sendOrdersToBot(orders, 'taker');
this.arbitrageBalanceService.closeSecondCircleStatus(arbitId);
}
}
async passTradeToDB(message: any, status: string) {
const trades = this.parser.parseTrades(message);
if (trades) {
for (const trade of trades) {
// const status = (trade.remainingSize === 0) ? 'done' : 'partial';
if (trade) {
await this.combineTrade(trade, status);
}
}
}
}
async combineTrade(trade: Trade, status?: string) {
this.orderService.updateStatusOrder(trade.arbitrageId, trade.typeOrder, trade.exchOrderId, status, '');
this.parser.subTradedVolume(trade);
this.tradeService.addNewData(trade);
this.countPureTrade(trade);
let newOrder;
// console.log('trade.arbitID :', trade.arbitrageId, trade.typeOrder);
if (this.parser.orderFullFilled(trade)) {
const newOrderBook = this.parser.addNewOrderBookData();
// if (this.avalableArbitrage) {
newOrder = await this.parser.defineSellBuy(newOrderBook);
console.log(' 80parser.defineSellBuy:', newOrder);
// this.parser.removeCheckerSentOrders(trade.arbitrageId);
// }
} else {
if (this.avalableArbitrage) {
console.log('85this.parser.makePartialOrder:');
newOrder = await this.parser.makePartialOrder(trade);
}
}
if (newOrder) {
console.log(' 91 sendOrdersToBot( :');
await this.sendOrdersFromPromise(newOrder);
}
}
async countPureTrade(trade: Trade) {
const sameTrade = await this.tradeService.findTrade(trade);
if (!sameTrade) {
console.log('sameTrade :', sameTrade);
const pureTrade = await this.parser.addNewArbitrageTrade(trade);
await this.arbitrageBalanceService.addNewTrade(pureTrade, trade.arbitrageId);
await this.parser.removeCheckerSentOrders(trade.arbitrageId);
} else {
this.checkOrder(trade.arbitrageId, trade.typeOrder, trade.exchange);
}
}
async sendOrdersFromPromise(newOrder: any) {
console.log(' 125sendOrders FromPromise call :sendOrdersToBot');
await this.sendOrdersToBot(newOrder, 'taker'); | generateOrderAfterCancel(message: any) {
const trades = this.parser.parseTrades(message);
if (trades) {
for (const trade of trades) {
this.orderService.updateStatusOrder(trade.arbitrageId, trade.typeOrder, trade.exchOrderId, 'cancelled', '');
const order: Order[] = this.parser.replaceCancelledOrderByNewOrder(trade);
if (order) {
console.log('generateOrderAfterCancel call sendOrdersToBot :');
this.sendOrdersToBot(order);
}
}
}
}
createTcpServer() {
if (!this.server) {
this.startServer();
this.parser.startSaverOrderBooks();
} else if (!this.server.address()) {
this.startServer();
} else {
console.log('the already started');
}
}
// запуск сервера для прослушивания по TCP на порту сообщений
startServer() {
this.server = new net.Server((socket: any) => {
socket.on('message', async (message: any) => {
// console.log('message : %j', message);
if (message.type === 'notification' // && message.payload.method === 'trades' ||
&& message.payload.method === 'partial' || message.payload.method === 'done') {
console.log(' message.payload.method :', message.payload.method);
this.passTradeToDB(message, message.payload.method);
}
if (message.type === 'notification' && message.payload.method === 'resCheckOrder') {
const confirmedOrder = message.payload.params[0];
if (confirmedOrder) {
console.log('confirmedOrder :',
confirmedOrder.arbitrageId, confirmedOrder.status, confirmedOrder.exchOrderId, confirmedOrder.typeOrder);
await this.orderService.updateStatusOrder(confirmedOrder.arbitrageId, confirmedOrder.typeOrder,
confirmedOrder.exchangeId, confirmedOrder.status, '');
if (confirmedOrder.status === 'notSend') {
console.log('!!! confirmedOrder id status :', confirmedOrder.arbitrageId, confirmedOrder.status);
} else {
const trade = this.tradeService.findTrade(confirmedOrder);
if (trade) {
this.countPureTrade(confirmedOrder as Trade);
this.parser.removeCheckerSentOrders(confirmedOrder.arbitrageId);
}
}
}
}
if (message.type === 'notification' && message.payload.method === 'statusOrder') {
console.log('168 ########### status=', message.payload.params[3]);
this.orderService.updateStatusOrder(message.payload.params[0], message.payload.params[1],
message.payload.params[2], message.payload.params[3], message.payload.params[4]);
if (message.payload.params[3] === 'open') {
const trade = {
exchange: '', pair: '', price: '', volume: '', typeOrder: message.payload.params[1],
arbitOrderId: message.payload.params[0], exchOrderId: '', time: ''
};
// this.checkOrder(trade);
}
/* if (message.payload.params[3] === 'done') {
this.passTradeToDB(message, message.payload.params[3]);
} */
if (message.payload.params[3] === 'cancelled') {
this.generateOrderAfterCancel(message);
}
}
else {
const parsedMessage = this.parser.parseTcpMessage(message);
this.parser.calculateAskBid(parsedMessage);
const newOrderBook = this.parser.addNewOrderBookData();
// console.log('newOrderBook :', newOrderBook);
if (this.avalableArbitrage) {
if (this.startFlag) {
const orders = await this.parser.defineSellBuy(newOrderBook);
console.log('this.startFlag :', this.startFlag, this.avalableArbitrage);
this.sendOrdersToBot(orders);
this.startFlag = false;
}
}
}
});
});
this.server.listen(SERVER_CONFIG.tcpPort);
console.log(`Tcp server listen port ${SERVER_CONFIG.tcpPort}`);
// Enable authentication for server
this.server.getAuthenticator = () => {
return (signature: string) => auth.verify(signature);
};
}
private checkOrder(arbitId: string, orderType: string, exchange: string) {
return this.orderService.findOrderByIdExchange(arbitId, orderType, exchange)
.then((order) => {
if (order) {
const checkingOrder = {
nameOrder: 'checkOrder', order: { arbitrageId: arbitId, pairOrder: order.pair, typeOrder: orderType },
serverPort: order.port, host: order.host,
};
this.startClient(checkingOrder);
}
});
}
stopTcpServer() {
this.server.close();
console.log('Tcp server stoped');
}
createClient(clientSocket: any) {
const newClientTcp = new net.Client();
this.clientsTcp.push({ socket: clientSocket, client: newClientTcp });
newClientTcp.getSignature = () => {
return auth.sign({ id: 'clientIdxxx' });
};
newClientTcp.connect(clientSocket);
return newClientTcp;
}
getSpread() {
return this.parser.getBidAskSpread();
}
async getCurrentArbitrage() {
// получаем данные для фронта для незакрытых во 2 полуцикле сделок
const arbitrage = await this.parser.getNotClosedArbitrage();
const timeAfterSent = Date.now() - 60000;
const sentOpenOrders = await this.orderService.getSameStatusOrder('open', timeAfterSent);
if (sentOpenOrders.length) {
for (const sentOpenOrder of sentOpenOrders) {
// console.log('251check Order :', sentOpenOrder.arbitrageId, sentOpenOrder.typeOrder);
this.checkOrder(sentOpenOrder.arbitrageId, sentOpenOrder.typeOrder, sentOpenOrder.exchange);
}
}
// проверяем список отправленных но не подтвержденных ордеров 2 полуцикла
const notConfirmedOrders = this.parser.expiredTimeSendNotConfirmOrders(Date.now());
if (notConfirmedOrders.length) {
for (const secondOrders of notConfirmedOrders) {
this.checkOrder(secondOrders.arbitId, 'sell', secondOrders.secondBuyExchange);
this.checkOrder(secondOrders.arbitId, 'buy', secondOrders.secondSellExchange);
// получаем список closeSecondOrders с ордерами по которым
// не было подтверждения сделки в течение 1 мин
this.parser.setCurrentTimeSentOrder(Date.now(), secondOrders.arbitId);
// this.parser.closeSecondOrders = [];
}
}
if (this.parser.closeSecondOrders.length && arbitrage.length) {
// если список closeSecondOrders есть то повторно отправляем неподтвержденные ордера на зенбот
console.log('269 sent new order :');
this.sendOrdersToBot(this.parser.closeSecondOrders, 'taker');
// сразу обнуляем список closeSecondOrders
this.parser.closeSecondOrders = [];
}
return arbitrage;
}
sendOrdersToBot(orders: Order[], feeType?: string) {
let parametersOrder;
// console.log('============orders :', orders);
if (orders) {
for (const currentOrder of orders) {
if (!feeType) {
parametersOrder = {
nameOrder: 'sendOrder',
serverPort: currentOrder.port, host: currentOrder.host,
order: currentOrder,
};
} else {
parametersOrder = {
nameOrder: 'sendTakerOrder',
serverPort: currentOrder.port, host: currentOrder.host,
order: currentOrder,
};
}
if (parametersOrder.order.price > 0) {
this.startClient(parametersOrder);
this.orderService.addNewOrder(currentOrder);
}
}
}
}
async startClient(order: any) {
try {
if (order.host && order.serverPort) {
const clientSocket = `tcp://${order.host}:${order.serverPort}`;
let currentClient = this.defineTcpClient(clientSocket);
if (!currentClient) {
currentClient = this.createClient(clientSocket);
}
currentClient.on('error', (err: any) => {
if (err.code === 'ETIMEDOUT') {
currentClient.destroy();
}
currentClient.reconnect();
});
const stringOrder = JSON.stringify(order.order);
// console.log('321 order.nameOrder :', order.nameOrder);
const countSentOpenOrders = await this.orderService.checkOpenOrders(order.order.arbitrageId, 'open', order.order.typeOrder);
if (order.nameOrder === 'checkOrder') {
// console.log('countSentOpenOrders status', countSentOpenOrders, order.nameOrder);
currentClient.notification(order.nameOrder, [`${stringOrder}`]);
} else if (!countSentOpenOrders) {
console.log(' Sent Orders :', countSentOpenOrders, order.order.status);
await currentClient.notification(order.nameOrder, [`${stringOrder}`]);
await this.parser.changeStatusInSentOrders(order.order.arbitrageId, order.order.typeOrder);
}
}
} catch (e) {
console.log('err :', e);
}
}
defineTcpClient(socketTcp: any): any {
if (this.clientsTcp) {
for (const iterator of this.clientsTcp) {
if (iterator.socket === socketTcp) {
return iterator.client;
}
}
}
}
getCurrentPrice(): ExchangeData[] {
return this.parser.getCurrentPrice();
}
getExchangePrice(exchange: string, pair: string, typePrice: string) {
return this.parser.getExchangePrice(exchange, pair, typePrice);
}
} | }
| random_line_split |
server-tcp.ts | import { MatrixService } from './../db/matrix/matrix.service';
import { Trade } from './../common/models/trade';
import { ExchangeService } from './../db/exchange/exchange.service';
import { Order } from './../common/models/order';
const net = require('toa-net');
import { Parser } from './parser';
import { OrderBookService } from './../db/orderBook/orderBook.service';
import { OrderService } from './../db/order/order.service';
import { Controller } from '@nestjs/common';
import { ClientTcp } from './client-tcp';
import { TradeService } from './../db/trade/trade.service';
import { StateTrading } from './../common/models/stateTrading';
import { ExchangeData } from './../common/models/exchangeData';
import { SERVER_CONFIG } from './../../server.constants';
import { ArbitrageBalanceService } from '../db/arbit-balance/arbit-balance.service';
import { PureTrade } from '../common/models/pureTrade';
import { setInterval } from 'timers';
import { Order5BookService } from '../db/orderBook_5/orderBook_5.service';
const auth = new net.Auth('secretxxx');
@Controller()
export class ServerTcpBot {
server: any;
parser: Parser;
clientsTcp: ClientTcp[] = [];
stateTrading: StateTrading[] = [];
startFlag = true;
// флаг который с фронта управляет возможностью совершения сделок
avalableArbitrage = false;
constructor(
private readonly orderBooksService: OrderBookService,
private readonly order5BooksService: Order5BookService,
private readonly orderService: OrderService,
private readonly tradeService: TradeService,
private readonly exchangeService: ExchangeService,
private readonly arbitrageBalanceService: ArbitrageBalanceService,
private readonly matrixService: MatrixService) {
this.parser = new Parser(this.orderBooksService, this.order5BooksService, this.exchangeService, this.arbitrageBalanceService,
this.tradeService, this.matrixService);
}
// запрещает ведение новых арбитражных сделок
stopArbitrage() {
this.avalableArbitrage = false;
console.log('Arbitrage stoped');
}
// разрешает ведение новых арбитражных сделок
startArbitrage() {
this.avalableArbitrage = true;
console.log('Arbitrage started');
}
// получаем сделки с 1 полуцикла
async getFirstTradeArbitrage(id: string) {
const arbitrage = await this.arbitrageBalanceService.findArbitrageById(id);
return [arbitrage.firstCickleSell, arbitrage.firstCickleBuy];
}
// вызов с фронта закрытия 2 - го полуцикла
async closeSecondArbitrage(trades: PureTrade[], arbitId: string) {
const orders = await this.parser.closeSecondArbitrage(trades, arbitId);
// console.log('==============object :', orders);
if (orders.length) {
console.log(' ++++++++++++++++closeSecondArbitrage call :sendOrdersToBot');
this.sendOrdersToBot(orders, 'taker');
this.arbitrageBalanceService.closeSecondCircleStatus(arbitId);
}
}
async passTradeToDB(message: any, status: string) {
const trades = this.parser.parseTrades(message);
if (trades) {
for (const trade of trades) {
// const status = (trade.remainingSize === 0) ? 'done' : 'partial';
if (trade) {
await this.combineTrade(trade, status);
}
}
}
}
async combineTrade(trade: Trade, status?: string) {
this.orderService.updateStatusOrder(trade.arbitrageId, trade.typeOrder, trade.exchOrderId, status, '');
this.parser.subTradedVolume(trade);
this.tradeService.addNewData(trade);
this.countPureTrade(trade);
let newOrder;
// console.log('trade.arbitID :', trade.arbitrageId, trade.typeOrder);
if (this.parser.orderFullFilled(trade)) {
const newOrderBook = this.parser.addNewOrderBookData();
// if (this.avalableArbitrage) {
newOrder = await this.parser.defineSellBuy(newOrderBook);
console.log(' 80parser.defineSellBuy:', newOrder);
// this.parser.removeCheckerSentOrders(trade.arbitrageId);
// }
} else {
if (this.avalableArbitrage) {
console.log('85this.parser.makePartialOrder:');
newOrder = await this.parser.makePartialOrder(trade);
}
}
if (newOrder) {
console.log(' 91 sendOrdersToBot( :');
await this.sendOrdersFromPromise(newOrder);
}
}
async countPureTrade(trade: Trade) {
const sameTrade = await this.tradeService.findTrade(trade);
if (!sameTrade) {
console.log('sameTrade :', sameTrade);
const pureTrade = await this.parser.addNewArbitrageTrade(trade);
await this.arbitrageBalanceService.addNewTrade(pureTrade, trade.arbitrageId);
await this.parser.removeCheckerSentOrders(trade.arbitrageId);
} else {
this.checkOrder(trade.arbitrageId, trade.typeOrder, trade.exchange);
}
}
async sendOrdersFromPromise(newOrder: any) {
console.log(' 125sendOrders FromPromise call :sendOrdersToBot');
await this.sendOrdersToBot(newOrder, 'taker');
}
generateOrderAfterCancel(message: any) {
const trades = this.parser.parseTrades(message);
if (trades) {
for (const trade of trades) {
this.orderService.updateStatus | trade.typeOrder, trade.exchOrderId, 'cancelled', '');
const order: Order[] = this.parser.replaceCancelledOrderByNewOrder(trade);
if (order) {
console.log('generateOrderAfterCancel call sendOrdersToBot :');
this.sendOrdersToBot(order);
}
}
}
}
createTcpServer() {
if (!this.server) {
this.startServer();
this.parser.startSaverOrderBooks();
} else if (!this.server.address()) {
this.startServer();
} else {
console.log('the already started');
}
}
// запуск сервера для прослушивания по TCP на порту сообщений
startServer() {
this.server = new net.Server((socket: any) => {
socket.on('message', async (message: any) => {
// console.log('message : %j', message);
if (message.type === 'notification' // && message.payload.method === 'trades' ||
&& message.payload.method === 'partial' || message.payload.method === 'done') {
console.log(' message.payload.method :', message.payload.method);
this.passTradeToDB(message, message.payload.method);
}
if (message.type === 'notification' && message.payload.method === 'resCheckOrder') {
const confirmedOrder = message.payload.params[0];
if (confirmedOrder) {
console.log('confirmedOrder :',
confirmedOrder.arbitrageId, confirmedOrder.status, confirmedOrder.exchOrderId, confirmedOrder.typeOrder);
await this.orderService.updateStatusOrder(confirmedOrder.arbitrageId, confirmedOrder.typeOrder,
confirmedOrder.exchangeId, confirmedOrder.status, '');
if (confirmedOrder.status === 'notSend') {
console.log('!!! confirmedOrder id status :', confirmedOrder.arbitrageId, confirmedOrder.status);
} else {
const trade = this.tradeService.findTrade(confirmedOrder);
if (trade) {
this.countPureTrade(confirmedOrder as Trade);
this.parser.removeCheckerSentOrders(confirmedOrder.arbitrageId);
}
}
}
}
if (message.type === 'notification' && message.payload.method === 'statusOrder') {
console.log('168 ########### status=', message.payload.params[3]);
this.orderService.updateStatusOrder(message.payload.params[0], message.payload.params[1],
message.payload.params[2], message.payload.params[3], message.payload.params[4]);
if (message.payload.params[3] === 'open') {
const trade = {
exchange: '', pair: '', price: '', volume: '', typeOrder: message.payload.params[1],
arbitOrderId: message.payload.params[0], exchOrderId: '', time: ''
};
// this.checkOrder(trade);
}
/* if (message.payload.params[3] === 'done') {
this.passTradeToDB(message, message.payload.params[3]);
} */
if (message.payload.params[3] === 'cancelled') {
this.generateOrderAfterCancel(message);
}
}
else {
const parsedMessage = this.parser.parseTcpMessage(message);
this.parser.calculateAskBid(parsedMessage);
const newOrderBook = this.parser.addNewOrderBookData();
// console.log('newOrderBook :', newOrderBook);
if (this.avalableArbitrage) {
if (this.startFlag) {
const orders = await this.parser.defineSellBuy(newOrderBook);
console.log('this.startFlag :', this.startFlag, this.avalableArbitrage);
this.sendOrdersToBot(orders);
this.startFlag = false;
}
}
}
});
});
this.server.listen(SERVER_CONFIG.tcpPort);
console.log(`Tcp server listen port ${SERVER_CONFIG.tcpPort}`);
// Enable authentication for server
this.server.getAuthenticator = () => {
return (signature: string) => auth.verify(signature);
};
}
private checkOrder(arbitId: string, orderType: string, exchange: string) {
return this.orderService.findOrderByIdExchange(arbitId, orderType, exchange)
.then((order) => {
if (order) {
const checkingOrder = {
nameOrder: 'checkOrder', order: { arbitrageId: arbitId, pairOrder: order.pair, typeOrder: orderType },
serverPort: order.port, host: order.host,
};
this.startClient(checkingOrder);
}
});
}
stopTcpServer() {
this.server.close();
console.log('Tcp server stoped');
}
createClient(clientSocket: any) {
const newClientTcp = new net.Client();
this.clientsTcp.push({ socket: clientSocket, client: newClientTcp });
newClientTcp.getSignature = () => {
return auth.sign({ id: 'clientIdxxx' });
};
newClientTcp.connect(clientSocket);
return newClientTcp;
}
getSpread() {
return this.parser.getBidAskSpread();
}
async getCurrentArbitrage() {
// получаем данные для фронта для незакрытых во 2 полуцикле сделок
const arbitrage = await this.parser.getNotClosedArbitrage();
const timeAfterSent = Date.now() - 60000;
const sentOpenOrders = await this.orderService.getSameStatusOrder('open', timeAfterSent);
if (sentOpenOrders.length) {
for (const sentOpenOrder of sentOpenOrders) {
// console.log('251check Order :', sentOpenOrder.arbitrageId, sentOpenOrder.typeOrder);
this.checkOrder(sentOpenOrder.arbitrageId, sentOpenOrder.typeOrder, sentOpenOrder.exchange);
}
}
// проверяем список отправленных но не подтвержденных ордеров 2 полуцикла
const notConfirmedOrders = this.parser.expiredTimeSendNotConfirmOrders(Date.now());
if (notConfirmedOrders.length) {
for (const secondOrders of notConfirmedOrders) {
this.checkOrder(secondOrders.arbitId, 'sell', secondOrders.secondBuyExchange);
this.checkOrder(secondOrders.arbitId, 'buy', secondOrders.secondSellExchange);
// получаем список closeSecondOrders с ордерами по которым
// не было подтверждения сделки в течение 1 мин
this.parser.setCurrentTimeSentOrder(Date.now(), secondOrders.arbitId);
// this.parser.closeSecondOrders = [];
}
}
if (this.parser.closeSecondOrders.length && arbitrage.length) {
// если список closeSecondOrders есть то повторно отправляем неподтвержденные ордера на зенбот
console.log('269 sent new order :');
this.sendOrdersToBot(this.parser.closeSecondOrders, 'taker');
// сразу обнуляем список closeSecondOrders
this.parser.closeSecondOrders = [];
}
return arbitrage;
}
sendOrdersToBot(orders: Order[], feeType?: string) {
let parametersOrder;
// console.log('============orders :', orders);
if (orders) {
for (const currentOrder of orders) {
if (!feeType) {
parametersOrder = {
nameOrder: 'sendOrder',
serverPort: currentOrder.port, host: currentOrder.host,
order: currentOrder,
};
} else {
parametersOrder = {
nameOrder: 'sendTakerOrder',
serverPort: currentOrder.port, host: currentOrder.host,
order: currentOrder,
};
}
if (parametersOrder.order.price > 0) {
this.startClient(parametersOrder);
this.orderService.addNewOrder(currentOrder);
}
}
}
}
async startClient(order: any) {
try {
if (order.host && order.serverPort) {
const clientSocket = `tcp://${order.host}:${order.serverPort}`;
let currentClient = this.defineTcpClient(clientSocket);
if (!currentClient) {
currentClient = this.createClient(clientSocket);
}
currentClient.on('error', (err: any) => {
if (err.code === 'ETIMEDOUT') {
currentClient.destroy();
}
currentClient.reconnect();
});
const stringOrder = JSON.stringify(order.order);
// console.log('321 order.nameOrder :', order.nameOrder);
const countSentOpenOrders = await this.orderService.checkOpenOrders(order.order.arbitrageId, 'open', order.order.typeOrder);
if (order.nameOrder === 'checkOrder') {
// console.log('countSentOpenOrders status', countSentOpenOrders, order.nameOrder);
currentClient.notification(order.nameOrder, [`${stringOrder}`]);
} else if (!countSentOpenOrders) {
console.log(' Sent Orders :', countSentOpenOrders, order.order.status);
await currentClient.notification(order.nameOrder, [`${stringOrder}`]);
await this.parser.changeStatusInSentOrders(order.order.arbitrageId, order.order.typeOrder);
}
}
} catch (e) {
console.log('err :', e);
}
}
defineTcpClient(socketTcp: any): any {
if (this.clientsTcp) {
for (const iterator of this.clientsTcp) {
if (iterator.socket === socketTcp) {
return iterator.client;
}
}
}
}
getCurrentPrice(): ExchangeData[] {
return this.parser.getCurrentPrice();
}
getExchangePrice(exchange: string, pair: string, typePrice: string) {
return this.parser.getExchangePrice(exchange, pair, typePrice);
}
}
| Order(trade.arbitrageId, | identifier_name |
server-tcp.ts | import { MatrixService } from './../db/matrix/matrix.service';
import { Trade } from './../common/models/trade';
import { ExchangeService } from './../db/exchange/exchange.service';
import { Order } from './../common/models/order';
const net = require('toa-net');
import { Parser } from './parser';
import { OrderBookService } from './../db/orderBook/orderBook.service';
import { OrderService } from './../db/order/order.service';
import { Controller } from '@nestjs/common';
import { ClientTcp } from './client-tcp';
import { TradeService } from './../db/trade/trade.service';
import { StateTrading } from './../common/models/stateTrading';
import { ExchangeData } from './../common/models/exchangeData';
import { SERVER_CONFIG } from './../../server.constants';
import { ArbitrageBalanceService } from '../db/arbit-balance/arbit-balance.service';
import { PureTrade } from '../common/models/pureTrade';
import { setInterval } from 'timers';
import { Order5BookService } from '../db/orderBook_5/orderBook_5.service';
const auth = new net.Auth('secretxxx');
@Controller()
export class ServerTcpBot {
server: any;
parser: Parser;
clientsTcp: ClientTcp[] = [];
stateTrading: StateTrading[] = [];
startFlag = true;
// флаг который с фронта управляет возможностью совершения сделок
avalableArbitrage = false;
constructor(
private readonly orderBooksService: OrderBookService,
private readonly order5BooksService: Order5BookService,
private readonly orderService: OrderService,
private readonly tradeService: TradeService,
private readonly exchangeService: ExchangeService,
private readonly arbitrageBalanceService: ArbitrageBalanceService,
private readonly matrixService: MatrixService) {
this.parser = new Parser(this.orderBooksService, this.order5BooksService, this.exchangeService, this.arbitrageBalanceService,
this.tradeService, this.matrixService);
}
// запрещает ведение новых арбитражных сделок
stopArbitrage() {
this.avalableArbitrage = false;
console.log('Arbitrage stoped');
}
// разрешает ведение новых арбитражных сделок
startArbitrage() {
this.avalableArbitrage = true;
console.log('Arbitrage started');
}
// получаем сделки с 1 полуцикла
async getFirstTradeArbitrage(id: string) {
const arbitrage = await this.arbitrageBalanceService.findArbitrageById(id);
return [arbitrage.firstCickleSell, arbitrage.firstCickleBuy];
}
// вызов с фронта закрытия 2 - го полуцикла
async closeSecondArbitrage(trades: PureTrade[], arbitId: string) {
const orders = await this.parser.closeSecondArbitrage(trades, arbitId);
// console.log('==============object :', orders);
if (orders.length) {
console.log(' ++++++++++++++++closeSecondArbitrage call :sendOrdersToBot');
this.sendOrdersToBot(orders, 'taker');
this.arbitrageBalanceService.closeSecondCircleStatus(arbitId);
}
}
async passTradeToDB(message: any, status: string) {
const trades = this.parser.parseTrades(message);
if (trades) {
for (const trade of trades) {
// const status = (trade.remainingSize === 0) ? 'done' : 'partial';
if (trade) {
await this.combineTrade(trade, status);
}
}
}
}
async combineTrade(trade: Trade, status?: string) {
this.orderService.updateStatusOrder(trade.arbitrageId, trade.typeOrder, trade.exchOrderId, status, '');
this.parser.subTradedVolume(trade);
this.tradeService.addNewData(trade);
this.countPureTrade(trade);
let newOrder;
// console.log('trade.arbitID :', trade.arbitrageId, trade.typeOrder);
if (this.parser.orderFullFilled(trade)) {
const newOrderBook = this.parser.addNewOrderBookData();
// if (this.avalableArbitrage) {
newOrder = await this.parser.defineSellBuy(newOrderBook);
console.log(' 80parser.defineSellBuy:', newOrder);
// this.parser.removeCheckerSentOrders(trade.arbitrageId);
// }
} else {
if (this.avalableArbitrage) {
console.log('85this.parser.makePartialOrder:');
newOrder = await this.parser.makePartialOrder(trade);
}
}
if (newOrder) {
console.log(' 91 sendOrdersToBot( :');
await this.sendOrdersFromPromise(newOrder);
}
}
async countPureTrade(trade: Trade) {
const sameTrade = await this.tradeService.findTrade(trade);
if (!sameTrade) {
console.log('sameTrade :', sameTrade);
const pureTrade = await this.parser.addNewArbitrageTrade(trade);
await this.arbitrageBalanceService.addNewTrade(pureTrade, trade.arbitrageId);
await this.parser.removeCheckerSentOrders(trade.arbitrageId);
} else {
this.checkOrder(trade.arbitrageId, trade.typeOrder, trade.exchange);
}
}
async sendOrdersFromPromise(newOrder: any) {
console.log(' 125sendOrders FromPromise call :sendOrdersToBot');
await this.sendOrdersToBot(newOrder, 'taker');
}
generateOrderAfterCancel(message: any) {
const trades = this.parser.parseTrades(message);
if (trades) {
for (const trade of trades) {
this.orderService.updateStatusOrder(trade.arbitrageId, trade.typeOrder, trade.exchOrderId, 'cancelled', '');
const order: Order[] = this.parser.replaceCancelledOrderByNewOrder(trade);
if (order) {
console.log('generateOrderAfterCancel call sendOrdersToBot :');
this.sendOrdersToBot(order);
}
}
}
}
createTcpServer() {
if (!this.server) {
this.startServer();
this.parser.startSaverOrderBooks();
} else if (!this.server.address()) {
this.startServer();
} else {
console.log('the already started');
}
}
// запуск сервера для прослушивания по TCP на порту сообщений
startServer() {
this.server = new net.Server((socket: any) => {
socket.on('message', async (message: any) => {
// console.log('message : %j', message);
if (message.type === 'notification' // && message.payload.method === 'trades' ||
&& message.payload.method === 'partial' || message.payload.method === 'done') {
console.log(' message.payload.method :', message.payload.method);
this.passTradeToDB(message, message.payload.method);
}
if (message.type === 'notification' && message.payload.method === 'resCheckOrder') {
const confirmedOrder = message.payload.params[0];
if (confirmedOrder) {
console.log('confirmedOrder :',
confirmedOrder.arbitrageId, confirmedOrder.status, confirmedOrder.exchOrderId, confirmedOrder.typeOrder);
await this.orderService.updateStatusOrder(confirmedOrder.arbitrageId, confirmedOrder.typeOrder,
confirmedOrder.exchangeId, confirmedOrder.status, '');
if (confirmedOrder.status === 'notSend') {
console.log('!!! confirmedOrder id status :', confirmedOrder.arbitrageId, confirmedOrder.status);
} else {
const trade = this.tradeService.findTrade(confirmedOrder);
if (trade) {
this.countPureTrade(confirmedOrder as Trade);
this.parser.removeCheckerSentOrders(confirmedOrder.arbitrageId);
}
}
}
}
if (message.type === 'notification' && message.payload.method === 'statusOrder') {
console.log('168 ########### status=', message.payload.params[3]);
this.orderService.updateStatusOrder(message.payload.params[0], message.payload.params[1],
message.payload.params[2], message.payload.params[3], message.payload.params[4]);
if (message.payload.params[3] === 'open') {
const trade = {
exchange: '', pair: '', price: '', volume: '', typeOrder: message.payload.params[1],
arbitOrderId: message.payload.params[0], exchOrderId: '', time: ''
};
// this.checkOrder(trade);
}
/* if (message.payload.params[3] === 'done') {
this.passTradeToDB(message, message.payload.params[3]);
} */
if (message.payload.params[3] === 'cancelled') {
this.generateOrderAfterCancel(message);
}
}
else {
const parsedMessage = this.parser.parseTcpMessage(message);
this.parser.calculateAskBid(parsedMessage);
const newOrderBook = this.parser.addNewOrderBookData();
// console.log('newOrderBook :', newOrderBook);
if (this.avalableArbitrage) {
if (this.startFlag) {
const orders = await this.parser.defineSellBuy(newOrderBook);
console.log('this.startFlag :', this.startFlag, this.avalableArbitrage);
this.sendOrdersToBot(orders);
this.startFlag = false;
}
}
}
});
});
this.server.listen(SERVER_CONFIG.tcpPort);
console.log(`Tcp server listen port ${SERVER_CONFIG.tcpPort}`);
// Enable authentication for server
this.server.getAuthenticator = () => {
return (signature: string) => auth.verify(signature);
};
}
private checkOrder(arbitId: string, orderType: string, exchange: string) {
return this.orderService.findOrderByIdExchange(arbitId, orderType, exchange)
.then((order) => {
if (order) {
const checkingOrder = {
nameOrder: 'checkOrder', order: { arbitrageId: arbitId, pairOrder: order.pair, typeOrder: orderType },
serverPort: order.port, host: order.host,
};
this.startClient(checkingOrder);
}
});
}
stopTcpServer() {
this.server.close();
console.log('Tcp server stoped');
}
createClient(clientSocket: any) {
const newClientTcp = new net.Client();
this.clientsTcp.push({ socket: clientSocket, client: newClientTcp });
newClientTcp.getSignature = () => {
return auth.sign({ id: 'clientIdxxx' });
};
newClientTcp.connect(clientSocket);
return newClientTcp;
}
getSpread() {
return this.parser.getBidAskSpread();
}
async getCurrentArbitrage() {
// получаем данные для фронта для незакрытых во 2 полуцикле сделок
const arbitrage = await this.parser.getNotClosedArbitrage();
const timeAfterSent = Date.now() - 60000;
const sentOpenOrders = await this.orderService.getSameStatusOrder('open', timeAfterSent);
if (sentOpenOrders.length) {
for (const sentOpenOrder of sentOpenOrders) {
// console.log('251check Order :', sentOpenOrder.arbitrageId, sentOpenOrder.typeOrder);
this.checkOrder(sentOpenOrder.arbitrageId, sentOpenOrder.typeOrder, sentOpenOrder.exchange);
}
}
// проверяем список отправленных но не подтвержденных ордеров 2 полуцикла
const notConfirmedOrders = this.parser.expiredTimeSendNotConfirmOrders(Date.now());
if (notConfirmedOrders.length) {
for (const secondOrders of notConfirmedOrders) {
this.checkOrder(secondOrders.arbitId, 'sell', secondOrders.secondBuyExchange);
this.checkOrder(secondOrders.arbitId, 'buy', secondOrders.secondSellExchange);
// получаем список closeSecondOrders с ордерами по которым
// не было подтверждения сделки в течение 1 мин
this.parser.setCurrentTimeSentOrder(Date.now(), secondOrders.arbitId);
// this.parser.closeSecondOrders = [];
}
}
if (this.parser.closeSecondOrders.length && arbitrage.length) {
// если список closeSecondOrders есть то повторно отправляем неподтвержденные ордера на зенбот
console.log('269 sent new order :');
this.sendOrdersToBot(this.parser.closeSecondOrders, 'taker');
// сразу обнуляем список closeSecondOrders
this.parser.closeSecondOrders = [];
}
return arbitrage;
}
sendOrdersToBot(orders: Order[], feeType?: string) {
let parametersOrder;
// console.log('============orders :', orders);
if (orders) {
for (const currentOrder of orders) {
if (!feeType) {
parametersOrder = {
nameOrder: 'sendOrder',
serverPort: currentOrder.port, host: currentOrder.host,
order: currentOrder,
};
} else {
parametersOrder = {
nameOrder: 'sendTakerOrder',
serverPort: currentOrder.port, host: currentOrder.host,
order: currentOrder,
};
}
if (parametersOrder.order.price > 0) {
this.startClient(parametersOrder);
this.orderService.addNewOrder(currentOrder);
}
}
}
}
async startClient(order: any) {
try {
if (order.host && order.serverPort) {
const clientSocket = `tcp://${order.host}:${order.serverPort}`;
let currentClient = this.defineTcpClient(clientSocket);
if (!currentClient) {
currentClient = this.createClient(clientSocket);
}
currentClient.on('error', (err: any) => {
if (err.code === 'ETIMEDOUT') {
currentClient.destroy();
}
currentClient.reconnect();
});
const stringOrder = JSON.stringify(order.order);
// console.log('321 order.nameOrder :', order.nameOrder);
const countSentOpenOrders = await this.orderService.checkOpenOrders(order.order.arbitrageId, 'open', order.order.typeOrder);
if (order.nameOrder === 'checkOrder') {
// console.log('countSentOpenOrders status', countSentOpenOrders, order.nameOrder);
currentClient.notification(order.nameOrder, [`${stringOrder}`]);
} else if (!countSentOpenOrders) {
console.log(' Sent Orders :', countSentOpenOrders, order.order.status);
await currentClient.notification(order.nameOrder, [`${stringOrder}`]);
await this.parser.changeStatusInSentOrders(order.order.arbitrageId, order.order.typeOrder);
}
}
} catch (e) {
console.log('err :', e);
}
}
defineTcpClient(socketTcp: any): any {
if (this.clientsTcp) {
for (const iterator of this.clientsTcp) {
if (iterator.socket === socketTcp) {
return iterator.client;
}
}
}
}
getCurrentPrice(): ExchangeData[] {
return this.parser.getCurrentPrice();
}
getExchangePrice(exchange: string, pair: string, typePrice: string) {
return this.parser.getExchangePrice(exchange, pair, typePrice);
}
}
| identifier_body | ||
server-tcp.ts | import { MatrixService } from './../db/matrix/matrix.service';
import { Trade } from './../common/models/trade';
import { ExchangeService } from './../db/exchange/exchange.service';
import { Order } from './../common/models/order';
const net = require('toa-net');
import { Parser } from './parser';
import { OrderBookService } from './../db/orderBook/orderBook.service';
import { OrderService } from './../db/order/order.service';
import { Controller } from '@nestjs/common';
import { ClientTcp } from './client-tcp';
import { TradeService } from './../db/trade/trade.service';
import { StateTrading } from './../common/models/stateTrading';
import { ExchangeData } from './../common/models/exchangeData';
import { SERVER_CONFIG } from './../../server.constants';
import { ArbitrageBalanceService } from '../db/arbit-balance/arbit-balance.service';
import { PureTrade } from '../common/models/pureTrade';
import { setInterval } from 'timers';
import { Order5BookService } from '../db/orderBook_5/orderBook_5.service';
const auth = new net.Auth('secretxxx');
@Controller()
export class ServerTcpBot {
server: any;
parser: Parser;
clientsTcp: ClientTcp[] = [];
stateTrading: StateTrading[] = [];
startFlag = true;
// флаг который с фронта управляет возможностью совершения сделок
avalableArbitrage = false;
constructor(
private readonly orderBooksService: OrderBookService,
private readonly order5BooksService: Order5BookService,
private readonly orderService: OrderService,
private readonly tradeService: TradeService,
private readonly exchangeService: ExchangeService,
private readonly arbitrageBalanceService: ArbitrageBalanceService,
private readonly matrixService: MatrixService) {
this.parser = new Parser(this.orderBooksService, this.order5BooksService, this.exchangeService, this.arbitrageBalanceService,
this.tradeService, this.matrixService);
}
// запрещает ведение новых арбитражных сделок
stopArbitrage() {
this.avalableArbitrage = false;
console.log('Arbitrage stoped');
}
// разрешает ведение новых арбитражных сделок
startArbitrage() {
this.avalableArbitrage = true;
console.log('Arbitrage started');
}
// получаем сделки с 1 полуцикла
async getFirstTradeArbitrage(id: string) {
const arbitrage = await this.arbitrageBalanceService.findArbitrageById(id);
return [arbitrage.firstCickleSell, arbitrage.firstCickleBuy];
}
// вызов с фронта закрытия 2 - го полуцикла
async closeSecondArbitrage(trades: PureTrade[], arbitId: string) {
const orders = await this.parser.closeSecondArbitrage(trades, arbitId);
// console.log('==============object :', orders);
if (orders.length) {
console.log(' ++++++++++++++++closeSecondArbitrage call :sendOrdersToBot');
this.sendOrdersToBot(orders, 'taker');
this.arbitrageBalanceService.closeSecondCircleStatus(arbitId);
}
}
async passTradeToDB(message: any, status: string) {
const trades = this.parser.parseTrades(message);
if (trades) {
for (const trade of trades) {
// const status = (trade.remainingSize === 0) ? 'done' : 'partial';
if (trade) {
await this.combineTrade(trade, status);
}
}
}
}
async combineTrade(trade: Trade, status?: string) {
this.orderService.updateStatusOrder(trade.arbitrageId, trade.typeOrder, trade.exchOrderId, status, '');
this.parser.subTradedVolume(trade);
this.tradeService.addNewData(trade);
this.countPureTrade(trade);
let newOrder;
// console.log('trade.arbitID :', trade.arbitrageId, trade.typeOrder);
if (this.parser.orderFullFilled(trade)) {
const newOrderBook = this.parser.addNewOrderBookData();
// if (this.avalableArbitrage) {
newOrder = await this.parser.defineSellBuy(newOrderBook);
console.log(' 80parser.defineSellBuy:', newOrder);
// this.parser.removeCheckerSentOrders(trade.arbitrageId);
// }
} else {
if (this.avalableArbitrage) {
console.log('85this.parser.makePartialOrder:');
newOrder = await this.parser.makePartialOrder(trade);
}
}
if (newOrder | = await this.tradeService.findTrade(trade);
if (!sameTrade) {
console.log('sameTrade :', sameTrade);
const pureTrade = await this.parser.addNewArbitrageTrade(trade);
await this.arbitrageBalanceService.addNewTrade(pureTrade, trade.arbitrageId);
await this.parser.removeCheckerSentOrders(trade.arbitrageId);
} else {
this.checkOrder(trade.arbitrageId, trade.typeOrder, trade.exchange);
}
}
async sendOrdersFromPromise(newOrder: any) {
console.log(' 125sendOrders FromPromise call :sendOrdersToBot');
await this.sendOrdersToBot(newOrder, 'taker');
}
generateOrderAfterCancel(message: any) {
const trades = this.parser.parseTrades(message);
if (trades) {
for (const trade of trades) {
this.orderService.updateStatusOrder(trade.arbitrageId, trade.typeOrder, trade.exchOrderId, 'cancelled', '');
const order: Order[] = this.parser.replaceCancelledOrderByNewOrder(trade);
if (order) {
console.log('generateOrderAfterCancel call sendOrdersToBot :');
this.sendOrdersToBot(order);
}
}
}
}
createTcpServer() {
if (!this.server) {
this.startServer();
this.parser.startSaverOrderBooks();
} else if (!this.server.address()) {
this.startServer();
} else {
console.log('the already started');
}
}
// запуск сервера для прослушивания по TCP на порту сообщений
startServer() {
this.server = new net.Server((socket: any) => {
socket.on('message', async (message: any) => {
// console.log('message : %j', message);
if (message.type === 'notification' // && message.payload.method === 'trades' ||
&& message.payload.method === 'partial' || message.payload.method === 'done') {
console.log(' message.payload.method :', message.payload.method);
this.passTradeToDB(message, message.payload.method);
}
if (message.type === 'notification' && message.payload.method === 'resCheckOrder') {
const confirmedOrder = message.payload.params[0];
if (confirmedOrder) {
console.log('confirmedOrder :',
confirmedOrder.arbitrageId, confirmedOrder.status, confirmedOrder.exchOrderId, confirmedOrder.typeOrder);
await this.orderService.updateStatusOrder(confirmedOrder.arbitrageId, confirmedOrder.typeOrder,
confirmedOrder.exchangeId, confirmedOrder.status, '');
if (confirmedOrder.status === 'notSend') {
console.log('!!! confirmedOrder id status :', confirmedOrder.arbitrageId, confirmedOrder.status);
} else {
const trade = this.tradeService.findTrade(confirmedOrder);
if (trade) {
this.countPureTrade(confirmedOrder as Trade);
this.parser.removeCheckerSentOrders(confirmedOrder.arbitrageId);
}
}
}
}
if (message.type === 'notification' && message.payload.method === 'statusOrder') {
console.log('168 ########### status=', message.payload.params[3]);
this.orderService.updateStatusOrder(message.payload.params[0], message.payload.params[1],
message.payload.params[2], message.payload.params[3], message.payload.params[4]);
if (message.payload.params[3] === 'open') {
const trade = {
exchange: '', pair: '', price: '', volume: '', typeOrder: message.payload.params[1],
arbitOrderId: message.payload.params[0], exchOrderId: '', time: ''
};
// this.checkOrder(trade);
}
/* if (message.payload.params[3] === 'done') {
this.passTradeToDB(message, message.payload.params[3]);
} */
if (message.payload.params[3] === 'cancelled') {
this.generateOrderAfterCancel(message);
}
}
else {
const parsedMessage = this.parser.parseTcpMessage(message);
this.parser.calculateAskBid(parsedMessage);
const newOrderBook = this.parser.addNewOrderBookData();
// console.log('newOrderBook :', newOrderBook);
if (this.avalableArbitrage) {
if (this.startFlag) {
const orders = await this.parser.defineSellBuy(newOrderBook);
console.log('this.startFlag :', this.startFlag, this.avalableArbitrage);
this.sendOrdersToBot(orders);
this.startFlag = false;
}
}
}
});
});
this.server.listen(SERVER_CONFIG.tcpPort);
console.log(`Tcp server listen port ${SERVER_CONFIG.tcpPort}`);
// Enable authentication for server
this.server.getAuthenticator = () => {
return (signature: string) => auth.verify(signature);
};
}
private checkOrder(arbitId: string, orderType: string, exchange: string) {
return this.orderService.findOrderByIdExchange(arbitId, orderType, exchange)
.then((order) => {
if (order) {
const checkingOrder = {
nameOrder: 'checkOrder', order: { arbitrageId: arbitId, pairOrder: order.pair, typeOrder: orderType },
serverPort: order.port, host: order.host,
};
this.startClient(checkingOrder);
}
});
}
stopTcpServer() {
this.server.close();
console.log('Tcp server stoped');
}
createClient(clientSocket: any) {
const newClientTcp = new net.Client();
this.clientsTcp.push({ socket: clientSocket, client: newClientTcp });
newClientTcp.getSignature = () => {
return auth.sign({ id: 'clientIdxxx' });
};
newClientTcp.connect(clientSocket);
return newClientTcp;
}
getSpread() {
return this.parser.getBidAskSpread();
}
async getCurrentArbitrage() {
// получаем данные для фронта для незакрытых во 2 полуцикле сделок
const arbitrage = await this.parser.getNotClosedArbitrage();
const timeAfterSent = Date.now() - 60000;
const sentOpenOrders = await this.orderService.getSameStatusOrder('open', timeAfterSent);
if (sentOpenOrders.length) {
for (const sentOpenOrder of sentOpenOrders) {
// console.log('251check Order :', sentOpenOrder.arbitrageId, sentOpenOrder.typeOrder);
this.checkOrder(sentOpenOrder.arbitrageId, sentOpenOrder.typeOrder, sentOpenOrder.exchange);
}
}
// проверяем список отправленных но не подтвержденных ордеров 2 полуцикла
const notConfirmedOrders = this.parser.expiredTimeSendNotConfirmOrders(Date.now());
if (notConfirmedOrders.length) {
for (const secondOrders of notConfirmedOrders) {
this.checkOrder(secondOrders.arbitId, 'sell', secondOrders.secondBuyExchange);
this.checkOrder(secondOrders.arbitId, 'buy', secondOrders.secondSellExchange);
// получаем список closeSecondOrders с ордерами по которым
// не было подтверждения сделки в течение 1 мин
this.parser.setCurrentTimeSentOrder(Date.now(), secondOrders.arbitId);
// this.parser.closeSecondOrders = [];
}
}
if (this.parser.closeSecondOrders.length && arbitrage.length) {
// если список closeSecondOrders есть то повторно отправляем неподтвержденные ордера на зенбот
console.log('269 sent new order :');
this.sendOrdersToBot(this.parser.closeSecondOrders, 'taker');
// сразу обнуляем список closeSecondOrders
this.parser.closeSecondOrders = [];
}
return arbitrage;
}
sendOrdersToBot(orders: Order[], feeType?: string) {
let parametersOrder;
// console.log('============orders :', orders);
if (orders) {
for (const currentOrder of orders) {
if (!feeType) {
parametersOrder = {
nameOrder: 'sendOrder',
serverPort: currentOrder.port, host: currentOrder.host,
order: currentOrder,
};
} else {
parametersOrder = {
nameOrder: 'sendTakerOrder',
serverPort: currentOrder.port, host: currentOrder.host,
order: currentOrder,
};
}
if (parametersOrder.order.price > 0) {
this.startClient(parametersOrder);
this.orderService.addNewOrder(currentOrder);
}
}
}
}
async startClient(order: any) {
try {
if (order.host && order.serverPort) {
const clientSocket = `tcp://${order.host}:${order.serverPort}`;
let currentClient = this.defineTcpClient(clientSocket);
if (!currentClient) {
currentClient = this.createClient(clientSocket);
}
currentClient.on('error', (err: any) => {
if (err.code === 'ETIMEDOUT') {
currentClient.destroy();
}
currentClient.reconnect();
});
const stringOrder = JSON.stringify(order.order);
// console.log('321 order.nameOrder :', order.nameOrder);
const countSentOpenOrders = await this.orderService.checkOpenOrders(order.order.arbitrageId, 'open', order.order.typeOrder);
if (order.nameOrder === 'checkOrder') {
// console.log('countSentOpenOrders status', countSentOpenOrders, order.nameOrder);
currentClient.notification(order.nameOrder, [`${stringOrder}`]);
} else if (!countSentOpenOrders) {
console.log(' Sent Orders :', countSentOpenOrders, order.order.status);
await currentClient.notification(order.nameOrder, [`${stringOrder}`]);
await this.parser.changeStatusInSentOrders(order.order.arbitrageId, order.order.typeOrder);
}
}
} catch (e) {
console.log('err :', e);
}
}
defineTcpClient(socketTcp: any): any {
if (this.clientsTcp) {
for (const iterator of this.clientsTcp) {
if (iterator.socket === socketTcp) {
return iterator.client;
}
}
}
}
getCurrentPrice(): ExchangeData[] {
return this.parser.getCurrentPrice();
}
getExchangePrice(exchange: string, pair: string, typePrice: string) {
return this.parser.getExchangePrice(exchange, pair, typePrice);
}
}
| ) {
console.log(' 91 sendOrdersToBot( :');
await this.sendOrdersFromPromise(newOrder);
}
}
async countPureTrade(trade: Trade) {
const sameTrade | conditional_block |
projection_pushing.go | /*
Copyright 2022 The Vitess Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package planbuilder
import (
"fmt"
"vitess.io/vitess/go/vt/sqlparser"
"vitess.io/vitess/go/vt/vterrors"
"vitess.io/vitess/go/vt/vtgate/engine"
popcode "vitess.io/vitess/go/vt/vtgate/engine/opcode"
"vitess.io/vitess/go/vt/vtgate/planbuilder/operators"
"vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext"
"vitess.io/vitess/go/vt/vtgate/semantics"
)
// pushProjection pushes a projection to the plan.
func pushProjection(
ctx *plancontext.PlanningContext,
expr *sqlparser.AliasedExpr,
plan logicalPlan,
inner, reuseCol, hasAggregation bool,
) (offset int, added bool, err error) {
switch node := plan.(type) {
case *limit, *projection, *pulloutSubquery, *distinct, *filter:
// All of these either push to the single source, or push to the LHS
src := node.Inputs()[0]
return pushProjection(ctx, expr, src, inner, reuseCol, hasAggregation)
case *route:
return addExpressionToRoute(ctx, node, expr, reuseCol)
case *hashJoin:
return pushProjectionIntoHashJoin(ctx, expr, node, reuseCol, inner, hasAggregation)
case *join:
return pushProjectionIntoJoin(ctx, expr, node, reuseCol, inner, hasAggregation)
case *simpleProjection:
return pushProjectionIntoSimpleProj(ctx, expr, node, inner, hasAggregation, reuseCol)
case *orderedAggregate:
return pushProjectionIntoOA(ctx, expr, node, inner, hasAggregation)
case *vindexFunc:
return pushProjectionIntoVindexFunc(node, expr, reuseCol)
case *semiJoin:
return pushProjectionIntoSemiJoin(ctx, expr, reuseCol, node, inner, hasAggregation)
case *concatenate:
return pushProjectionIntoConcatenate(ctx, expr, hasAggregation, node, inner, reuseCol)
default:
return 0, false, vterrors.VT13001(fmt.Sprintf("push projection does not yet support: %T", node))
}
}
func pushProjectionIntoVindexFunc(node *vindexFunc, expr *sqlparser.AliasedExpr, reuseCol bool) (int, bool, error) {
colsBefore := len(node.eVindexFunc.Cols)
i, err := node.SupplyProjection(expr, reuseCol)
if err != nil {
return 0, false, err
}
return i /* col added */, len(node.eVindexFunc.Cols) > colsBefore, nil
}
func pushProjectionIntoConcatenate(ctx *plancontext.PlanningContext, expr *sqlparser.AliasedExpr, hasAggregation bool, node *concatenate, inner bool, reuseCol bool) (int, bool, error) {
if hasAggregation {
return 0, false, vterrors.VT12001("aggregation on UNIONs")
}
offset, added, err := pushProjection(ctx, expr, node.sources[0], inner, reuseCol, hasAggregation)
if err != nil {
return 0, false, err
}
if added && ctx.SemTable.DirectDeps(expr.Expr).NonEmpty() {
return 0, false, vterrors.VT13001(fmt.Sprintf("pushing projection %v on concatenate should reference an existing column", sqlparser.String(expr)))
}
if added {
for _, source := range node.sources[1:] {
_, _, err := pushProjection(ctx, expr, source, inner, reuseCol, hasAggregation)
if err != nil { | }
func pushProjectionIntoSemiJoin(
ctx *plancontext.PlanningContext,
expr *sqlparser.AliasedExpr,
reuseCol bool,
node *semiJoin,
inner, hasAggregation bool,
) (int, bool, error) {
passDownReuseCol := reuseCol
if !reuseCol {
passDownReuseCol = expr.As.IsEmpty()
}
offset, added, err := pushProjection(ctx, expr, node.lhs, inner, passDownReuseCol, hasAggregation)
if err != nil {
return 0, false, err
}
column := -(offset + 1)
if reuseCol && !added {
for idx, col := range node.cols {
if column == col {
return idx, false, nil
}
}
}
node.cols = append(node.cols, column)
return len(node.cols) - 1, true, nil
}
func pushProjectionIntoOA(ctx *plancontext.PlanningContext, expr *sqlparser.AliasedExpr, node *orderedAggregate, inner, hasAggregation bool) (int, bool, error) {
colName, isColName := expr.Expr.(*sqlparser.ColName)
for _, aggregate := range node.aggregates {
if ctx.SemTable.EqualsExpr(aggregate.Expr, expr.Expr) {
return aggregate.Col, false, nil
}
if isColName && colName.Name.EqualString(aggregate.Alias) {
return aggregate.Col, false, nil
}
}
for _, key := range node.groupByKeys {
if ctx.SemTable.EqualsExpr(key.Expr, expr.Expr) {
return key.KeyCol, false, nil
}
}
offset, _, err := pushProjection(ctx, expr, node.input, inner, true, hasAggregation)
if err != nil {
return 0, false, err
}
aggr := engine.NewAggregateParam(popcode.AggregateAnyValue, offset, expr.ColumnName())
aggr.Expr = expr.Expr
aggr.Original = expr
node.aggregates = append(node.aggregates, aggr)
return offset, true, nil
}
func pushProjectionIntoSimpleProj(
ctx *plancontext.PlanningContext,
expr *sqlparser.AliasedExpr,
node *simpleProjection,
inner, hasAggregation, reuseCol bool,
) (int, bool, error) {
offset, _, err := pushProjection(ctx, expr, node.input, inner, true, hasAggregation)
if err != nil {
return 0, false, err
}
for i, value := range node.eSimpleProj.Cols {
// we return early if we already have the column in the simple projection's
// output list so we do not add it again.
if reuseCol && value == offset {
return i, false, nil
}
}
node.eSimpleProj.Cols = append(node.eSimpleProj.Cols, offset)
return len(node.eSimpleProj.Cols) - 1, true, nil
}
func pushProjectionIntoJoin(
ctx *plancontext.PlanningContext,
expr *sqlparser.AliasedExpr,
node *join,
reuseCol, inner, hasAggregation bool,
) (int, bool, error) {
lhsSolves := node.Left.ContainsTables()
rhsSolves := node.Right.ContainsTables()
deps := ctx.SemTable.RecursiveDeps(expr.Expr)
var column int
var appended bool
passDownReuseCol := reuseCol
if !reuseCol {
passDownReuseCol = expr.As.IsEmpty()
}
switch {
case deps.IsSolvedBy(lhsSolves):
offset, added, err := pushProjection(ctx, expr, node.Left, inner, passDownReuseCol, hasAggregation)
if err != nil {
return 0, false, err
}
column = -(offset + 1)
appended = added
case deps.IsSolvedBy(rhsSolves):
offset, added, err := pushProjection(ctx, expr, node.Right, inner && node.Opcode != engine.LeftJoin, passDownReuseCol, hasAggregation)
if err != nil {
return 0, false, err
}
column = offset + 1
appended = added
default:
// if an expression has aggregation, then it should not be split up and pushed to both sides,
// for example an expression like count(*) will have dependencies on both sides, but we should not push it
// instead we should return an error
if hasAggregation {
return 0, false, vterrors.VT12001("cross-shard query with aggregates")
}
// now we break the expression into left and right side dependencies and rewrite the left ones to bind variables
joinCol, err := operators.BreakExpressionInLHSandRHS(ctx, expr.Expr, lhsSolves)
if err != nil {
return 0, false, err
}
// go over all the columns coming from the left side of the tree and push them down. While at it, also update the bind variable map.
// It is okay to reuse the columns on the left side since
// the final expression which will be selected will be pushed into the right side.
for i, col := range joinCol.LHSExprs {
colOffset, _, err := pushProjection(ctx, &sqlparser.AliasedExpr{Expr: col}, node.Left, inner, true, false)
if err != nil {
return 0, false, err
}
node.Vars[joinCol.BvNames[i]] = colOffset
}
// push the rewritten expression on the right side of the tree. Here we should take care whether we want to reuse the expression or not.
expr.Expr = joinCol.RHSExpr
offset, added, err := pushProjection(ctx, expr, node.Right, inner && node.Opcode != engine.LeftJoin, passDownReuseCol, false)
if err != nil {
return 0, false, err
}
column = offset + 1
appended = added
}
if reuseCol && !appended {
for idx, col := range node.Cols {
if column == col {
return idx, false, nil
}
}
// the column was not appended to either child, but we could not find it in out cols list,
// so we'll still add it
}
node.Cols = append(node.Cols, column)
return len(node.Cols) - 1, true, nil
}
func pushProjectionIntoHashJoin(
ctx *plancontext.PlanningContext,
expr *sqlparser.AliasedExpr,
node *hashJoin,
reuseCol, inner, hasAggregation bool,
) (int, bool, error) {
lhsSolves := node.Left.ContainsTables()
rhsSolves := node.Right.ContainsTables()
deps := ctx.SemTable.RecursiveDeps(expr.Expr)
var column int
var appended bool
passDownReuseCol := reuseCol
if !reuseCol {
passDownReuseCol = expr.As.IsEmpty()
}
switch {
case deps.IsSolvedBy(lhsSolves):
offset, added, err := pushProjection(ctx, expr, node.Left, inner, passDownReuseCol, hasAggregation)
if err != nil {
return 0, false, err
}
column = -(offset + 1)
appended = added
case deps.IsSolvedBy(rhsSolves):
offset, added, err := pushProjection(ctx, expr, node.Right, inner && node.Opcode != engine.LeftJoin, passDownReuseCol, hasAggregation)
if err != nil {
return 0, false, err
}
column = offset + 1
appended = added
default:
// if an expression has aggregation, then it should not be split up and pushed to both sides,
// for example an expression like count(*) will have dependencies on both sides, but we should not push it
// instead we should return an error
if hasAggregation {
return 0, false, vterrors.VT12001("cross-shard query with aggregates")
}
return 0, false, vterrors.VT12001("hash join with projection from both sides of the join")
}
if reuseCol && !appended {
for idx, col := range node.Cols {
if column == col {
return idx, false, nil
}
}
// the column was not appended to either child, but we could not find it in out cols list,
// so we'll still add it
}
node.Cols = append(node.Cols, column)
return len(node.Cols) - 1, true, nil
}
func addExpressionToRoute(ctx *plancontext.PlanningContext, rb *route, expr *sqlparser.AliasedExpr, reuseCol bool) (int, bool, error) {
if reuseCol {
if i := checkIfAlreadyExists(expr, rb.Select, ctx.SemTable); i != -1 {
return i, false, nil
}
}
sqlparser.RemoveKeyspaceFromColName(expr.Expr)
sel, isSel := rb.Select.(*sqlparser.Select)
if !isSel {
return 0, false, vterrors.VT12001(fmt.Sprintf("pushing projection '%s' on %T", sqlparser.String(expr), rb.Select))
}
if ctx.RewriteDerivedExpr {
// if we are trying to push a projection that belongs to a DerivedTable
// we rewrite that expression, so it matches the column name used inside
// that derived table.
err := rewriteProjectionOfDerivedTable(expr, ctx.SemTable)
if err != nil {
return 0, false, err
}
}
offset := len(sel.SelectExprs)
sel.SelectExprs = append(sel.SelectExprs, expr)
return offset, true, nil
}
func rewriteProjectionOfDerivedTable(expr *sqlparser.AliasedExpr, semTable *semantics.SemTable) error {
ti, err := semTable.TableInfoForExpr(expr.Expr)
if err != nil && err != semantics.ErrNotSingleTable {
return err
}
_, isDerivedTable := ti.(*semantics.DerivedTable)
if isDerivedTable {
expr.Expr = semantics.RewriteDerivedTableExpression(expr.Expr, ti)
}
return nil
} | return 0, false, err
}
}
}
return offset, added, nil | random_line_split |
projection_pushing.go | /*
Copyright 2022 The Vitess Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package planbuilder
import (
"fmt"
"vitess.io/vitess/go/vt/sqlparser"
"vitess.io/vitess/go/vt/vterrors"
"vitess.io/vitess/go/vt/vtgate/engine"
popcode "vitess.io/vitess/go/vt/vtgate/engine/opcode"
"vitess.io/vitess/go/vt/vtgate/planbuilder/operators"
"vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext"
"vitess.io/vitess/go/vt/vtgate/semantics"
)
// pushProjection pushes a projection to the plan.
func pushProjection(
ctx *plancontext.PlanningContext,
expr *sqlparser.AliasedExpr,
plan logicalPlan,
inner, reuseCol, hasAggregation bool,
) (offset int, added bool, err error) {
switch node := plan.(type) {
case *limit, *projection, *pulloutSubquery, *distinct, *filter:
// All of these either push to the single source, or push to the LHS
src := node.Inputs()[0]
return pushProjection(ctx, expr, src, inner, reuseCol, hasAggregation)
case *route:
return addExpressionToRoute(ctx, node, expr, reuseCol)
case *hashJoin:
return pushProjectionIntoHashJoin(ctx, expr, node, reuseCol, inner, hasAggregation)
case *join:
return pushProjectionIntoJoin(ctx, expr, node, reuseCol, inner, hasAggregation)
case *simpleProjection:
return pushProjectionIntoSimpleProj(ctx, expr, node, inner, hasAggregation, reuseCol)
case *orderedAggregate:
return pushProjectionIntoOA(ctx, expr, node, inner, hasAggregation)
case *vindexFunc:
return pushProjectionIntoVindexFunc(node, expr, reuseCol)
case *semiJoin:
return pushProjectionIntoSemiJoin(ctx, expr, reuseCol, node, inner, hasAggregation)
case *concatenate:
return pushProjectionIntoConcatenate(ctx, expr, hasAggregation, node, inner, reuseCol)
default:
return 0, false, vterrors.VT13001(fmt.Sprintf("push projection does not yet support: %T", node))
}
}
func pushProjectionIntoVindexFunc(node *vindexFunc, expr *sqlparser.AliasedExpr, reuseCol bool) (int, bool, error) {
colsBefore := len(node.eVindexFunc.Cols)
i, err := node.SupplyProjection(expr, reuseCol)
if err != nil {
return 0, false, err
}
return i /* col added */, len(node.eVindexFunc.Cols) > colsBefore, nil
}
func pushProjectionIntoConcatenate(ctx *plancontext.PlanningContext, expr *sqlparser.AliasedExpr, hasAggregation bool, node *concatenate, inner bool, reuseCol bool) (int, bool, error) {
if hasAggregation {
return 0, false, vterrors.VT12001("aggregation on UNIONs")
}
offset, added, err := pushProjection(ctx, expr, node.sources[0], inner, reuseCol, hasAggregation)
if err != nil {
return 0, false, err
}
if added && ctx.SemTable.DirectDeps(expr.Expr).NonEmpty() {
return 0, false, vterrors.VT13001(fmt.Sprintf("pushing projection %v on concatenate should reference an existing column", sqlparser.String(expr)))
}
if added {
for _, source := range node.sources[1:] {
_, _, err := pushProjection(ctx, expr, source, inner, reuseCol, hasAggregation)
if err != nil {
return 0, false, err
}
}
}
return offset, added, nil
}
func pushProjectionIntoSemiJoin(
ctx *plancontext.PlanningContext,
expr *sqlparser.AliasedExpr,
reuseCol bool,
node *semiJoin,
inner, hasAggregation bool,
) (int, bool, error) {
passDownReuseCol := reuseCol
if !reuseCol {
passDownReuseCol = expr.As.IsEmpty()
}
offset, added, err := pushProjection(ctx, expr, node.lhs, inner, passDownReuseCol, hasAggregation)
if err != nil {
return 0, false, err
}
column := -(offset + 1)
if reuseCol && !added {
for idx, col := range node.cols {
if column == col {
return idx, false, nil
}
}
}
node.cols = append(node.cols, column)
return len(node.cols) - 1, true, nil
}
func pushProjectionIntoOA(ctx *plancontext.PlanningContext, expr *sqlparser.AliasedExpr, node *orderedAggregate, inner, hasAggregation bool) (int, bool, error) {
colName, isColName := expr.Expr.(*sqlparser.ColName)
for _, aggregate := range node.aggregates {
if ctx.SemTable.EqualsExpr(aggregate.Expr, expr.Expr) {
return aggregate.Col, false, nil
}
if isColName && colName.Name.EqualString(aggregate.Alias) {
return aggregate.Col, false, nil
}
}
for _, key := range node.groupByKeys {
if ctx.SemTable.EqualsExpr(key.Expr, expr.Expr) {
return key.KeyCol, false, nil
}
}
offset, _, err := pushProjection(ctx, expr, node.input, inner, true, hasAggregation)
if err != nil {
return 0, false, err
}
aggr := engine.NewAggregateParam(popcode.AggregateAnyValue, offset, expr.ColumnName())
aggr.Expr = expr.Expr
aggr.Original = expr
node.aggregates = append(node.aggregates, aggr)
return offset, true, nil
}
func pushProjectionIntoSimpleProj(
ctx *plancontext.PlanningContext,
expr *sqlparser.AliasedExpr,
node *simpleProjection,
inner, hasAggregation, reuseCol bool,
) (int, bool, error) {
offset, _, err := pushProjection(ctx, expr, node.input, inner, true, hasAggregation)
if err != nil {
return 0, false, err
}
for i, value := range node.eSimpleProj.Cols {
// we return early if we already have the column in the simple projection's
// output list so we do not add it again.
if reuseCol && value == offset {
return i, false, nil
}
}
node.eSimpleProj.Cols = append(node.eSimpleProj.Cols, offset)
return len(node.eSimpleProj.Cols) - 1, true, nil
}
func pushProjectionIntoJoin(
ctx *plancontext.PlanningContext,
expr *sqlparser.AliasedExpr,
node *join,
reuseCol, inner, hasAggregation bool,
) (int, bool, error) {
lhsSolves := node.Left.ContainsTables()
rhsSolves := node.Right.ContainsTables()
deps := ctx.SemTable.RecursiveDeps(expr.Expr)
var column int
var appended bool
passDownReuseCol := reuseCol
if !reuseCol {
passDownReuseCol = expr.As.IsEmpty()
}
switch {
case deps.IsSolvedBy(lhsSolves):
offset, added, err := pushProjection(ctx, expr, node.Left, inner, passDownReuseCol, hasAggregation)
if err != nil {
return 0, false, err
}
column = -(offset + 1)
appended = added
case deps.IsSolvedBy(rhsSolves):
offset, added, err := pushProjection(ctx, expr, node.Right, inner && node.Opcode != engine.LeftJoin, passDownReuseCol, hasAggregation)
if err != nil {
return 0, false, err
}
column = offset + 1
appended = added
default:
// if an expression has aggregation, then it should not be split up and pushed to both sides,
// for example an expression like count(*) will have dependencies on both sides, but we should not push it
// instead we should return an error
if hasAggregation {
return 0, false, vterrors.VT12001("cross-shard query with aggregates")
}
// now we break the expression into left and right side dependencies and rewrite the left ones to bind variables
joinCol, err := operators.BreakExpressionInLHSandRHS(ctx, expr.Expr, lhsSolves)
if err != nil {
return 0, false, err
}
// go over all the columns coming from the left side of the tree and push them down. While at it, also update the bind variable map.
// It is okay to reuse the columns on the left side since
// the final expression which will be selected will be pushed into the right side.
for i, col := range joinCol.LHSExprs {
colOffset, _, err := pushProjection(ctx, &sqlparser.AliasedExpr{Expr: col}, node.Left, inner, true, false)
if err != nil {
return 0, false, err
}
node.Vars[joinCol.BvNames[i]] = colOffset
}
// push the rewritten expression on the right side of the tree. Here we should take care whether we want to reuse the expression or not.
expr.Expr = joinCol.RHSExpr
offset, added, err := pushProjection(ctx, expr, node.Right, inner && node.Opcode != engine.LeftJoin, passDownReuseCol, false)
if err != nil {
return 0, false, err
}
column = offset + 1
appended = added
}
if reuseCol && !appended {
for idx, col := range node.Cols {
if column == col {
return idx, false, nil
}
}
// the column was not appended to either child, but we could not find it in out cols list,
// so we'll still add it
}
node.Cols = append(node.Cols, column)
return len(node.Cols) - 1, true, nil
}
func pushProjectionIntoHashJoin(
ctx *plancontext.PlanningContext,
expr *sqlparser.AliasedExpr,
node *hashJoin,
reuseCol, inner, hasAggregation bool,
) (int, bool, error) {
lhsSolves := node.Left.ContainsTables()
rhsSolves := node.Right.ContainsTables()
deps := ctx.SemTable.RecursiveDeps(expr.Expr)
var column int
var appended bool
passDownReuseCol := reuseCol
if !reuseCol {
passDownReuseCol = expr.As.IsEmpty()
}
switch {
case deps.IsSolvedBy(lhsSolves):
offset, added, err := pushProjection(ctx, expr, node.Left, inner, passDownReuseCol, hasAggregation)
if err != nil {
return 0, false, err
}
column = -(offset + 1)
appended = added
case deps.IsSolvedBy(rhsSolves):
offset, added, err := pushProjection(ctx, expr, node.Right, inner && node.Opcode != engine.LeftJoin, passDownReuseCol, hasAggregation)
if err != nil {
return 0, false, err
}
column = offset + 1
appended = added
default:
// if an expression has aggregation, then it should not be split up and pushed to both sides,
// for example an expression like count(*) will have dependencies on both sides, but we should not push it
// instead we should return an error
if hasAggregation {
return 0, false, vterrors.VT12001("cross-shard query with aggregates")
}
return 0, false, vterrors.VT12001("hash join with projection from both sides of the join")
}
if reuseCol && !appended {
for idx, col := range node.Cols {
if column == col {
return idx, false, nil
}
}
// the column was not appended to either child, but we could not find it in out cols list,
// so we'll still add it
}
node.Cols = append(node.Cols, column)
return len(node.Cols) - 1, true, nil
}
func addExpressionToRoute(ctx *plancontext.PlanningContext, rb *route, expr *sqlparser.AliasedExpr, reuseCol bool) (int, bool, error) |
func rewriteProjectionOfDerivedTable(expr *sqlparser.AliasedExpr, semTable *semantics.SemTable) error {
ti, err := semTable.TableInfoForExpr(expr.Expr)
if err != nil && err != semantics.ErrNotSingleTable {
return err
}
_, isDerivedTable := ti.(*semantics.DerivedTable)
if isDerivedTable {
expr.Expr = semantics.RewriteDerivedTableExpression(expr.Expr, ti)
}
return nil
}
| {
if reuseCol {
if i := checkIfAlreadyExists(expr, rb.Select, ctx.SemTable); i != -1 {
return i, false, nil
}
}
sqlparser.RemoveKeyspaceFromColName(expr.Expr)
sel, isSel := rb.Select.(*sqlparser.Select)
if !isSel {
return 0, false, vterrors.VT12001(fmt.Sprintf("pushing projection '%s' on %T", sqlparser.String(expr), rb.Select))
}
if ctx.RewriteDerivedExpr {
// if we are trying to push a projection that belongs to a DerivedTable
// we rewrite that expression, so it matches the column name used inside
// that derived table.
err := rewriteProjectionOfDerivedTable(expr, ctx.SemTable)
if err != nil {
return 0, false, err
}
}
offset := len(sel.SelectExprs)
sel.SelectExprs = append(sel.SelectExprs, expr)
return offset, true, nil
} | identifier_body |
projection_pushing.go | /*
Copyright 2022 The Vitess Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package planbuilder
import (
"fmt"
"vitess.io/vitess/go/vt/sqlparser"
"vitess.io/vitess/go/vt/vterrors"
"vitess.io/vitess/go/vt/vtgate/engine"
popcode "vitess.io/vitess/go/vt/vtgate/engine/opcode"
"vitess.io/vitess/go/vt/vtgate/planbuilder/operators"
"vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext"
"vitess.io/vitess/go/vt/vtgate/semantics"
)
// pushProjection pushes a projection to the plan.
func pushProjection(
ctx *plancontext.PlanningContext,
expr *sqlparser.AliasedExpr,
plan logicalPlan,
inner, reuseCol, hasAggregation bool,
) (offset int, added bool, err error) {
switch node := plan.(type) {
case *limit, *projection, *pulloutSubquery, *distinct, *filter:
// All of these either push to the single source, or push to the LHS
src := node.Inputs()[0]
return pushProjection(ctx, expr, src, inner, reuseCol, hasAggregation)
case *route:
return addExpressionToRoute(ctx, node, expr, reuseCol)
case *hashJoin:
return pushProjectionIntoHashJoin(ctx, expr, node, reuseCol, inner, hasAggregation)
case *join:
return pushProjectionIntoJoin(ctx, expr, node, reuseCol, inner, hasAggregation)
case *simpleProjection:
return pushProjectionIntoSimpleProj(ctx, expr, node, inner, hasAggregation, reuseCol)
case *orderedAggregate:
return pushProjectionIntoOA(ctx, expr, node, inner, hasAggregation)
case *vindexFunc:
return pushProjectionIntoVindexFunc(node, expr, reuseCol)
case *semiJoin:
return pushProjectionIntoSemiJoin(ctx, expr, reuseCol, node, inner, hasAggregation)
case *concatenate:
return pushProjectionIntoConcatenate(ctx, expr, hasAggregation, node, inner, reuseCol)
default:
return 0, false, vterrors.VT13001(fmt.Sprintf("push projection does not yet support: %T", node))
}
}
func pushProjectionIntoVindexFunc(node *vindexFunc, expr *sqlparser.AliasedExpr, reuseCol bool) (int, bool, error) {
colsBefore := len(node.eVindexFunc.Cols)
i, err := node.SupplyProjection(expr, reuseCol)
if err != nil {
return 0, false, err
}
return i /* col added */, len(node.eVindexFunc.Cols) > colsBefore, nil
}
func pushProjectionIntoConcatenate(ctx *plancontext.PlanningContext, expr *sqlparser.AliasedExpr, hasAggregation bool, node *concatenate, inner bool, reuseCol bool) (int, bool, error) {
if hasAggregation {
return 0, false, vterrors.VT12001("aggregation on UNIONs")
}
offset, added, err := pushProjection(ctx, expr, node.sources[0], inner, reuseCol, hasAggregation)
if err != nil {
return 0, false, err
}
if added && ctx.SemTable.DirectDeps(expr.Expr).NonEmpty() {
return 0, false, vterrors.VT13001(fmt.Sprintf("pushing projection %v on concatenate should reference an existing column", sqlparser.String(expr)))
}
if added {
for _, source := range node.sources[1:] {
_, _, err := pushProjection(ctx, expr, source, inner, reuseCol, hasAggregation)
if err != nil {
return 0, false, err
}
}
}
return offset, added, nil
}
func pushProjectionIntoSemiJoin(
ctx *plancontext.PlanningContext,
expr *sqlparser.AliasedExpr,
reuseCol bool,
node *semiJoin,
inner, hasAggregation bool,
) (int, bool, error) {
passDownReuseCol := reuseCol
if !reuseCol {
passDownReuseCol = expr.As.IsEmpty()
}
offset, added, err := pushProjection(ctx, expr, node.lhs, inner, passDownReuseCol, hasAggregation)
if err != nil {
return 0, false, err
}
column := -(offset + 1)
if reuseCol && !added {
for idx, col := range node.cols {
if column == col {
return idx, false, nil
}
}
}
node.cols = append(node.cols, column)
return len(node.cols) - 1, true, nil
}
func pushProjectionIntoOA(ctx *plancontext.PlanningContext, expr *sqlparser.AliasedExpr, node *orderedAggregate, inner, hasAggregation bool) (int, bool, error) {
colName, isColName := expr.Expr.(*sqlparser.ColName)
for _, aggregate := range node.aggregates {
if ctx.SemTable.EqualsExpr(aggregate.Expr, expr.Expr) {
return aggregate.Col, false, nil
}
if isColName && colName.Name.EqualString(aggregate.Alias) {
return aggregate.Col, false, nil
}
}
for _, key := range node.groupByKeys {
if ctx.SemTable.EqualsExpr(key.Expr, expr.Expr) {
return key.KeyCol, false, nil
}
}
offset, _, err := pushProjection(ctx, expr, node.input, inner, true, hasAggregation)
if err != nil {
return 0, false, err
}
aggr := engine.NewAggregateParam(popcode.AggregateAnyValue, offset, expr.ColumnName())
aggr.Expr = expr.Expr
aggr.Original = expr
node.aggregates = append(node.aggregates, aggr)
return offset, true, nil
}
func pushProjectionIntoSimpleProj(
ctx *plancontext.PlanningContext,
expr *sqlparser.AliasedExpr,
node *simpleProjection,
inner, hasAggregation, reuseCol bool,
) (int, bool, error) {
offset, _, err := pushProjection(ctx, expr, node.input, inner, true, hasAggregation)
if err != nil {
return 0, false, err
}
for i, value := range node.eSimpleProj.Cols {
// we return early if we already have the column in the simple projection's
// output list so we do not add it again.
if reuseCol && value == offset {
return i, false, nil
}
}
node.eSimpleProj.Cols = append(node.eSimpleProj.Cols, offset)
return len(node.eSimpleProj.Cols) - 1, true, nil
}
func pushProjectionIntoJoin(
ctx *plancontext.PlanningContext,
expr *sqlparser.AliasedExpr,
node *join,
reuseCol, inner, hasAggregation bool,
) (int, bool, error) {
lhsSolves := node.Left.ContainsTables()
rhsSolves := node.Right.ContainsTables()
deps := ctx.SemTable.RecursiveDeps(expr.Expr)
var column int
var appended bool
passDownReuseCol := reuseCol
if !reuseCol {
passDownReuseCol = expr.As.IsEmpty()
}
switch {
case deps.IsSolvedBy(lhsSolves):
offset, added, err := pushProjection(ctx, expr, node.Left, inner, passDownReuseCol, hasAggregation)
if err != nil {
return 0, false, err
}
column = -(offset + 1)
appended = added
case deps.IsSolvedBy(rhsSolves):
offset, added, err := pushProjection(ctx, expr, node.Right, inner && node.Opcode != engine.LeftJoin, passDownReuseCol, hasAggregation)
if err != nil {
return 0, false, err
}
column = offset + 1
appended = added
default:
// if an expression has aggregation, then it should not be split up and pushed to both sides,
// for example an expression like count(*) will have dependencies on both sides, but we should not push it
// instead we should return an error
if hasAggregation {
return 0, false, vterrors.VT12001("cross-shard query with aggregates")
}
// now we break the expression into left and right side dependencies and rewrite the left ones to bind variables
joinCol, err := operators.BreakExpressionInLHSandRHS(ctx, expr.Expr, lhsSolves)
if err != nil {
return 0, false, err
}
// go over all the columns coming from the left side of the tree and push them down. While at it, also update the bind variable map.
// It is okay to reuse the columns on the left side since
// the final expression which will be selected will be pushed into the right side.
for i, col := range joinCol.LHSExprs {
colOffset, _, err := pushProjection(ctx, &sqlparser.AliasedExpr{Expr: col}, node.Left, inner, true, false)
if err != nil {
return 0, false, err
}
node.Vars[joinCol.BvNames[i]] = colOffset
}
// push the rewritten expression on the right side of the tree. Here we should take care whether we want to reuse the expression or not.
expr.Expr = joinCol.RHSExpr
offset, added, err := pushProjection(ctx, expr, node.Right, inner && node.Opcode != engine.LeftJoin, passDownReuseCol, false)
if err != nil {
return 0, false, err
}
column = offset + 1
appended = added
}
if reuseCol && !appended {
for idx, col := range node.Cols {
if column == col {
return idx, false, nil
}
}
// the column was not appended to either child, but we could not find it in out cols list,
// so we'll still add it
}
node.Cols = append(node.Cols, column)
return len(node.Cols) - 1, true, nil
}
func pushProjectionIntoHashJoin(
ctx *plancontext.PlanningContext,
expr *sqlparser.AliasedExpr,
node *hashJoin,
reuseCol, inner, hasAggregation bool,
) (int, bool, error) {
lhsSolves := node.Left.ContainsTables()
rhsSolves := node.Right.ContainsTables()
deps := ctx.SemTable.RecursiveDeps(expr.Expr)
var column int
var appended bool
passDownReuseCol := reuseCol
if !reuseCol {
passDownReuseCol = expr.As.IsEmpty()
}
switch {
case deps.IsSolvedBy(lhsSolves):
offset, added, err := pushProjection(ctx, expr, node.Left, inner, passDownReuseCol, hasAggregation)
if err != nil {
return 0, false, err
}
column = -(offset + 1)
appended = added
case deps.IsSolvedBy(rhsSolves):
offset, added, err := pushProjection(ctx, expr, node.Right, inner && node.Opcode != engine.LeftJoin, passDownReuseCol, hasAggregation)
if err != nil |
column = offset + 1
appended = added
default:
// if an expression has aggregation, then it should not be split up and pushed to both sides,
// for example an expression like count(*) will have dependencies on both sides, but we should not push it
// instead we should return an error
if hasAggregation {
return 0, false, vterrors.VT12001("cross-shard query with aggregates")
}
return 0, false, vterrors.VT12001("hash join with projection from both sides of the join")
}
if reuseCol && !appended {
for idx, col := range node.Cols {
if column == col {
return idx, false, nil
}
}
// the column was not appended to either child, but we could not find it in out cols list,
// so we'll still add it
}
node.Cols = append(node.Cols, column)
return len(node.Cols) - 1, true, nil
}
func addExpressionToRoute(ctx *plancontext.PlanningContext, rb *route, expr *sqlparser.AliasedExpr, reuseCol bool) (int, bool, error) {
if reuseCol {
if i := checkIfAlreadyExists(expr, rb.Select, ctx.SemTable); i != -1 {
return i, false, nil
}
}
sqlparser.RemoveKeyspaceFromColName(expr.Expr)
sel, isSel := rb.Select.(*sqlparser.Select)
if !isSel {
return 0, false, vterrors.VT12001(fmt.Sprintf("pushing projection '%s' on %T", sqlparser.String(expr), rb.Select))
}
if ctx.RewriteDerivedExpr {
// if we are trying to push a projection that belongs to a DerivedTable
// we rewrite that expression, so it matches the column name used inside
// that derived table.
err := rewriteProjectionOfDerivedTable(expr, ctx.SemTable)
if err != nil {
return 0, false, err
}
}
offset := len(sel.SelectExprs)
sel.SelectExprs = append(sel.SelectExprs, expr)
return offset, true, nil
}
func rewriteProjectionOfDerivedTable(expr *sqlparser.AliasedExpr, semTable *semantics.SemTable) error {
ti, err := semTable.TableInfoForExpr(expr.Expr)
if err != nil && err != semantics.ErrNotSingleTable {
return err
}
_, isDerivedTable := ti.(*semantics.DerivedTable)
if isDerivedTable {
expr.Expr = semantics.RewriteDerivedTableExpression(expr.Expr, ti)
}
return nil
}
| {
return 0, false, err
} | conditional_block |
projection_pushing.go | /*
Copyright 2022 The Vitess Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package planbuilder
import (
"fmt"
"vitess.io/vitess/go/vt/sqlparser"
"vitess.io/vitess/go/vt/vterrors"
"vitess.io/vitess/go/vt/vtgate/engine"
popcode "vitess.io/vitess/go/vt/vtgate/engine/opcode"
"vitess.io/vitess/go/vt/vtgate/planbuilder/operators"
"vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext"
"vitess.io/vitess/go/vt/vtgate/semantics"
)
// pushProjection pushes a projection to the plan.
func pushProjection(
ctx *plancontext.PlanningContext,
expr *sqlparser.AliasedExpr,
plan logicalPlan,
inner, reuseCol, hasAggregation bool,
) (offset int, added bool, err error) {
switch node := plan.(type) {
case *limit, *projection, *pulloutSubquery, *distinct, *filter:
// All of these either push to the single source, or push to the LHS
src := node.Inputs()[0]
return pushProjection(ctx, expr, src, inner, reuseCol, hasAggregation)
case *route:
return addExpressionToRoute(ctx, node, expr, reuseCol)
case *hashJoin:
return pushProjectionIntoHashJoin(ctx, expr, node, reuseCol, inner, hasAggregation)
case *join:
return pushProjectionIntoJoin(ctx, expr, node, reuseCol, inner, hasAggregation)
case *simpleProjection:
return pushProjectionIntoSimpleProj(ctx, expr, node, inner, hasAggregation, reuseCol)
case *orderedAggregate:
return pushProjectionIntoOA(ctx, expr, node, inner, hasAggregation)
case *vindexFunc:
return pushProjectionIntoVindexFunc(node, expr, reuseCol)
case *semiJoin:
return pushProjectionIntoSemiJoin(ctx, expr, reuseCol, node, inner, hasAggregation)
case *concatenate:
return pushProjectionIntoConcatenate(ctx, expr, hasAggregation, node, inner, reuseCol)
default:
return 0, false, vterrors.VT13001(fmt.Sprintf("push projection does not yet support: %T", node))
}
}
func pushProjectionIntoVindexFunc(node *vindexFunc, expr *sqlparser.AliasedExpr, reuseCol bool) (int, bool, error) {
colsBefore := len(node.eVindexFunc.Cols)
i, err := node.SupplyProjection(expr, reuseCol)
if err != nil {
return 0, false, err
}
return i /* col added */, len(node.eVindexFunc.Cols) > colsBefore, nil
}
func pushProjectionIntoConcatenate(ctx *plancontext.PlanningContext, expr *sqlparser.AliasedExpr, hasAggregation bool, node *concatenate, inner bool, reuseCol bool) (int, bool, error) {
if hasAggregation {
return 0, false, vterrors.VT12001("aggregation on UNIONs")
}
offset, added, err := pushProjection(ctx, expr, node.sources[0], inner, reuseCol, hasAggregation)
if err != nil {
return 0, false, err
}
if added && ctx.SemTable.DirectDeps(expr.Expr).NonEmpty() {
return 0, false, vterrors.VT13001(fmt.Sprintf("pushing projection %v on concatenate should reference an existing column", sqlparser.String(expr)))
}
if added {
for _, source := range node.sources[1:] {
_, _, err := pushProjection(ctx, expr, source, inner, reuseCol, hasAggregation)
if err != nil {
return 0, false, err
}
}
}
return offset, added, nil
}
func pushProjectionIntoSemiJoin(
ctx *plancontext.PlanningContext,
expr *sqlparser.AliasedExpr,
reuseCol bool,
node *semiJoin,
inner, hasAggregation bool,
) (int, bool, error) {
passDownReuseCol := reuseCol
if !reuseCol {
passDownReuseCol = expr.As.IsEmpty()
}
offset, added, err := pushProjection(ctx, expr, node.lhs, inner, passDownReuseCol, hasAggregation)
if err != nil {
return 0, false, err
}
column := -(offset + 1)
if reuseCol && !added {
for idx, col := range node.cols {
if column == col {
return idx, false, nil
}
}
}
node.cols = append(node.cols, column)
return len(node.cols) - 1, true, nil
}
func pushProjectionIntoOA(ctx *plancontext.PlanningContext, expr *sqlparser.AliasedExpr, node *orderedAggregate, inner, hasAggregation bool) (int, bool, error) {
colName, isColName := expr.Expr.(*sqlparser.ColName)
for _, aggregate := range node.aggregates {
if ctx.SemTable.EqualsExpr(aggregate.Expr, expr.Expr) {
return aggregate.Col, false, nil
}
if isColName && colName.Name.EqualString(aggregate.Alias) {
return aggregate.Col, false, nil
}
}
for _, key := range node.groupByKeys {
if ctx.SemTable.EqualsExpr(key.Expr, expr.Expr) {
return key.KeyCol, false, nil
}
}
offset, _, err := pushProjection(ctx, expr, node.input, inner, true, hasAggregation)
if err != nil {
return 0, false, err
}
aggr := engine.NewAggregateParam(popcode.AggregateAnyValue, offset, expr.ColumnName())
aggr.Expr = expr.Expr
aggr.Original = expr
node.aggregates = append(node.aggregates, aggr)
return offset, true, nil
}
func | (
ctx *plancontext.PlanningContext,
expr *sqlparser.AliasedExpr,
node *simpleProjection,
inner, hasAggregation, reuseCol bool,
) (int, bool, error) {
offset, _, err := pushProjection(ctx, expr, node.input, inner, true, hasAggregation)
if err != nil {
return 0, false, err
}
for i, value := range node.eSimpleProj.Cols {
// we return early if we already have the column in the simple projection's
// output list so we do not add it again.
if reuseCol && value == offset {
return i, false, nil
}
}
node.eSimpleProj.Cols = append(node.eSimpleProj.Cols, offset)
return len(node.eSimpleProj.Cols) - 1, true, nil
}
func pushProjectionIntoJoin(
ctx *plancontext.PlanningContext,
expr *sqlparser.AliasedExpr,
node *join,
reuseCol, inner, hasAggregation bool,
) (int, bool, error) {
lhsSolves := node.Left.ContainsTables()
rhsSolves := node.Right.ContainsTables()
deps := ctx.SemTable.RecursiveDeps(expr.Expr)
var column int
var appended bool
passDownReuseCol := reuseCol
if !reuseCol {
passDownReuseCol = expr.As.IsEmpty()
}
switch {
case deps.IsSolvedBy(lhsSolves):
offset, added, err := pushProjection(ctx, expr, node.Left, inner, passDownReuseCol, hasAggregation)
if err != nil {
return 0, false, err
}
column = -(offset + 1)
appended = added
case deps.IsSolvedBy(rhsSolves):
offset, added, err := pushProjection(ctx, expr, node.Right, inner && node.Opcode != engine.LeftJoin, passDownReuseCol, hasAggregation)
if err != nil {
return 0, false, err
}
column = offset + 1
appended = added
default:
// if an expression has aggregation, then it should not be split up and pushed to both sides,
// for example an expression like count(*) will have dependencies on both sides, but we should not push it
// instead we should return an error
if hasAggregation {
return 0, false, vterrors.VT12001("cross-shard query with aggregates")
}
// now we break the expression into left and right side dependencies and rewrite the left ones to bind variables
joinCol, err := operators.BreakExpressionInLHSandRHS(ctx, expr.Expr, lhsSolves)
if err != nil {
return 0, false, err
}
// go over all the columns coming from the left side of the tree and push them down. While at it, also update the bind variable map.
// It is okay to reuse the columns on the left side since
// the final expression which will be selected will be pushed into the right side.
for i, col := range joinCol.LHSExprs {
colOffset, _, err := pushProjection(ctx, &sqlparser.AliasedExpr{Expr: col}, node.Left, inner, true, false)
if err != nil {
return 0, false, err
}
node.Vars[joinCol.BvNames[i]] = colOffset
}
// push the rewritten expression on the right side of the tree. Here we should take care whether we want to reuse the expression or not.
expr.Expr = joinCol.RHSExpr
offset, added, err := pushProjection(ctx, expr, node.Right, inner && node.Opcode != engine.LeftJoin, passDownReuseCol, false)
if err != nil {
return 0, false, err
}
column = offset + 1
appended = added
}
if reuseCol && !appended {
for idx, col := range node.Cols {
if column == col {
return idx, false, nil
}
}
// the column was not appended to either child, but we could not find it in out cols list,
// so we'll still add it
}
node.Cols = append(node.Cols, column)
return len(node.Cols) - 1, true, nil
}
func pushProjectionIntoHashJoin(
ctx *plancontext.PlanningContext,
expr *sqlparser.AliasedExpr,
node *hashJoin,
reuseCol, inner, hasAggregation bool,
) (int, bool, error) {
lhsSolves := node.Left.ContainsTables()
rhsSolves := node.Right.ContainsTables()
deps := ctx.SemTable.RecursiveDeps(expr.Expr)
var column int
var appended bool
passDownReuseCol := reuseCol
if !reuseCol {
passDownReuseCol = expr.As.IsEmpty()
}
switch {
case deps.IsSolvedBy(lhsSolves):
offset, added, err := pushProjection(ctx, expr, node.Left, inner, passDownReuseCol, hasAggregation)
if err != nil {
return 0, false, err
}
column = -(offset + 1)
appended = added
case deps.IsSolvedBy(rhsSolves):
offset, added, err := pushProjection(ctx, expr, node.Right, inner && node.Opcode != engine.LeftJoin, passDownReuseCol, hasAggregation)
if err != nil {
return 0, false, err
}
column = offset + 1
appended = added
default:
// if an expression has aggregation, then it should not be split up and pushed to both sides,
// for example an expression like count(*) will have dependencies on both sides, but we should not push it
// instead we should return an error
if hasAggregation {
return 0, false, vterrors.VT12001("cross-shard query with aggregates")
}
return 0, false, vterrors.VT12001("hash join with projection from both sides of the join")
}
if reuseCol && !appended {
for idx, col := range node.Cols {
if column == col {
return idx, false, nil
}
}
// the column was not appended to either child, but we could not find it in out cols list,
// so we'll still add it
}
node.Cols = append(node.Cols, column)
return len(node.Cols) - 1, true, nil
}
func addExpressionToRoute(ctx *plancontext.PlanningContext, rb *route, expr *sqlparser.AliasedExpr, reuseCol bool) (int, bool, error) {
if reuseCol {
if i := checkIfAlreadyExists(expr, rb.Select, ctx.SemTable); i != -1 {
return i, false, nil
}
}
sqlparser.RemoveKeyspaceFromColName(expr.Expr)
sel, isSel := rb.Select.(*sqlparser.Select)
if !isSel {
return 0, false, vterrors.VT12001(fmt.Sprintf("pushing projection '%s' on %T", sqlparser.String(expr), rb.Select))
}
if ctx.RewriteDerivedExpr {
// if we are trying to push a projection that belongs to a DerivedTable
// we rewrite that expression, so it matches the column name used inside
// that derived table.
err := rewriteProjectionOfDerivedTable(expr, ctx.SemTable)
if err != nil {
return 0, false, err
}
}
offset := len(sel.SelectExprs)
sel.SelectExprs = append(sel.SelectExprs, expr)
return offset, true, nil
}
func rewriteProjectionOfDerivedTable(expr *sqlparser.AliasedExpr, semTable *semantics.SemTable) error {
ti, err := semTable.TableInfoForExpr(expr.Expr)
if err != nil && err != semantics.ErrNotSingleTable {
return err
}
_, isDerivedTable := ti.(*semantics.DerivedTable)
if isDerivedTable {
expr.Expr = semantics.RewriteDerivedTableExpression(expr.Expr, ti)
}
return nil
}
| pushProjectionIntoSimpleProj | identifier_name |
app.js | $(function () {
var $fullText = $('.admin-fullText');
$('#admin-fullscreen').on('click', function () {
$.AMUI.fullscreen.toggle();
});
$(document).on($.AMUI.fullscreen.raw.fullscreenchange, function () {
$fullText.text($.AMUI.fullscreen.isFullscreen ? '退出全屏' : '开启全屏'); | });
var dataType = $('body').attr('data-type');
for (key in pageData) {
if (key == dataType) {
pageData[key]();
}
}
$('.tpl-switch').find('.tpl-switch-btn-view').on('click', function () {
$(this).prev('.tpl-switch-btn').prop("checked", function () {
if ($(this).is(':checked')) {
return false
} else {
return true
}
})
// console.log('123123123')
});
/**
* 下拉菜单支持键盘事件
*/
var keyState = false, $dropdown, $prevLi;
$("body").on("opened.dropdown.amui", ".am-dropdown", function () {
$dropdown = $(this);
keyState = true;
$prevLi = undefined;
});
$("body").on("closed.dropdown.amui", ".am-dropdown", function () {
keyState = false;
$prevLi = undefined;
});
$("body").on("keyup", function (e) {
if (keyState) {
if (e.keyCode == 38) {
//向上
if ($prevLi != undefined) {
if ($prevLi.prev().get(0) == undefined) {
return;
}
$prevLi = $prevLi.prev();
$prevLi.children().focus();
}
} else if (e.keyCode == 40) {
//向下
if ($prevLi != undefined) {
if ($prevLi.next().get(0) == undefined) {
return;
}
$prevLi = $prevLi.next();
$prevLi.children().focus();
} else {
$prevLi = $dropdown.find("li").first();
$prevLi.children().focus();
}
} else if (e.keyCode == 13 && $prevLi != undefined) {
$dropdown.dropdown('close');
}
}
});
$("input").val("");
//消息提醒
var msg = getQueryString()["msg"];
if (msg != null) {
toastr.info(msg);
}
//获取url中的参数
function getQueryString() {
var qs = location.search.substr(1), // 获取url中"?"符后的字串
args = {}, // 保存参数数据的对象
items = qs.length ? qs.split("&") : [], // 取得每一个参数项,
item = null,
len = items.length;
for (var i = 0; i < len; i++) {
item = items[i].split("=");
var name = decodeURIComponent(item[0]),
value = decodeURIComponent(item[1]);
if (name) {
args[name] = value;
}
}
return args;
}
//获取总积分
subOtherData("/m/queryIntegral").done(function (result) {
$("#sum_integral").text(result.data);
});
})
// ==========================
// 侧边导航下拉列表
// ==========================
$('.tpl-left-nav-link-list').on('click', function () {
$(this).siblings('.tpl-left-nav-sub-menu').slideToggle(80)
.end()
.find('.tpl-left-nav-more-ico').toggleClass('tpl-left-nav-more-ico-rotate');
})
// ==========================
// 头部导航隐藏菜单
// ==========================
$('.tpl-header-nav-hover-ico').on('click', function () {
$('.tpl-left-nav').toggle();
$('.tpl-content-wrapper').toggleClass('tpl-content-wrapper-hover');
})
// 页面数据
var pageData = {
// ===============================================
// 首页
// ===============================================
'index': function indexData() {
},
}
/**
* 初始化表格
* @param $table
* @param url
* @param param
* @param noHover
*/
function initTableByServer($table, url, param, noHover) {
if (noHover == undefined) {
$table.on("mouseover", "td", function () {
$("td").stop(true, true);
$(prevTd).animate({backgroundColor: ($(prevTd).parent().index()) % 2 == 0 ? "#f9f9f9" : ""}, sleep);
$(prevTd).siblings().animate({backgroundColor: ($(prevTd).parent().index()) % 2 == 0 ? "#f9f9f9" : ""}, sleep);
$(this).animate({backgroundColor: "#e4e7f3"}, sleep);
$(this).siblings().animate({backgroundColor: "#e4e7f3"}, sleep);
$(this).attr("title", $(this).text());
prevTd = $(this);
});
}
return $table.bootstrapTable({
showHeader: true, //是否显示列头,
toolbarAlign: "right",
editable: param.edit === undefined ? false : param.edit,//开启编辑模式
url: url,
method: 'post', //请求方式(*)
toolbar: '#toolbar', //工具按钮用哪个容器
dataType: 'json',
striped: true, //是否显示行间隔色
cache: true, //是否使用缓存,默认为true,所以一般情况下需要设置一下这个属性(*)
sortable: true, //是否启用排序
sortOrder: "asc", //排序方式
queryParams: param.query,//传递参数(*)
queryParamsType: '', //设置参数类型为restfull风格
contentType: 'application/x-www-form-urlencoded;charset=UTF-8;', //post方式提交json数据时,选择表单类型
sidePagination: "server", //分页方式:client客户端分页,server服务端分页(*)
pageNumber: 1, //初始化加载第一页,默认第一页
pageSize: param.size == undefined ? 10 : param.size, //每页的记录行数(*)
pageList: param.pageList ? [10, 20, 50, 100] : param.pageList, //可供选择的每页的行数(*)
strictSearch: false, //false:模糊搜索,true精确搜索
clickToSelect: true, //是否启用点击选中行
height: param.height, //行高,如果没有设置height属性,表格自动根据记录条数觉得表格高度 "550"
pagination: param.pagination == undefined ? true : param.pagination, //是否显示分页(*)
// paginationLoop: param.paginationLoop,
// onlyInfoPagination: param.onlyInfoPagination,
uniqueId: "id", //每一行的唯一标识,一般为主键列
cardView: false, //是否显示详细视图
detailView: param.detailView == undefined ? false : param.detailView, //是否显示父子表
responseHandler: param.res, //服务器返回数据后的处理
columns: param.columns, //列集合
search: param.search == undefined ? true : param.search, //是否开启搜索
showExport: true,//显示导出按钮
exportDataType: "basic",//导出数据类型
exportTypes: ["excel", 'xls'], //导出类型
showFooter: false,
onLoadSuccess: param.loadSuccess,
classes: param.classes == undefined ? "am-table am-table-striped am-table-hover table-main" : param.classes,
onEditableSave: param.onEditableSave == undefined ? " " : param.onEditableSave,
onClickRow: function (row, element, field) {
// $(element).addClass('select');//添加当前选中的 success样式用于区别
},
onDblClickRow: param.onDblClickRow == undefined ? null : param.onDblClickRow,
onExpandRow: param.onExpandRow == undefined ? null : param.onExpandRow
});
}
/**
* 提交普通文本
* @param url
* @param params
* @returns {*}
*/
function subOtherData(url, params, traditional) {
return $.ajax({
url: url,
type: 'post',
data: params,
traditional: traditional === undefined ? false : traditional,
dataType: 'json',
error: function (e) {
toastr.error(e.state);
},
});
}
/**
* 日期转换
* @param fmt
* @returns {*}
* @constructor
*/
Date.prototype.format = function (fmt) { //author: meizz
var o = {
"M+": this.getMonth() + 1, //月份
"d+": this.getDate(), //日
"h+": this.getHours(), //小时
"m+": this.getMinutes(), //分
"s+": this.getSeconds(), //秒
"q+": Math.floor((this.getMonth() + 3) / 3), //季度
"S": this.getMilliseconds() //毫秒
};
if (/(y+)/.test(fmt)) fmt = fmt.replace(RegExp.$1, (this.getFullYear() + "").substr(4 - RegExp.$1.length));
for (var k in o)
if (new RegExp("(" + k + ")").test(fmt)) fmt = fmt.replace(RegExp.$1, (RegExp.$1.length == 1) ? (o[k]) : (("00" + o[k]).substr(("" + o[k]).length)));
return fmt;
};
//获取这周的周一
function getFirstDayOfWeek(date) {
var weekday = date.getDay() || 7; //获取星期几,getDay()返回值是 0(周日) 到 6(周六) 之间的一个整数。0||7为7,即weekday的值为1-7
date.setDate(date.getDate() - weekday + 1);//往前算(weekday-1)天,年份、月份会自动变化
return date.format("yyyy-MM-dd");
}
//获取当月第一天
function getFirstDayOfMonth(date) {
date.setDate(1);
return date.format("yyyy-MM-dd");
}
//获取当季第一天
function getFirstDayOfSeason(date) {
var month = date.getMonth();
if (month < 3) {
date.setMonth(0);
} else if (2 < month && month < 6) {
date.setMonth(3);
} else if (5 < month && month < 9) {
date.setMonth(6);
} else if (8 < month && month < 11) {
date.setMonth(9);
}
date.setDate(1);
return date.format("yyyy-MM-dd");
}
//获取当年第一天
function getFirstDayOfYear(date) {
date.setDate(1);
date.setMonth(0);
return date.format("yyyy-MM-dd");
}
/**
* 获取上一个月的日期
* @date 格式为yyyy-mm-dd的日期,如:2014-01-25
*/
function getPreMonth(date) {
var arr = date.split('-');
var year = arr[0]; //获取当前日期的年份
var month = arr[1]; //获取当前日期的月份
var day = arr[2]; //获取当前日期的日
var days = new Date(year, month, 0);
days = days.getDate(); //获取当前日期中月的天数
var year2 = year;
var month2 = parseInt(month) - 1;
if (month2 == 0) {//如果是1月份,则取上一年的12月份
year2 = parseInt(year2) - 1;
month2 = 12;
}
var day2 = day;
var days2 = new Date(year2, month2, 0);
days2 = days2.getDate();
if (day2 > days2) {//如果原来日期大于上一月的日期,则取当月的最大日期。比如3月的30日,在2月中没有30
day2 = days2;
}
var t2 = year2 + '-' + parse0(month2) + '-' + parse0(day2);
return t2;
}
/**
* 获取日期中月份的第一天
*/
function getfirstDate(firstDate) {
firstDate.setDate(1); //第一天
var year = firstDate.getFullYear();
firstDate.setMonth((firstDate.getMonth() + 1));
var month = parse0(firstDate.getMonth());
if (month == "00") {
month = "12";
}
return year + "-" + month + "-" + parse0(firstDate.getDate());
}
/**
* 获取日期中月份的最后一天
* @param data
*/
function getEndDate(endDate) {
endDate.setMonth((endDate.getMonth() + 1));
endDate.setDate(0); //最后一天
return endDate.getFullYear() + "-" + parse0(endDate.getMonth() + 1) + "-" + parse0(endDate.getDate());
}
/**
* 日期补零方法
* @param s
* @returns {string}
*/
function parse0(s) {
s += "";
return s.length < 2 ? '0' + s : s;
}
/**
* 日期转周几
* @param date
* @returns {*}
*/
function getMyDay(date) {
var week;
if (date.getDay() == 0) week = "周日";
else if (date.getDay() == 1) week = "周一";
else if (date.getDay() == 2) week = "周二";
else if (date.getDay() == 3) week = "周三";
else if (date.getDay() == 4) week = "周四";
else if (date.getDay() == 5) week = "周五";
else if (date.getDay() == 6) week = "周六";
return week;
} | random_line_split | |
app.js | $(function () {
var $fullText = $('.admin-fullText');
$('#admin-fullscreen').on('click', function () {
$.AMUI.fullscreen.toggle();
});
$(document).on($.AMUI.fullscreen.raw.fullscreenchange, function () {
$fullText.text($.AMUI.fullscreen.isFullscreen ? '退出全屏' : '开启全屏');
});
var dataType = $('body').attr('data-type');
for (key in pageData) {
if (key == dataType) {
pageData[key]();
}
}
$('.tpl-switch').find('.tpl-switch-btn-view').on('click', function () {
$(this).prev('.tpl-switch-btn').prop("checked", function () {
if ($(this).is(':checked')) {
return false
} else {
return true
}
})
// console.log('123123123')
});
/**
* 下拉菜单支持键盘事件
*/
var keyState = false, $dropdown, $prevLi;
$("body").on("opened.dropdown.amui", ".am-dropdown", function () {
$dropdown = $(this);
keyState = true;
$prevLi = undefined;
});
$("body").on("closed.dropdown.amui", ".am-dropdown", function () {
keyState = false;
$prevLi = undefined;
});
$("body").on("keyup", function (e) {
if (keyState) {
if (e.keyCode == 38) {
//向上
if ($prevLi != undefined) {
if ($prevLi.prev().get(0) == undefined) {
return;
}
$prevLi = $prevLi.prev();
$prevLi.children().focus();
}
} else if (e.keyCode == 40) {
//向下
if ($prevLi != undefined) {
if ($prevLi.next().get(0) == undefined) {
return;
}
$prevLi = $prevLi.next();
$prevLi.children().focus();
} else {
$prevLi = $dropdown.find("li").first();
$prevLi.children().focus();
}
} else if (e.keyCode == 13 && $prevLi != undefined) {
$dropdown.dropdown('close');
}
}
});
$("input").val("");
//消息提醒
var msg = getQueryString()["msg"];
if (msg != null) {
toastr.info(msg);
}
//获取url中的参数
function getQueryString() {
var qs = location.search.substr(1), // 获取url中"?"符后的字串
args = {}, // 保存参数数据的对象
items = qs.length ? qs.split("&") : [], // 取得每一个参数项,
item = null,
len = items.length;
for (var i = 0; i < len; i++) {
item = items[i].split("=");
var name = decodeURIComponent(item[0]),
value = decodeURIComponent(item[1]);
if (name) {
args[name] = value;
}
}
return args;
}
//获取总积分
subOtherData("/m/queryIntegral").done(function (result) {
$("#sum_integral").text(result.data);
});
})
// ==========================
// 侧边导航下拉列表
// ==========================
$('.tpl-left-nav-link-list').on('click', function () {
$(this).siblings('.tpl-left-nav-sub-menu').slideToggle(80)
.end()
.find('.tpl-left-nav-more-ico').toggleClass('tpl-left-nav-more-ico-rotate');
})
// ==========================
// 头部导航隐藏菜单
// ==========================
$('.tpl-header-nav-hover-ico').on('click', function () {
$('.tpl-left-nav').toggle();
$('.tpl-content-wrapper').toggleClass('tpl-content-wrapper-hover');
})
// 页面数据
var pageData = {
// ===============================================
// 首页
// ===============================================
'index': function indexData() {
},
}
/**
* 初始化表格
* @param $table
* @param url
* @param param
* @param noHover
*/
function initTableByServer($table, url, param, noHover) {
if (noHover == undefined) {
$table.on("mouseover", "td", function () {
$("td").stop(true, true);
$(prevTd).animate({backgroundColor: ($(prevTd).parent().index()) % 2 == 0 ? "#f9f9f9" : ""}, sleep);
$(prevTd).siblings().animate({backgroundColor: ($(prevTd).parent().index()) % 2 == 0 ? "#f9f9f9" : ""}, sleep);
$(this).animate({backgroundColor: "#e4e7f3"}, sleep);
$(this).siblings().animate({backgroundColor: "#e4e7f3"}, sleep);
$(this).attr("title", $(this).text());
prevTd = $(this);
});
}
return $table.bootstrapTable({
showHeader: true, //是否显示列头,
toolbarAlign: "right",
editable: param.edit === undefined ? false : param.edit,//开启编辑模式
url: url,
method: 'post', //请求方式(*)
toolbar: '#toolbar', //工具按钮用哪个容器
dataType: 'json',
striped: true, //是否显示行间隔色
cache: true, //是否使用缓存,默认为true,所以一般情况下需要设置一下这个属性(*)
sortable: true, //是否启用排序
sortOrder: "asc", //排序方式
queryParams: param.query,//传递参数(*)
queryParamsType: '', //设置参数类型为restfull风格
contentType: 'application/x-www-form-urlencoded;charset=UTF-8;', //post方式提交json数据时,选择表单类型
sidePagination: "server", //分页方式:client客户端分页,server服务端分页(*)
pageNumber: 1, //初始化加载第一页,默认第一页
pageSize: param.size == undefined ? 10 : param.size, //每页的记录行数(*)
pageList: param.pageList ? [10, 20, 50, 100] : param.pageList, //可供选择的每页的行数(*)
strictSearch: false, //false:模糊搜索,true精确搜索
clickToSelect: true, //是否启用点击选中行
height: param.height, //行高,如果没有设置height属性,表格自动根据记录条数觉得表格高度 "550"
pagination: param.pagination == undefined ? true : param.pagination, //是否显示分页(*)
// paginationLoop: param.paginationLoop,
// onlyInfoPagination: param.onlyInfoPagination,
uniqueId: "id", //每一行的唯一标识,一般为主键列
cardView: false, //是否显示详细视图
detailView: param.detailView == undefined ? false : param.detailView, //是否显示父子表
responseHandler: param.res, //服务器返回数据后的处理
columns: param.columns, //列集合
search: param.search == undefined ? true : param.search, //是否开启搜索
showExport: true,//显示导出按钮
exportDataType: "basic",//导出数据类型
exportTypes: ["excel", 'xls'], //导出类型
showFooter: false,
onLoadSuccess: param.loadSuccess,
classes: param.classes == undefined ? "am-table am-table-striped am-table-hover table-main" : param.classes,
onEditableSave: param.onEditableSave == undefined ? " " : param.onEditableSave,
onClickRow: function (row, element, field) {
// $(element).addClass('select');//添加当前选中的 success样式用于区别
},
onDblClickRow: param.onDblClickRow == undefined ? null : param.onDblClickRow,
onExpandRow: param.onExpandRow == undefined ? null : param.onExpandRow
});
}
/**
* 提交普通文本
* @param url
* @param params
* @returns {*}
*/
function subOtherData(url, params, traditional) {
return $.ajax({
url: url,
type: 'post',
data: params,
traditional: traditional === undefined ? false : traditional,
dataType: 'json',
error: function (e) {
toastr.error(e.state);
},
});
}
/**
* 日期转换
* @param fmt
* @returns {*}
* @constructor
*/
Date.prototype.format = function (fmt) { //author: meizz
var o = {
"M+": this.getMonth() + 1, //月份
"d+": this.getDate(), //日
"h+": this.getHours(), //小时
"m+": this.getMinutes(), //分
"s+": this.getSeconds(), //秒
"q+": Math.floor((this.getMonth() + 3) / 3), //季度
"S": this.getMilliseconds() //毫秒
};
if (/(y+)/.test(fmt)) fmt = fmt.replace(RegExp.$1, (this.getFullYear() + "").substr(4 - RegExp.$1.length));
for (var k in o)
if (new RegExp("(" + k + ")").test(fmt)) fmt = fmt.replace(RegExp.$1, (RegExp.$1.length == 1) ? (o[k]) : (("00" + o[k]).substr(("" + o[k]).length)));
return fmt;
};
//获取这周的周一
function getFirstDayOfWeek(date) {
var weekday = date.getDay() || 7; //获取星期几,getDay()返回值是 0(周日) 到 6(周六) 之间的一个整数。0||7为7,即weekday的值为1-7
date.setDate(date.getDate() - weekday + 1);//往前算(weekday-1)天,年份、月份会自动变化
return date.format("yyyy-MM-dd");
}
//获取当月第一天
function getFirstDayOfMonth(date) {
date.setDate(1);
return date.format("yyyy-MM-dd");
}
//获取当季第一天
function getFirstDayOfSeason(date) {
var month = date.getMonth();
if (month < 3) {
date.setMonth(0);
} else if (2 < month && month < 6) {
date.setMonth(3);
} else if (5 < month && month < 9) {
date.setMonth(6);
} else if (8 < month && month < 11) {
date.setMonth(9);
}
date.setDate(1);
return date.format("yyyy-MM-dd");
}
//获取当年第一天
function getFirstDayOfYear(date) {
date.setDate(1);
date.setMonth(0);
return date.format("yyyy-MM-dd");
}
/**
* 获取上一个月的日期
* @date 格式为yyyy-mm-dd的日期,如:2014-01-25
*/
function getPreMonth(date) {
var arr = date.split('-');
var year = arr[0]; //获取当前日期的年份
var month = arr[1]; //获取当前日期的月份
var day = arr[2]; //获取当前日期的日
var days = new Date(year, month, 0);
days = days.getDate(); //获取当前日期中月的天数
var year2 = year;
var month2 = parseInt(month) - 1;
if (month2 == 0) {//如果是1月份,则取上一年的12月份
year2 = parseInt(year2) - 1;
month2 = 12;
}
var day2 = day;
var days2 = new Date(year2, month2, 0);
days2 = days2.getDate();
if (day2 > days2) {//如果原来日期大于上一月的日期,则取当月的最大日期。比如3月的30日,在2月中没有30
day2 = days2;
}
var t2 = year2 + '-' + parse0(month2) + '-' + parse0(day2);
return t2;
}
/**
* 获取日期中月份的第一天
*/
function getfirstDate(firstDate) {
firstDate.setDate(1); //第一天
var year = firstDate.getFullYear();
firstDate.setMonth((firstDate.getMonth() + 1));
var month = parse0(firstDate.getMonth());
if (month == "00") {
month = "12";
}
return year + "-" + month + "-" + parse0(firstDate.getDate());
}
/**
* 获取日期中月份的最后一天
* @param data
*/
function getEndDate(endDate) {
endDate.setMonth((endDate.getMonth() + 1));
endDate.setDate(0); //最后一天
return endDate.getFullYear() + "-" + parse0(endDate.getMonth() + 1) + "-" + parse0(endDate.getDate());
}
/**
* 日期补零方法
* @param s
* @returns {string}
*/
function parse0(s) {
s += "";
return s.length < 2 ? '0' + s : s;
}
/**
* 日期转周几
* @param date
* @returns {*}
*/
function getMyDay(date) {
var week;
if (date.getDay() == 0) week = "周日";
else if (date.getDay() == 1) week = "周一";
else if (date.getDay() == 2) week = "周二";
else if (date.getDay() == 3) week = "周三";
else if (date.getDay() == 4) week = "周四";
else if (date.getDay() == 5) week = "周五";
else if (date.getDay() == 6) week = "周六";
return week;
}
| conditional_block | ||
app.js | $(function () {
var $fullText = $('.admin-fullText');
$('#admin-fullscreen').on('click', function () {
$.AMUI.fullscreen.toggle();
});
$(document).on($.AMUI.fullscreen.raw.fullscreenchange, function () {
$fullText.text($.AMUI.fullscreen.isFullscreen ? '退出全屏' : '开启全屏');
});
var dataType = $('body').attr('data-type');
for (key in pageData) {
if (key == dataType) {
pageData[key]();
}
}
$('.tpl-switch').find('.tpl-switch-btn-view').on('click', function () {
$(this).prev('.tpl-switch-btn').prop("checked", function () {
if ($(this).is(':checked')) {
return false
} else {
return true
}
})
// console.log('123123123')
});
/**
* 下拉菜单支持键盘事件
*/
var keyState = false, $dropdown, $prevLi;
$("body").on("opened.dropdown.amui", ".am-dropdown", function () {
$dropdown = $(this);
keyState = true;
$prevLi = undefined;
});
$("body").on("closed.dropdown.amui", ".am-dropdown", function () {
keyState = false;
$prevLi = undefined;
});
$("body").on("keyup", function (e) {
if (keyState) {
if (e.keyCode == 38) {
//向上
if ($prevLi != undefined) {
if ($prevLi.prev().get(0) == undefined) {
return;
}
$prevLi = $prevLi.prev();
$prevLi.children().focus();
}
} else if (e.keyCode == 40) {
//向下
if ($prevLi != undefined) {
if ($prevLi.next().get(0) == undefined) {
return;
}
$prevLi = $prevLi.next();
$prevLi.children().focus();
} else {
$prevLi = $dropdown.find("li").first();
$prevLi.children().focus();
}
} else if (e.keyCode == 13 && $prevLi != undefined) {
$dropdown.dropdown('close');
}
}
});
$("input").val("");
//消息提醒
var msg = getQueryString()["msg"];
if (msg != null) {
toastr.info(msg);
}
//获取url中的参数
function getQueryString() {
var qs = location.search.substr(1), // 获取url中"?"符后的字串
args = {}, // 保存参数数据的对象
items = qs.length ? qs.split("&") : [], // 取得每一个参数项,
item = null,
len = items.length;
for (var i = 0; i < len; i++) {
item = items[i].split("=");
var name = decodeURIComponent(item[0]),
value = decodeURIComponent(item[1]);
if (name) {
args[name] = value;
}
}
return args;
}
//获取总积分
subOtherData("/m/queryIntegral").done(function (result) {
$("#sum_integral").text(result.data);
});
})
// ==========================
// 侧边导航下拉列表
// ==========================
$('.tpl-left-nav-link-list').on('click', function () {
$(this).siblings('.tpl-left-nav-sub-menu').slideToggle(80)
.end()
.find('.tpl-left-nav-more-ico').toggleClass('tpl-left-nav-more-ico-rotate');
})
// ==========================
// 头部导航隐藏菜单
// ==========================
$('.tpl-header-nav-hover-ico').on('click', function () {
$('.tpl-left-nav').toggle();
$('.tpl-content-wrapper').toggleClass('tpl-content-wrapper-hover');
})
// 页面数据
var pageData = {
// ===============================================
// 首页
// ===============================================
'index': function indexData() {
},
}
/**
* 初始化表格
* @param $table
* @param url
* @param param
* @param noHover
*/
function initTableByServer($table, url, param, noHover) {
if (noHover == undefined) {
$table.on("mouseover", "td", function () {
$("td").stop(true, true);
$(prevTd).animate({backgroundColor: ($(prevTd).parent().index()) % 2 == 0 ? "#f9f9f9" : ""}, sleep);
$(prevTd).siblings().animate({backgroundColor: ($(prevTd).parent().index()) % 2 == 0 ? "#f9f9f9" : ""}, sleep);
$(this).animate({backgroundColor: "#e4e7f3"}, sleep);
$(this).siblings().animate({backgroundColor: "#e4e7f3"}, sleep);
$(this).attr("title", $(this).text());
prevTd = $(this);
});
}
return $table.bootstrapTable({
showHeader: true, //是否显示列头,
toolbarAlign: "right",
editable: param.edit === undefined ? false : param.edit,//开启编辑模式
url: url,
method: 'post', //请求方式(*)
toolbar: '#toolbar', //工具按钮用哪个容器
dataType: 'json',
striped: true, //是否显示行间隔色
cache: true, //是否使用缓存,默认为true,所以一般情况下需要设置一下这个属性(*)
sortable: true, //是否启用排序
sortOrder: "asc", //排序方式
queryParams: param.query,//传递参数(*)
queryParamsType: '', //设置参数类型为restfull风格
contentType: 'application/x-www-form-urlencoded;charset=UTF-8;', //post方式提交json数据时,选择表单类型
sidePagination: "server", //分页方式:client客户端分页,server服务端分页(*)
pageNumber: 1, //初始化加载第一页,默认第一页
pageSize: param.size == undefined ? 10 : param.size, //每页的记录行数(*)
pageList: param.pageList ? [10, 20, 50, 100] : param.pageList, //可供选择的每页的行数(*)
strictSearch: false, //false:模糊搜索,true精确搜索
clickToSelect: true, //是否启用点击选中行
height: param.height, //行高,如果没有设置height属性,表格自动根据记录条数觉得表格高度 "550"
pagination: param.pagination == undefined ? true : param.pagination, //是否显示分页(*)
// paginationLoop: param.paginationLoop,
// onlyInfoPagination: param.onlyInfoPagination,
uniqueId: "id", //每一行的唯一标识,一般为主键列
cardView: false, //是否显示详细视图
detailView: param.detailView == undefined ? false : param.detailView, //是否显示父子表
responseHandler: param.res, //服务器返回数据后的处理
columns: param.columns, //列集合
search: param.search == undefined ? true : param.search, //是否开启搜索
showExport: true,//显示导出按钮
exportDataType: "basic",//导出数据类型
exportTypes: ["excel", 'xls'], //导出类型
showFooter: false,
onLoadSuccess: param.loadSuccess,
classes: param.classes == undefined ? "am-table am-table-striped am-table-hover table-main" : param.classes,
onEditableSave: param.onEditableSave == undefined ? " " : param.onEditableSave,
onClickRow: function (row, element, field) {
// $(element).addClass('select');//添加当前选中的 success样式用于区别
},
onDblClickRow: param.onDblClickRow == undefined ? null : param.onDblClickRow,
onExpandRow: param.onExpandRow == undefined ? null : param.onExpandRow
});
}
/**
* 提交普通文本
* @param url
* @param params
* @returns {*}
*/
function subOtherData(url, params, traditional) {
return $.ajax({
url: url,
type: 'post',
data: params,
traditional: traditional === undefined ? false : traditional,
dataType: 'json',
error: function (e) {
toastr.error(e.state);
},
});
}
/**
* 日期转换
* @param fmt
* @returns {*}
* @constructor
*/
Date.prototype.format = function (fmt) { //author: meizz
var o = {
"M+": this.getMonth() + 1, //月份
"d+": this.getDate(), //日
"h+": this.getHours(), //小时
"m+": this.getMinutes(), //分
"s+": this.getSeconds(), //秒
"q+": Math.floor((this.getMonth() + 3) / 3), //季度
"S": this.getMilliseconds() //毫秒
};
if (/(y+)/.test(fmt)) fmt = fmt.replace(RegExp.$1, (this.getFullYear() + "").substr(4 - RegExp.$1.length));
for (var k in o)
if (new RegExp("(" + k + ")").test(fmt)) fmt = fmt.replace(RegExp.$1, (RegExp.$1.length == 1) ? (o[k]) : (("00" + o[k]).substr(("" + o[k]).length)));
return fmt;
};
//获取这周的周一
function getFirstDayOfWeek(date) {
var weekday = date.getDay() || 7; //获取星期几,getDay()返回值是 0(周日) 到 6(周六) 之间的一个整数。0||7为7,即weekday的值为1-7
date.setDate(date.getDate() - weekday + 1);//往前算(weekday-1)天,年份、月份会自动变化
return date.format("yyyy-MM-dd");
}
//获取当月第一天
function getFirstDayOfMonth(date) {
date.setDate(1);
return date.format("yyyy-MM-dd");
}
//获取当季第一天
function getFirstDayOfSeason(date) {
var month = date.getMonth();
if (month < 3) {
date.setMonth(0);
} else if (2 < month && month < 6) {
date.setMonth(3);
} else if (5 < month && month < 9) {
date.setMonth(6);
} else if (8 < month && month < 11) {
date.setMonth(9);
}
date.setDate(1);
return date.format("yyyy-MM-dd");
}
//获取当年第一天
function getFirstDayOfYear(date) {
date.setDate(1);
date.setMonth(0);
return date.format("yyyy-MM-dd");
}
/**
* 获取上一个月的日期
* @date 格式为yyyy-mm-dd的日期,如:2014-01-25
*/
function getPreMonth(date) {
var arr = date.split('-');
var year = arr[0]; //获取当前日期的年份
var month = arr[1]; //获取当前日期的月份
var day = arr[2]; //获取当前日期的日
var days = new Date(year, month, 0);
days = days.getDate(); //获取当前日期中月的天数
var year2 = year;
var month2 = parseInt(month) - 1;
if (month2 == 0) {//如果是1月份,则取上一年的12月份
year2 = parseInt(year2) - 1;
month2 = 12;
}
var day2 = day;
var days2 = new Date(year2, month2, 0);
days2 = days2.getDate();
if (day2 > days2) {//如果原来日期大于上一月的日期,则取当月的最大日期。比如3月的30日,在2月中没有30
day2 = days2;
}
var t2 = year2 + '-' + parse0(month2) + '-' + parse0(day2);
return t2;
}
/**
* 获取日期中月份的第一天
*/
function getfirstDate(firstDate) {
firstDate.setDate(1); //第一天
var year = firstDate.getFull | irstDate.getMonth());
if (month == "00") {
month = "12";
}
return year + "-" + month + "-" + parse0(firstDate.getDate());
}
/**
* 获取日期中月份的最后一天
* @param data
*/
function getEndDate(endDate) {
endDate.setMonth((endDate.getMonth() + 1));
endDate.setDate(0); //最后一天
return endDate.getFullYear() + "-" + parse0(endDate.getMonth() + 1) + "-" + parse0(endDate.getDate());
}
/**
* 日期补零方法
* @param s
* @returns {string}
*/
function parse0(s) {
s += "";
return s.length < 2 ? '0' + s : s;
}
/**
* 日期转周几
* @param date
* @returns {*}
*/
function getMyDay(date) {
var week;
if (date.getDay() == 0) week = "周日";
else if (date.getDay() == 1) week = "周一";
else if (date.getDay() == 2) week = "周二";
else if (date.getDay() == 3) week = "周三";
else if (date.getDay() == 4) week = "周四";
else if (date.getDay() == 5) week = "周五";
else if (date.getDay() == 6) week = "周六";
return week;
}
| Year();
firstDate.setMonth((firstDate.getMonth() + 1));
var month = parse0(f | identifier_body |
app.js | $(function () {
var $fullText = $('.admin-fullText');
$('#admin-fullscreen').on('click', function () {
$.AMUI.fullscreen.toggle();
});
$(document).on($.AMUI.fullscreen.raw.fullscreenchange, function () {
$fullText.text($.AMUI.fullscreen.isFullscreen ? '退出全屏' : '开启全屏');
});
var dataType = $('body').attr('data-type');
for (key in pageData) {
if (key == dataType) {
pageData[key]();
}
}
$('.tpl-switch').find('.tpl-switch-btn-view').on('click', function () {
$(this).prev('.tpl-switch-btn').prop("checked", function () {
if ($(this).is(':checked')) {
return false
} else {
return true
}
})
// console.log('123123123')
});
/**
* 下拉菜单支持键盘事件
*/
var keyState = false, $dropdown, $prevLi;
$("body").on("opened.dropdown.amui", ".am-dropdown", function () {
$dropdown = $(this);
keyState = true;
$prevLi = undefined;
});
$("body").on("closed.dropdown.amui", ".am-dropdown", function () {
keyState = false;
$prevLi = undefined;
});
$("body").on("keyup", function (e) {
if (keyState) {
if (e.keyCode == 38) {
//向上
if ($prevLi != undefined) {
if ($prevLi.prev().get(0) == undefined) {
return;
}
$prevLi = $prevLi.prev();
$prevLi.children().focus();
}
} else if (e.keyCode == 40) {
//向下
if ($prevLi != undefined) {
if ($prevLi.next().get(0) == undefined) {
return;
}
$prevLi = $prevLi.next();
$prevLi.children().focus();
} else {
$prevLi = $dropdown.find("li").first();
$prevLi.children().focus();
}
} else if (e.keyCode == 13 && $prevLi != undefined) {
$dropdown.dropdown('close');
}
}
});
$("input").val("");
//消息提醒
var msg = getQueryString()["msg"];
if (msg != null) {
toastr.info(msg);
}
//获取url中的参数
function getQueryString() {
var qs = location.search.substr(1), / | 字串
args = {}, // 保存参数数据的对象
items = qs.length ? qs.split("&") : [], // 取得每一个参数项,
item = null,
len = items.length;
for (var i = 0; i < len; i++) {
item = items[i].split("=");
var name = decodeURIComponent(item[0]),
value = decodeURIComponent(item[1]);
if (name) {
args[name] = value;
}
}
return args;
}
//获取总积分
subOtherData("/m/queryIntegral").done(function (result) {
$("#sum_integral").text(result.data);
});
})
// ==========================
// 侧边导航下拉列表
// ==========================
$('.tpl-left-nav-link-list').on('click', function () {
$(this).siblings('.tpl-left-nav-sub-menu').slideToggle(80)
.end()
.find('.tpl-left-nav-more-ico').toggleClass('tpl-left-nav-more-ico-rotate');
})
// ==========================
// 头部导航隐藏菜单
// ==========================
$('.tpl-header-nav-hover-ico').on('click', function () {
$('.tpl-left-nav').toggle();
$('.tpl-content-wrapper').toggleClass('tpl-content-wrapper-hover');
})
// 页面数据
var pageData = {
// ===============================================
// 首页
// ===============================================
'index': function indexData() {
},
}
/**
* 初始化表格
* @param $table
* @param url
* @param param
* @param noHover
*/
function initTableByServer($table, url, param, noHover) {
if (noHover == undefined) {
$table.on("mouseover", "td", function () {
$("td").stop(true, true);
$(prevTd).animate({backgroundColor: ($(prevTd).parent().index()) % 2 == 0 ? "#f9f9f9" : ""}, sleep);
$(prevTd).siblings().animate({backgroundColor: ($(prevTd).parent().index()) % 2 == 0 ? "#f9f9f9" : ""}, sleep);
$(this).animate({backgroundColor: "#e4e7f3"}, sleep);
$(this).siblings().animate({backgroundColor: "#e4e7f3"}, sleep);
$(this).attr("title", $(this).text());
prevTd = $(this);
});
}
return $table.bootstrapTable({
showHeader: true, //是否显示列头,
toolbarAlign: "right",
editable: param.edit === undefined ? false : param.edit,//开启编辑模式
url: url,
method: 'post', //请求方式(*)
toolbar: '#toolbar', //工具按钮用哪个容器
dataType: 'json',
striped: true, //是否显示行间隔色
cache: true, //是否使用缓存,默认为true,所以一般情况下需要设置一下这个属性(*)
sortable: true, //是否启用排序
sortOrder: "asc", //排序方式
queryParams: param.query,//传递参数(*)
queryParamsType: '', //设置参数类型为restfull风格
contentType: 'application/x-www-form-urlencoded;charset=UTF-8;', //post方式提交json数据时,选择表单类型
sidePagination: "server", //分页方式:client客户端分页,server服务端分页(*)
pageNumber: 1, //初始化加载第一页,默认第一页
pageSize: param.size == undefined ? 10 : param.size, //每页的记录行数(*)
pageList: param.pageList ? [10, 20, 50, 100] : param.pageList, //可供选择的每页的行数(*)
strictSearch: false, //false:模糊搜索,true精确搜索
clickToSelect: true, //是否启用点击选中行
height: param.height, //行高,如果没有设置height属性,表格自动根据记录条数觉得表格高度 "550"
pagination: param.pagination == undefined ? true : param.pagination, //是否显示分页(*)
// paginationLoop: param.paginationLoop,
// onlyInfoPagination: param.onlyInfoPagination,
uniqueId: "id", //每一行的唯一标识,一般为主键列
cardView: false, //是否显示详细视图
detailView: param.detailView == undefined ? false : param.detailView, //是否显示父子表
responseHandler: param.res, //服务器返回数据后的处理
columns: param.columns, //列集合
search: param.search == undefined ? true : param.search, //是否开启搜索
showExport: true,//显示导出按钮
exportDataType: "basic",//导出数据类型
exportTypes: ["excel", 'xls'], //导出类型
showFooter: false,
onLoadSuccess: param.loadSuccess,
classes: param.classes == undefined ? "am-table am-table-striped am-table-hover table-main" : param.classes,
onEditableSave: param.onEditableSave == undefined ? " " : param.onEditableSave,
onClickRow: function (row, element, field) {
// $(element).addClass('select');//添加当前选中的 success样式用于区别
},
onDblClickRow: param.onDblClickRow == undefined ? null : param.onDblClickRow,
onExpandRow: param.onExpandRow == undefined ? null : param.onExpandRow
});
}
/**
* 提交普通文本
* @param url
* @param params
* @returns {*}
*/
function subOtherData(url, params, traditional) {
return $.ajax({
url: url,
type: 'post',
data: params,
traditional: traditional === undefined ? false : traditional,
dataType: 'json',
error: function (e) {
toastr.error(e.state);
},
});
}
/**
* 日期转换
* @param fmt
* @returns {*}
* @constructor
*/
Date.prototype.format = function (fmt) { //author: meizz
var o = {
"M+": this.getMonth() + 1, //月份
"d+": this.getDate(), //日
"h+": this.getHours(), //小时
"m+": this.getMinutes(), //分
"s+": this.getSeconds(), //秒
"q+": Math.floor((this.getMonth() + 3) / 3), //季度
"S": this.getMilliseconds() //毫秒
};
if (/(y+)/.test(fmt)) fmt = fmt.replace(RegExp.$1, (this.getFullYear() + "").substr(4 - RegExp.$1.length));
for (var k in o)
if (new RegExp("(" + k + ")").test(fmt)) fmt = fmt.replace(RegExp.$1, (RegExp.$1.length == 1) ? (o[k]) : (("00" + o[k]).substr(("" + o[k]).length)));
return fmt;
};
//获取这周的周一
function getFirstDayOfWeek(date) {
var weekday = date.getDay() || 7; //获取星期几,getDay()返回值是 0(周日) 到 6(周六) 之间的一个整数。0||7为7,即weekday的值为1-7
date.setDate(date.getDate() - weekday + 1);//往前算(weekday-1)天,年份、月份会自动变化
return date.format("yyyy-MM-dd");
}
//获取当月第一天
function getFirstDayOfMonth(date) {
date.setDate(1);
return date.format("yyyy-MM-dd");
}
//获取当季第一天
function getFirstDayOfSeason(date) {
var month = date.getMonth();
if (month < 3) {
date.setMonth(0);
} else if (2 < month && month < 6) {
date.setMonth(3);
} else if (5 < month && month < 9) {
date.setMonth(6);
} else if (8 < month && month < 11) {
date.setMonth(9);
}
date.setDate(1);
return date.format("yyyy-MM-dd");
}
//获取当年第一天
function getFirstDayOfYear(date) {
date.setDate(1);
date.setMonth(0);
return date.format("yyyy-MM-dd");
}
/**
* 获取上一个月的日期
* @date 格式为yyyy-mm-dd的日期,如:2014-01-25
*/
function getPreMonth(date) {
var arr = date.split('-');
var year = arr[0]; //获取当前日期的年份
var month = arr[1]; //获取当前日期的月份
var day = arr[2]; //获取当前日期的日
var days = new Date(year, month, 0);
days = days.getDate(); //获取当前日期中月的天数
var year2 = year;
var month2 = parseInt(month) - 1;
if (month2 == 0) {//如果是1月份,则取上一年的12月份
year2 = parseInt(year2) - 1;
month2 = 12;
}
var day2 = day;
var days2 = new Date(year2, month2, 0);
days2 = days2.getDate();
if (day2 > days2) {//如果原来日期大于上一月的日期,则取当月的最大日期。比如3月的30日,在2月中没有30
day2 = days2;
}
var t2 = year2 + '-' + parse0(month2) + '-' + parse0(day2);
return t2;
}
/**
* 获取日期中月份的第一天
*/
function getfirstDate(firstDate) {
firstDate.setDate(1); //第一天
var year = firstDate.getFullYear();
firstDate.setMonth((firstDate.getMonth() + 1));
var month = parse0(firstDate.getMonth());
if (month == "00") {
month = "12";
}
return year + "-" + month + "-" + parse0(firstDate.getDate());
}
/**
* 获取日期中月份的最后一天
* @param data
*/
function getEndDate(endDate) {
endDate.setMonth((endDate.getMonth() + 1));
endDate.setDate(0); //最后一天
return endDate.getFullYear() + "-" + parse0(endDate.getMonth() + 1) + "-" + parse0(endDate.getDate());
}
/**
* 日期补零方法
* @param s
* @returns {string}
*/
function parse0(s) {
s += "";
return s.length < 2 ? '0' + s : s;
}
/**
* 日期转周几
* @param date
* @returns {*}
*/
function getMyDay(date) {
var week;
if (date.getDay() == 0) week = "周日";
else if (date.getDay() == 1) week = "周一";
else if (date.getDay() == 2) week = "周二";
else if (date.getDay() == 3) week = "周三";
else if (date.getDay() == 4) week = "周四";
else if (date.getDay() == 5) week = "周五";
else if (date.getDay() == 6) week = "周六";
return week;
}
| / 获取url中"?"符后的 | identifier_name |
wsr98d_reader.rs | use crate::MetError;
use crate::STRadialData;
use binread::prelude::*;
use chrono::NaiveDateTime;
use encoding_rs::*;
use std::cmp::PartialEq;
use std::collections::HashMap;
use std::io::Cursor;
const DATA_TYPE: [&'static str; 37] = [
"dBT", "dBZ", "V", "W", "SQI", "CPA", "ZDR", "LDR", "CC", "PDP", "KDP", "CP", "Reserved",
"HCL", "CF", "SNR", "Reserved", "Reserved", "Reserved", "Reserved", "Reserved", "Reserved",
"Reserved", "Reserved", "Reserved", "Reserved", "Reserved", "Reserved", "Reserved", "Reserved",
"Reserved", "Zc", "Vc", "Wc", "ZDRc", "FFT", "VIL",
];
#[derive(Debug, BinRead)]
struct CommonBlock {
magic_num: i32, //魔术字 固定标志,用来指示雷达数据文件。
major_version: u16, //主版本号
minor_version: u16, //次版本号
generic_type: i32, //文件类型 1–基数据文件; 2–气象产品文件; 3–谱数据文件;
product_type: i32, //产品类型 文件类型为1时此字段无效。
#[br(count = 16)]
reserved: Vec<u8>,
}
#[derive(Debug, BinRead)]
struct SiteInfo {
#[br(count = 8)]
site_code_: Vec<u8>,
#[br(calc=GBK.decode(&site_code_).0.trim_end_matches('\u{0}').to_string())]
site_code: String,
#[br(count = 32)]
site_name_: Vec<u8>,
#[br(calc=GBK.decode(&site_name_).0.trim_end_matches('\u{0}').to_string())]
site_name: String,
latitude: f32,
longtitude: f32,
antena_height: i32, //天线高
ground_height: i32, //雷达塔楼地面海拔高度
frequency: f32,
beam_width_h: f32, //水平波束宽
beam_width_v: f32, //垂直波束宽
radar_version: i32, //雷达数据采集软件版本号
radar_type: u16, //1–SA
// 2–SB
// 3–SC
// 33–CA
// 34–CB
// 35–CC
// 36–CCJ
// 37–CD
// 65–XA
// 66–KA
// 67–W
antenna_gain: i16,
trans_loss: i16,
recv_loss: i16,
other_loss: i16,
#[br(count = 46)]
reserved: Vec<u8>,
}
#[derive(Debug, BinRead)]
struct TaskInfo {
#[br(count = 32)]
task_name: Vec<u8>,
#[br(count = 128)]
task_des: Vec<u8>,
polarization_type: i32, //1 – 水平极化 2 – 垂直极化 3 – 水平/垂直同时 4 – 水平/垂直交替
scan_type: i32, //0 – 体扫 1–单层PPI 2 – 单层RHI 3 – 单层扇扫 4 – 扇体扫 5 – 多层RHI 6 – 手工扫描 7 – 垂直扫描
pulse_width: i32,
start_time_: i32,
#[br(calc={let t = NaiveDateTime::from_timestamp(start_time_ as i64,0);t.format("%Y%m%d").to_string()})]
start_date: String,
#[br(calc={let t = NaiveDateTime::from_timestamp(start_time_ as i64,0);t.format("%H%M%S").to_string()})]
start_time: String,
cut_num: i32,
noise_h: f32,
noise_v: f32,
cali_h: f32,
cali_v: f32,
h_noise_t: f32,
v_noise_t: f32,
zdr_cali: f32,
phidp_cali: f32,
ldr_cali: f32,
// pulse_width2: f32,
// pulse_width3: f32,
// pulse_width4: f32,
#[br(count = 40)]
reserved: Vec<u8>,
}
#[derive(Debug, BinRead)]
struct CutInfo {
process_mode: i32,
wave_form: i32, //0 – CS连续监测
// 1 – CD连续多普勒
// 2 – CDX多普勒扩展
// 3 – Rx Test
// 4 – BATCH批模式
// 5 – Dual PRF双PRF
// 6 - Staggered PRT 参差PRT
// 7 - single PRF 单PRF
// 8 –linear 线性调频
// 9 - phase encoding 相位编码
prf1: f32,
prf2: f32,
deal_mod: i32,
az: f32,
elev: f32,
start_az: f32,
end_az: f32,
ang_res: f32,
scan_speed: f32,
log_res: i32,
dop_res: i32,
max_range1: i32,
max_range2: i32,
start_range: i32,
sample1: i32,
sample2: i32,
phase_mod: i32,
at_loss: f32,
ny_speed: f32,
moments_mask: i64,
moments_size_mask: i64,
mis_filter_mask: i32,
sqi: f32,
sig: f32,
csr: f32,
log: f32,
cpa: f32,
pmi: f32,
dplog: f32,
#[br(count = 4)]
r: Vec<u8>,
dbt_mask: i32,
dbz_mask: i32,
v_mask: i32,
w_mask: i32,
dp_mask: i32,
#[br(count = 12)]
mask_reserved: Vec<u8>,
scan_sync: i32,
direction: i32,
ground_clutter_type: u16,
ground_clutter_filter_type: u16,
ground_clutter_width: u16,
ground_clutter_filter_win: i16,
pulse_width: u16,
pulse_width1: u16,
pulse_width2: u16,
pulse_width3: u16,
pulse_width4: u16,
#[br(count = 62)]
reserved: Vec<u8>,
}
impl PartialEq for CutInfo {
fn eq(&self, other: &Self) -> bool {
self.elev == other.elev
}
}
#[derive(Debug, BinRead, Clone)]
struct DataInfo {
radial_state: i32,
spot_blank: i32,
seq_num: i32,
rad_num: i32,
elev_num: i32,
az: f32,
el: f32,
sec: i32,
micro_sec: i32,
data_len: i32,
moment_num: i32,
last_sec: i32,
fft_point: i16,
acc_power: i16,
#[br(count = 12)]
reserved: Vec<u8>,
#[br(count=moment_num)]
data_block: Vec<DataBlock>,
}
impl PartialEq for DataInfo {
fn eq(&self, other: &Self) -> bool {
self.el == other.el
}
}
#[derive(Debug, BinRead, Clone)]
struct DataBlock {
data_type_: i32,
#[br(calc=if data_type_>0 && data_type_ <37 {String::from(DATA_TYPE[data_type_ as usize-1])}else {String::from("UNKNOWN")})]
data_type: String,
scale: i32,
offset: i32,
pub bin_len: u16, //一个库的字节数 。2为两个字节
flag: u16,
pub len: i32, //库长
#[br(count = 12)]
reserved: Vec<u8>,
#[br(count= len)]
data: Vec<u8>,
}
pub struct WSR98DReader;
impl WSR98DReader {
pub fn new(buf: &[u8]) -> Result<STRadialData, MetError> {
println!("parse standard radar");
let mut cursor = Cursor::new(buf);
let h: CommonBlock = cursor.read_le()?;
// dbg!(&h);
let h: SiteInfo = cursor.read_le()?;
// dbg!(&h);
let site_code = h.site_code.clone();
let site_name = h.site_name.clone();
let latitude = h.latitude;
let longtitude = h.longtitude;
let antena_height = h.antena_height;
let ground_height = h.ground_height;
let h: TaskInfo = cursor.read_le()?;
let start_date = h.start_date.clone();
let start_time = h.start_time.clone();
// dbg!(&h);
let cut_num = h.cut_num;
let mut cut_infos = Vec::with_capacity(cut_num as usize * 256);
let mut idx_el = Vec::new();
for i in 0..cut_num {
let h: CutInfo = cursor.read_le()?;
idx_el.push((i + 1, h.elev));
// println!("{:?}", h);
cut_infos.push(h);
}
println!("{:?}", idx_el);
// cut_infos.dedup();
// for c in cut_infos.iter() {
// println!("{:?}", c);
// }
// println!("{:?}", cut_infos.len());
let log_res = cut_infos[0].log_res;
let dop_res = cut_infos[0].log_res;
let max_range1 = cut_infos[0].max_range1;
let max_range2 = cut_infos[0].max_range2;
println!(
"log_rs {} dop_res {} max_range1 {} max_range2 {}",
log_res, dop_res, max_range1, max_range2
);
let mut data_infos = Vec::new();
loop {
if let Ok(d) = cursor.read_le::<DataInfo>() {
let radial_state = d.radial_state;
data_infos.push(d);
if radial_state == 4 || radial_state == 6 {
println!("sweep end");
break;
}
} else {
break;
}
}
let bin_num = data_infos[0].data_block[0].len;
// dbg!(data_infos.len(), bin_num);
let data = convert2radial(data_infos, &cut_infos);
let dist = bin_num as f32 * log_res as f32;
dbg!(dist);
Ok(STRadialData {
_extents: (-dist, dist, -dist, dist),
site_code,
site_name,
latitude,
longtitude,
antena_height,
ground_height,
start_date,
start_time,
log_res,
dop_res,
idx_el,
data,
bin_num,
})
}
}
fn convert2radial(
data_infos: Vec<DataInfo>,
cut_infos: &Vec<CutInfo>,
) -> HashMap<i32, Vec<(f32, f32, HashMap<String, Vec<f32>>)>> {
let mut sweep_start_ray_index = Vec::new();
let mut sweep_end_ray_index = Vec::new();
for (i, d) in data_infos.iter().enumerate() {
let state = d.radial_state;
if state == 0 || state == 3 {
sweep_start_ray_index.push(i)
}
if state == 2 || state == 4 {
sweep_end_ray_index.push(i);
}
// println!("{:#?}", d);
}
// println!("start_index {:?}", sweep_start_ray_index);
// println!("end_index {:?}", sweep_end_ray_index);
let start_end = sweep_start_ray_index.iter().zip(sweep_end_ray_index.iter());
let mut data_infos = data_infos;
//elv index from 1-> az ->data_type->data
let mut el_az_dt_data = HashMap::new();
let mut sorted_data = Vec::new();
for (s, e) in start_end {
let d = &mut data_infos[*s..=*e];
d.sort_by(|a, b| a.az.partial_cmp(&b.az).unwrap());
sorted_data.extend_from_slice(d);
}
for dd in sorted_data.iter() {
// println!(
// "el {:?} {} az {:?} {} ",
// dd.el, dd.elev_num, dd.az, dd.moment_num
// );
let mut dt_data = HashMap::new();
for ddd in &dd.data_block {
// println!(" {} {}", ddd.data_type, ddd.len / ddd.bin_len as i32);
let mut own_data: Vec<f32>; // = Vec::with_capacity(ddd.len as usize);
let dt_slice = &ddd.data;
let offset = ddd.offset;
let scale = ddd.scale;
if ddd.bin_len == 2 {
own_data = dt_slice
.chunks_exact(2)
.map(|v| {
let vv = v.as_ref();
let vv = i16::from_le_bytes([vv[0], vv[1]]);
// vv as f32
if vv < 5 {
return crate::MISSING;
}
(vv - offset as i16) as f32 / scale as f32
})
.collect();
} else {
own_data = dt_slice
.iter()
.map(|v| {
if *v < 5 {
return crate::MISSING;
}
(*v as f32 - offset as f32) / scale as f32
// *v as f32
})
.collect();
}
// if &ddd.data_type == "dBT" {
/ | // println!("{:?}",own_data);
dt_data.insert(ddd.data_type.clone(), own_data);
}
let key = dd.elev_num;
let el = cut_infos[dd.elev_num as usize - 1].elev;
if !el_az_dt_data.contains_key(&key) {
el_az_dt_data.insert(key, vec![(el, dd.az, dt_data)]);
} else {
let v = el_az_dt_data.get_mut(&key).unwrap();
v.push((el, dd.az, dt_data));
}
}
// let d = &el_az_dt_data[&11];
// for dd in d.iter() {
// println!("{} {} {} ", dd.0, dd.1, dd.2["dBZ"].len());
// let tmp = &dd.2["dBZ"];
// for (i, ddd) in tmp.iter().enumerate() {
// // if *ddd != crate::MISSING {
// print!("{}_{} ", i as f32 * 0.25, ddd);
// // }
// if i > 200 {
// println!("");
// break;
// }
// }
// }
// println!("keys {:?}", el_az_dt_data.keys());
// for i in 1..=11 {
// println!("keys {:?}", el_az_dt_data[&i][0].0);
// }
el_az_dt_data
}
| / // let print_data: Vec<&f32> =
// // own_data.iter().filter(|d| d != &&crate::MISSING).collect();
// println!(
// "{:?} {:?} {:?} {:?} ",
// dd.el,
// dd.az,
// ddd.data_type.clone(),
// own_data
// );
// }
| conditional_block |
wsr98d_reader.rs | use crate::MetError;
use crate::STRadialData;
use binread::prelude::*;
use chrono::NaiveDateTime;
use encoding_rs::*;
use std::cmp::PartialEq;
use std::collections::HashMap;
use std::io::Cursor;
const DATA_TYPE: [&'static str; 37] = [
"dBT", "dBZ", "V", "W", "SQI", "CPA", "ZDR", "LDR", "CC", "PDP", "KDP", "CP", "Reserved",
"HCL", "CF", "SNR", "Reserved", "Reserved", "Reserved", "Reserved", "Reserved", "Reserved",
"Reserved", "Reserved", "Reserved", "Reserved", "Reserved", "Reserved", "Reserved", "Reserved",
"Reserved", "Zc", "Vc", "Wc", "ZDRc", "FFT", "VIL",
];
#[derive(Debug, BinRead)]
struct CommonBlock {
magic_num: i32, //魔术字 固定标志,用来指示雷达数据文件。
major_version: u16, //主版本号
minor_version: u16, //次版本号
generic_type: i32, //文件类型 1–基数据文件; 2–气象产品文件; 3–谱数据文件;
product_type: i32, //产品类型 文件类型为1时此字段无效。
#[br(count = 16)]
reserved: Vec<u8>,
}
#[derive(Debug, BinRead)]
struct SiteInfo {
#[br(count = 8)]
site_code_: Vec<u8>,
#[br(calc=GBK.decode(&site_code_).0.trim_end_matches('\u{0}').to_string())]
site_code: String,
#[br(count = 32)]
site_name_: Vec<u8>,
#[br(calc=GBK.decode(&site_name_).0.trim_end_matches('\u{0}').to_string())]
site_name: String,
latitude: f32,
longtitude: f32,
antena_height: i32, //天线高
ground_height: i32, //雷达塔楼地面海拔高度
frequency: f32,
beam_width_h: f32, //水平波束宽
beam_width_v: f32, //垂直波束宽
radar_version: i32, //雷达数据采集软件版本号
radar_type: u16, //1–SA
// 2–SB
// 3–SC
// 33–CA
// 34–CB
// 35–CC
// 36–CCJ
// 37–CD
// 65–XA
// 66–KA
// 67–W
antenna_gain: i16,
trans_loss: i16,
recv_loss: i16,
other_loss: i16,
#[br(count = 46)]
reserved: Vec<u8>,
}
#[derive(Debug, BinRead)]
struct TaskInfo {
#[br(count = 32)]
task_name: Vec<u8>,
#[br(count = 128)]
task_des: Vec<u8>,
polarization_type: i32, //1 – 水平极化 2 – 垂直极化 3 – 水平/垂直同时 4 – 水平/垂直交替
scan_type: i32, //0 – 体扫 1–单层PPI 2 – 单层RHI 3 – 单层扇扫 4 – 扇体扫 5 – 多层RHI 6 – 手工扫描 7 – 垂直扫描
pulse_width: i32,
start_time_: i32,
#[br(calc={let t = NaiveDateTime::from_timestamp(start_time_ as i64,0);t.format("%Y%m%d").to_string()})]
start_date: String,
#[br(calc={let t = NaiveDateTime::from_timestamp(start_time_ as i64,0);t.format("%H%M%S").to_string()})]
start_time: String,
cut_num: i32,
noise_h: f32,
noise_v: f32,
cali_h: f32,
cali_v: f32,
h_noise_t: f32,
v_noise_t: f32,
zdr_cali: f32,
phidp_cali: f32,
ldr_cali: f32,
// pulse_width2: f32,
// pulse_width3: f32,
// pulse_width4: f32,
#[br(count = 40)]
reserved: Vec<u8>,
}
#[derive(Debug, BinRead)]
struct CutInfo {
process_mode: i32,
wave_form: i32, //0 – CS连续监测
// 1 – CD连续多普勒
// 2 – CDX多普勒扩展
// 3 – Rx Test
// 4 – BATCH批模式
// 5 – Dual PRF双PRF
// 6 - Staggered PRT 参差PRT
// 7 - single PRF 单PRF
// 8 –linear 线性调频
// 9 - phase encoding 相位编码
prf1: f32,
prf2: f32,
deal_mod: i32,
az: f32,
elev: f32,
start_az: f32,
end_az: f32,
ang_res: f32,
scan_speed: f32,
log_res: i32,
dop_res: i32,
max_range1: i32,
max_range2: i32,
start_range: i32,
sample1: i32,
sample2: i32,
phase_mod: i32,
at_loss: f32,
ny_speed: f32,
moments_mask: i64,
moments_size_mask: i64,
mis_filter_mask: i32,
sqi: f32,
sig: f32,
csr: f32,
log: f32,
cpa: f32,
pmi: f32,
dplog: f32,
#[br(count = 4)]
r: Vec<u8>,
dbt_mask: i32,
dbz_mask: i32,
v_mask: i32,
w_mask: i32,
dp_mask: i32,
#[br(count = 12)]
mask_reserved: Vec<u8>,
scan_sync: i32,
direction: i32,
ground_clutter_type: u16,
ground_clutter_filter_type: u16,
ground_clutter_width: u16,
ground_clutter_filter_win: i16,
pulse_width: u16,
pulse_width1: u16,
pulse_width2: u16,
pulse_width3: u16,
pulse_width4: u16,
#[br(count = 62)]
reserved: Vec<u8>,
}
impl PartialEq for CutInfo {
fn eq(&self, other: &Self) -> bool {
self.elev == other.elev
}
}
#[derive(Debug, BinRead, Clone)]
struct DataInfo {
radial_state: i32,
spot_blank: i32,
seq_num: i32,
rad_num: i32,
elev_num: i32,
az: f32,
el: f32,
sec: i32,
micro_sec: i32,
data_len: i32,
moment_num: i32,
last_sec: i32,
fft_point: i16,
acc_power: i16,
#[br(count = 12)]
reserved: Vec<u8>,
#[br(count=moment_num)]
data_block: Vec<DataBlock>,
}
impl PartialEq for DataInfo {
fn eq(&self, other: &Self) -> bool {
self.el == other.el
}
}
#[derive(Debug, BinRead, Clone)]
struct DataBlock {
data_type_: i32,
#[br(calc=if data_type_>0 && data_type_ <37 {String::from(DATA_TYPE[data_type_ as usize-1])}else {String::from("UNKNOWN")})]
data_type: String,
scale: i32,
offset: i32,
pub bin_len: u16, //一个库的字节数 。2为两个字节
flag: u16,
pub len: i32, //库长
#[br(count = 12)]
reserved: Vec<u8>,
#[br(count= len)]
data: Vec<u8>,
}
pub struct WSR98DReader;
impl WSR98DReader {
pub fn new(buf: &[u8]) -> Result<STRadialData, MetError> {
println!("parse standard radar");
let mut cursor = Cursor::new(buf);
let h: CommonBlock = cursor.read_le()?;
// dbg!(&h);
let h: SiteInfo = cursor.read_le()?;
// dbg!(&h);
let site_code = h.site_code.clone();
let site_name = h.site_name.clone();
let latitude = h.latit | let longtitude = h.longtitude;
let antena_height = h.antena_height;
let ground_height = h.ground_height;
let h: TaskInfo = cursor.read_le()?;
let start_date = h.start_date.clone();
let start_time = h.start_time.clone();
// dbg!(&h);
let cut_num = h.cut_num;
let mut cut_infos = Vec::with_capacity(cut_num as usize * 256);
let mut idx_el = Vec::new();
for i in 0..cut_num {
let h: CutInfo = cursor.read_le()?;
idx_el.push((i + 1, h.elev));
// println!("{:?}", h);
cut_infos.push(h);
}
println!("{:?}", idx_el);
// cut_infos.dedup();
// for c in cut_infos.iter() {
// println!("{:?}", c);
// }
// println!("{:?}", cut_infos.len());
let log_res = cut_infos[0].log_res;
let dop_res = cut_infos[0].log_res;
let max_range1 = cut_infos[0].max_range1;
let max_range2 = cut_infos[0].max_range2;
println!(
"log_rs {} dop_res {} max_range1 {} max_range2 {}",
log_res, dop_res, max_range1, max_range2
);
let mut data_infos = Vec::new();
loop {
if let Ok(d) = cursor.read_le::<DataInfo>() {
let radial_state = d.radial_state;
data_infos.push(d);
if radial_state == 4 || radial_state == 6 {
println!("sweep end");
break;
}
} else {
break;
}
}
let bin_num = data_infos[0].data_block[0].len;
// dbg!(data_infos.len(), bin_num);
let data = convert2radial(data_infos, &cut_infos);
let dist = bin_num as f32 * log_res as f32;
dbg!(dist);
Ok(STRadialData {
_extents: (-dist, dist, -dist, dist),
site_code,
site_name,
latitude,
longtitude,
antena_height,
ground_height,
start_date,
start_time,
log_res,
dop_res,
idx_el,
data,
bin_num,
})
}
}
fn convert2radial(
data_infos: Vec<DataInfo>,
cut_infos: &Vec<CutInfo>,
) -> HashMap<i32, Vec<(f32, f32, HashMap<String, Vec<f32>>)>> {
let mut sweep_start_ray_index = Vec::new();
let mut sweep_end_ray_index = Vec::new();
for (i, d) in data_infos.iter().enumerate() {
let state = d.radial_state;
if state == 0 || state == 3 {
sweep_start_ray_index.push(i)
}
if state == 2 || state == 4 {
sweep_end_ray_index.push(i);
}
// println!("{:#?}", d);
}
// println!("start_index {:?}", sweep_start_ray_index);
// println!("end_index {:?}", sweep_end_ray_index);
let start_end = sweep_start_ray_index.iter().zip(sweep_end_ray_index.iter());
let mut data_infos = data_infos;
//elv index from 1-> az ->data_type->data
let mut el_az_dt_data = HashMap::new();
let mut sorted_data = Vec::new();
for (s, e) in start_end {
let d = &mut data_infos[*s..=*e];
d.sort_by(|a, b| a.az.partial_cmp(&b.az).unwrap());
sorted_data.extend_from_slice(d);
}
for dd in sorted_data.iter() {
// println!(
// "el {:?} {} az {:?} {} ",
// dd.el, dd.elev_num, dd.az, dd.moment_num
// );
let mut dt_data = HashMap::new();
for ddd in &dd.data_block {
// println!(" {} {}", ddd.data_type, ddd.len / ddd.bin_len as i32);
let mut own_data: Vec<f32>; // = Vec::with_capacity(ddd.len as usize);
let dt_slice = &ddd.data;
let offset = ddd.offset;
let scale = ddd.scale;
if ddd.bin_len == 2 {
own_data = dt_slice
.chunks_exact(2)
.map(|v| {
let vv = v.as_ref();
let vv = i16::from_le_bytes([vv[0], vv[1]]);
// vv as f32
if vv < 5 {
return crate::MISSING;
}
(vv - offset as i16) as f32 / scale as f32
})
.collect();
} else {
own_data = dt_slice
.iter()
.map(|v| {
if *v < 5 {
return crate::MISSING;
}
(*v as f32 - offset as f32) / scale as f32
// *v as f32
})
.collect();
}
// if &ddd.data_type == "dBT" {
// // let print_data: Vec<&f32> =
// // own_data.iter().filter(|d| d != &&crate::MISSING).collect();
// println!(
// "{:?} {:?} {:?} {:?} ",
// dd.el,
// dd.az,
// ddd.data_type.clone(),
// own_data
// );
// }
// println!("{:?}",own_data);
dt_data.insert(ddd.data_type.clone(), own_data);
}
let key = dd.elev_num;
let el = cut_infos[dd.elev_num as usize - 1].elev;
if !el_az_dt_data.contains_key(&key) {
el_az_dt_data.insert(key, vec![(el, dd.az, dt_data)]);
} else {
let v = el_az_dt_data.get_mut(&key).unwrap();
v.push((el, dd.az, dt_data));
}
}
// let d = &el_az_dt_data[&11];
// for dd in d.iter() {
// println!("{} {} {} ", dd.0, dd.1, dd.2["dBZ"].len());
// let tmp = &dd.2["dBZ"];
// for (i, ddd) in tmp.iter().enumerate() {
// // if *ddd != crate::MISSING {
// print!("{}_{} ", i as f32 * 0.25, ddd);
// // }
// if i > 200 {
// println!("");
// break;
// }
// }
// }
// println!("keys {:?}", el_az_dt_data.keys());
// for i in 1..=11 {
// println!("keys {:?}", el_az_dt_data[&i][0].0);
// }
el_az_dt_data
}
| ude;
| identifier_name |
wsr98d_reader.rs | use crate::MetError;
use crate::STRadialData;
use binread::prelude::*;
use chrono::NaiveDateTime;
use encoding_rs::*;
use std::cmp::PartialEq;
use std::collections::HashMap;
use std::io::Cursor;
const DATA_TYPE: [&'static str; 37] = [
"dBT", "dBZ", "V", "W", "SQI", "CPA", "ZDR", "LDR", "CC", "PDP", "KDP", "CP", "Reserved",
"HCL", "CF", "SNR", "Reserved", "Reserved", "Reserved", "Reserved", "Reserved", "Reserved",
"Reserved", "Reserved", "Reserved", "Reserved", "Reserved", "Reserved", "Reserved", "Reserved",
"Reserved", "Zc", "Vc", "Wc", "ZDRc", "FFT", "VIL",
];
#[derive(Debug, BinRead)]
struct CommonBlock {
magic_num: i32, //魔术字 固定标志,用来指示雷达数据文件。
major_version: u16, //主版本号
minor_version: u16, //次版本号
generic_type: i32, //文件类型 1–基数据文件; 2–气象产品文件; 3–谱数据文件;
product_type: i32, //产品类型 文件类型为1时此字段无效。
#[br(count = 16)]
reserved: Vec<u8>,
}
#[derive(Debug, BinRead)]
struct SiteInfo {
#[br(count = 8)]
site_code_: Vec<u8>,
#[br(calc=GBK.decode(&site_code_).0.trim_end_matches('\u{0}').to_string())]
site_code: String,
#[br(count = 32)]
site_name_: Vec<u8>,
#[br(calc=GBK.decode(&site_name_).0.trim_end_matches('\u{0}').to_string())]
site_name: String,
latitude: f32,
longtitude: f32,
antena_height: i32, //天线高
ground_height: i32, //雷达塔楼地面海拔高度
frequency: f32,
beam_width_h: f32, //水平波束宽
beam_width_v: f32, //垂直波束宽
radar_version: i32, //雷达数据采集软件版本号
radar_type: u16, //1–SA
// 2–SB
// 3–SC
// 33–CA
// 34–CB
// 35–CC
// 36–CCJ
// 37–CD
// 65–XA
// 66–KA
// 67–W
antenna_gain: i16,
trans_loss: i16,
recv_loss: i16,
other_loss: i16,
#[br(count = 46)]
reserved: Vec<u8>,
}
#[derive(Debug, BinRead)]
struct TaskInfo {
#[br(count = 32)]
task_name: Vec<u8>,
#[br(count = 128)]
task_des: Vec<u8>,
polarization_type: i32, //1 – 水平极化 2 – 垂直极化 3 – 水平/垂直同时 4 – 水平/垂直交替
scan_type: i32, //0 – 体扫 1–单层PPI 2 – 单层RHI 3 – 单层扇扫 4 – 扇体扫 5 – 多层RHI 6 – 手工扫描 7 – 垂直扫描
pulse_width: i32,
start_time_: i32,
#[br(calc={let t = NaiveDateTime::from_timestamp(start_time_ as i64,0);t.format("%Y%m%d").to_string()})]
start_date: String,
#[br(calc={let t = NaiveDateTime::from_timestamp(start_time_ as i64,0);t.format("%H%M%S").to_string()})]
start_time: String,
cut_num: i32,
noise_h: f32,
noise_v: f32,
cali_h: f32,
cali_v: f32,
h_noise_t: f32,
v_noise_t: f32,
zdr_cali: f32,
phidp_cali: f32,
ldr_cali: f32,
// pulse_width2: f32,
// pulse_width3: f32,
// pulse_width4: f32,
#[br(count = 40)]
reserved: Vec<u8>,
}
#[derive(Debug, BinRead)]
struct CutInfo {
process_mode: i32,
wave_form: i32, //0 – CS连续监测
// 1 – CD连续多普勒
// 2 – CDX多普勒扩展
// 3 – Rx Test
// 4 – BATCH批模式
// 5 – Dual PRF双PRF
// 6 - Staggered PRT 参差PRT
// 7 - single PRF 单PRF
// 8 –linear 线性调频
// 9 - phase encoding 相位编码
prf1: f32,
prf2: f32,
deal_mod: i32,
az: f32,
elev: f32,
start_az: f32,
end_az: f32,
ang_res: f32,
scan_speed: f32,
log_res: i32,
dop_res: i32,
max_range1: i32,
max_range2: i32,
start_range: i32,
sample1: i32,
sample2: i32,
phase_mod: i32,
at_loss: f32,
ny_speed: f32,
moments_mask: i64,
moments_size_mask: i64,
mis_filter_mask: i32,
sqi: f32,
sig: f32,
csr: f32,
log: f32,
cpa: f32,
pmi: f32,
dplog: f32,
#[br(count = 4)]
r: Vec<u8>,
dbt_mask: i32,
dbz_mask: i32,
v_mask: i32,
w_mask: i32,
dp_mask: i32,
#[br(count = 12)]
mask_reserved: Vec<u8>,
scan_sync: i32,
direction: i32,
ground_clutter_type: u16,
ground_clutter_filter_type: u16,
ground_clutter_width: u16,
ground_clutter_filter_win: i16,
pulse_width: u16,
pulse_width1: u16,
pulse_width2: u16,
pulse_width3: u16,
pulse_width4: u16,
#[br(count = 62)]
reserved: Vec<u8>,
}
impl PartialEq for CutInfo {
fn eq(&self, other: &Self) -> bool {
self.elev == other.elev
}
}
#[derive(Debug, BinRead, Clone)]
struct DataInfo {
radial_state: i32,
spot_blank: i32,
seq_num: i32,
rad_num: i32,
elev_num: i32,
az: f32,
el: f32,
sec: i32,
micro_sec: i32,
data_len: i32,
moment_num: i32,
last_sec: i32,
fft_point: i16,
acc_power: i16,
#[br(count = 12)]
reserved: Vec<u8>,
#[br(count=moment_num)]
data_block: Vec<DataBlock>,
}
impl PartialEq for DataInfo {
fn eq(&self, other: &Self) -> bool {
self.el == other.el
}
}
#[derive(Debug, BinRead, Clone)]
struct DataBlock {
data_type_: i32,
#[br(calc=if data_type_>0 && data_type_ <37 {String::from(DATA_TYPE[data_type_ as usize-1])}else {String::from("UNKNOWN")})]
data_type: String,
scale: i32,
offset: i32,
pub bin_len: u16, //一个库的字节数 。2为两个字节
flag: u16,
pub len: i32, //库长
#[br(count = 12)]
reserved: Vec<u8>,
#[br(count= len)]
data: Vec<u8>,
}
pub struct WSR98DReader;
impl WSR98DReader {
pub fn new(buf: &[u8]) -> Result<STRadialData, MetError> {
println!("parse standard radar");
let mut cursor = Cursor::new(buf);
let h: CommonBlock = cursor.read_le()?;
// dbg!(&h);
let h: SiteInfo = cursor.read_le()?;
// dbg!(&h);
let site_code = h.site_code.clone();
let site_name = h.site_name.clone();
let latitude = h.latitude;
let longtitude = h.longtitude;
let antena_height = h.antena_height;
let ground_height = h.ground_height;
let h: TaskInfo = cursor.read_le()?;
let start_date = h.start_date.clone();
let start_time = h.start_time.clone();
// dbg!(&h);
let cut_num = h.cut_num;
let mut cut_infos = Vec::with_capacity(cut_num as usize * 256);
let mut idx_el = Vec::new();
for i in 0..cut_num {
let h: CutInfo = cursor.read_le()?;
idx_el.push((i + 1, h.elev));
// println!("{:?}", h);
cut_infos.push(h);
}
println!("{:?}", idx_el);
// cut_infos.dedup();
// for c in cut_infos.iter() {
// println!("{:?}", c);
// }
// println!("{:?}", cut_infos.len());
let log_res = cut_infos[0].log_res;
let dop_res = cut_infos[0].log_res;
let max_range1 = cut_infos[0].max_range1;
let max_range2 = cut_infos[0].max_range2;
println!(
"log_rs {} dop_res {} max_range1 {} max_range2 {}",
log_res, dop_res, max_range1, max_range2
);
let mut data_infos = Vec::new();
loop {
if let Ok(d) = cursor.read_le::<DataInfo>() {
let radial_state = d.radial_state;
data_infos.push(d);
if radial_state == 4 || radial_state == 6 {
println!("sweep end");
break;
}
} else {
break;
}
}
let bin_num = data_infos[0].data_block[0].len;
// dbg!(data_infos.len(), bin_num);
let data = convert2radial(data_infos, &cut_infos);
let dist = bin_num as f32 * log_res as f32;
dbg!(dist);
Ok(STRadialData {
_extents: (-dist, dist, -dist, dist),
site_code,
site_name,
latitude,
longtitude,
antena_height,
ground_height,
start_date,
start_time,
log_res,
dop_res,
idx_el,
data,
bin_num,
})
}
}
fn convert2radial(
data_infos: Vec<DataInfo>,
cut_infos: &Vec<CutInfo>,
) -> HashMap<i32, Vec<(f32, f32, HashMap<String, Vec<f32>>)>> {
let mut sweep_start_ray_index = Vec::new();
let mut sweep_end_ray_index = Vec::new();
for (i, d) in data_infos.iter().enumerate() {
let state = d.radial_state;
if state == 0 || state == 3 {
sweep_start_ray_index.push(i)
}
if state == 2 || state == 4 {
sweep_end_ray_index.push(i);
}
// println!("{:#?}", d);
}
// println!("start_index {:?}", sweep_start_ray_index);
// println!("end_index {:?}", sweep_end_ray_index);
let start_end = sweep_start_ray_index.iter().zip(sweep_end_ray_index.iter());
let mut data_infos = data_infos;
//elv index from 1-> az ->data_type->data
let mut el_az_dt_data = HashMap::new();
let mut sorted_data = Vec::new();
for (s, e) in start_end {
let d = &mut data_infos[*s..=*e];
d.sort_by(|a, b| a.az.partial_cmp(&b.az).unwrap());
sorted_data.extend_from_slice(d);
}
for dd in sorted_data.iter() {
// println!(
// "el {:?} {} az {:?} {} ",
// dd.el, dd.elev_num, dd.az, dd.moment_num
// );
let mut dt_data = HashMap::new();
for ddd in &dd.data_block {
// println!(" {} {}", ddd.data_type, ddd.len / ddd.bin_len as i32);
let mut own_data: Vec<f32>; // = Vec::with_capacity(ddd.len as usize);
let dt_slice = &ddd.data;
let offset = ddd.offset;
let scale = ddd.scale;
if ddd.bin_len == 2 {
own_data = dt_slice
.chunks_exact(2)
.map(|v| {
let vv = v.as_ref();
let vv = i16::from_le_bytes([vv[0], vv[1]]);
// vv as f32
if vv < 5 {
return crate::MISSING;
}
(vv - offset as i16) as f32 / scale as f32 | .iter()
.map(|v| {
if *v < 5 {
return crate::MISSING;
}
(*v as f32 - offset as f32) / scale as f32
// *v as f32
})
.collect();
}
// if &ddd.data_type == "dBT" {
// // let print_data: Vec<&f32> =
// // own_data.iter().filter(|d| d != &&crate::MISSING).collect();
// println!(
// "{:?} {:?} {:?} {:?} ",
// dd.el,
// dd.az,
// ddd.data_type.clone(),
// own_data
// );
// }
// println!("{:?}",own_data);
dt_data.insert(ddd.data_type.clone(), own_data);
}
let key = dd.elev_num;
let el = cut_infos[dd.elev_num as usize - 1].elev;
if !el_az_dt_data.contains_key(&key) {
el_az_dt_data.insert(key, vec![(el, dd.az, dt_data)]);
} else {
let v = el_az_dt_data.get_mut(&key).unwrap();
v.push((el, dd.az, dt_data));
}
}
// let d = &el_az_dt_data[&11];
// for dd in d.iter() {
// println!("{} {} {} ", dd.0, dd.1, dd.2["dBZ"].len());
// let tmp = &dd.2["dBZ"];
// for (i, ddd) in tmp.iter().enumerate() {
// // if *ddd != crate::MISSING {
// print!("{}_{} ", i as f32 * 0.25, ddd);
// // }
// if i > 200 {
// println!("");
// break;
// }
// }
// }
// println!("keys {:?}", el_az_dt_data.keys());
// for i in 1..=11 {
// println!("keys {:?}", el_az_dt_data[&i][0].0);
// }
el_az_dt_data
} | })
.collect();
} else {
own_data = dt_slice | random_line_split |
user.go | package canvas
import (
"fmt"
"io"
"path"
"path/filepath"
"time"
)
// User is a canvas user
type User struct {
ID int `json:"id"`
Name string `json:"name"`
Email string `json:"email"`
Bio string `json:"bio"`
SortableName string `json:"sortable_name"`
ShortName string `json:"short_name"`
SisUserID string `json:"sis_user_id"`
SisImportID int `json:"sis_import_id"`
IntegrationID string `json:"integration_id"`
CreatedAt time.Time `json:"created_at"`
LoginID string `json:"login_id"`
AvatarURL string `json:"avatar_url"`
Enrollments []Enrollment `json:"enrollments"`
Locale string `json:"locale"`
EffectiveLocale string `json:"effective_locale"`
LastLogin time.Time `json:"last_login"`
TimeZone string `json:"time_zone"`
CanUpdateAvatar bool `json:"can_update_avatar"`
Permissions struct {
CanUpdateName bool `json:"can_update_name"`
CanUpdateAvatar bool `json:"can_update_avatar"`
LimitParentAppWebAccess bool `json:"limit_parent_app_web_access"`
} `json:"permissions"`
client doer
}
// Settings will get the user's settings.
func (u *User) Settings() (settings map[string]interface{}, err error) {
// TODO: find the settings json response and use a struct not a map
return settings, getjson(u.client, &settings, nil, "/users/%d/settings", u.ID)
}
// Courses will return the user's courses.
func (u *User) | (opts ...Option) ([]*Course, error) {
return getCourses(u.client, u.id("/users/%d/courses"), optEnc(opts))
}
// FavoriteCourses returns the user's list of favorites courses.
func (u *User) FavoriteCourses(opts ...Option) ([]*Course, error) {
return getCourses(u.client, "/users/favorites/courses", optEnc(opts))
}
// File will get a user's file by id
func (u *User) File(id int, opts ...Option) (*File, error) {
return getUserFile(u.client, id, u.ID, opts)
}
// Files will return a channel of files.
func (u *User) Files(opts ...Option) <-chan *File {
return filesChannel(
u.client, u.id("/users/%d/files"),
ConcurrentErrorHandler, opts, nil,
)
}
// ListFiles will collect all of the users files.
func (u *User) ListFiles(opts ...Option) ([]*File, error) {
return listFiles(u.client, u.id("/users/%d/files"), nil, opts)
}
// Folders returns a channel of the user's folders.
func (u *User) Folders(opts ...Option) <-chan *Folder {
return foldersChannel(
u.client, u.id("/users/%d/folders"),
ConcurrentErrorHandler, opts, nil,
)
}
// Root will get the root folder for the user's files.
func (u *User) Root(opts ...Option) (*Folder, error) {
f := &Folder{client: u.client}
return f, getjson(u.client, f, optEnc(opts), "/users/%d/folders/root", u.ID)
}
// ListFolders will return a slice of all the user's folders
func (u *User) ListFolders(opts ...Option) ([]*Folder, error) {
return listFolders(u.client, u.id("/users/%d/folders"), nil, opts)
}
// FolderPath will split the path and return a list containing
// all of the folders in the path.
func (u *User) FolderPath(pth string) ([]*Folder, error) {
pth = path.Join(u.id("/users/%d/folders/by_path"), pth)
return folderList(u.client, pth)
}
// UploadFile will upload the contents of an io.Reader to a
// new file in the user's files and return the new file.
func (u *User) UploadFile(
filename string,
r io.Reader,
opts ...Option,
) (*File, error) {
return uploadFile(
u.client, r,
u.id("/users/%d/files"),
newFileUploadParams(filename, opts),
)
}
// CreateFolder will create a new folder.
func (u *User) CreateFolder(path string, opts ...Option) (*Folder, error) {
dir, name := filepath.Split(path)
return createFolder(
u.client, dir, name, opts,
"/users/%d/folders", u.ID,
)
}
// ContextCode returns the context code for the user.
func (u *User) ContextCode() string {
return fmt.Sprintf("user_%d", u.ID)
}
// CalendarEvents gets the user's calendar events.
func (u *User) CalendarEvents(opts ...Option) (cal []CalendarEvent, err error) {
return cal, getjson(u.client, &cal, optEnc(opts), "/users/%d/calendar_events", u.ID)
}
// Bookmarks will get the user's bookmarks
func (u *User) Bookmarks(opts ...Option) (bks []Bookmark, err error) {
return bks, getjson(u.client, &bks, optEnc(opts), "users/%d/bookmarks", u.ID)
}
// CreateBookmark will create a bookmark
func (u *User) CreateBookmark(b *Bookmark) error {
return createBookmark(u.client, u.ID, b)
}
// DeleteBookmark will delete a user's bookmark.
func (u *User) DeleteBookmark(b *Bookmark) error {
return deleteBookmark(u.client, u.ID, b.ID)
}
// Profile will make a call to get the user's profile data.
func (u *User) Profile() (p *UserProfile, err error) {
return p, getjson(u.client, p, nil, "/users/%d/profile", u.ID)
}
// UserProfile is a user's profile data.
type UserProfile struct {
ID int `json:"id"`
LoginID string `json:"login_id"`
Name string `json:"name"`
PrimaryEmail string `json:"primary_email"`
ShortName string `json:"short_name"`
SortableName string `json:"sortable_name"`
TimeZone string `json:"time_zone"`
Bio string `json:"bio"`
Title string `json:"title"`
Calendar map[string]string `json:"calendar"`
LTIUserID string `json:"lti_user_id"`
AvatarURL string `json:"avatar_url"`
EffectiveLocal string `json:"effective_local"`
IntegrationID string `json:"integration_id"`
Local string `json:"local"`
}
// GradedSubmissions gets the user's graded submissions.
func (u *User) GradedSubmissions() (subs []*Submission, err error) {
return subs, getjson(u.client, &subs, nil, "/users/%d/graded_submissions", u.ID)
}
// Submission is a submission type.
type Submission struct {
// A submission type can be any of:
// - "online_text_entry"
// - "online_url"
// - "online_upload"
// - "media_recording"
Type string `json:"submission_type" url:"submission_type"`
AssignmentID int `json:"assignment_id"`
Assignment interface{} `json:"assignment"`
Course interface{} `json:"course"`
Attempt int `json:"attempt"`
Body string `json:"body,omitempty"`
Grade string `json:"grade"`
GradeMatchesCurrentSubmission bool `json:"grade_matches_current_submission"`
HTMLURL string `json:"html_url,omitempty"`
PreviewURL string `json:"preview_url"`
Score float64 `json:"score"`
Comments interface{} `json:"submission_comments"`
SubmittedAt time.Time `json:"submitted_at"`
PostedAt time.Time `json:"posted_at"`
URL string `json:"url,omitempty"`
GraderID int `json:"grader_id"`
GradedAt time.Time `json:"graded_at"`
UserID int `json:"user_id"`
User interface{} `json:"user" url:"-"`
Late bool `json:"late"`
AssignmentVisible bool `json:"assignment_visible"`
Excused bool `json:"excused"`
Missing bool `json:"missing"`
LatePolicyStatus string `json:"late_policy_status"`
PointsDeducted float64 `json:"points_deducted"`
SecondsLate int `json:"seconds_late"`
WorkflowState string `json:"workflow_state"`
ExtraAttempts int `json:"extra_attempts"`
AnonymousID string `json:"anonymous_id"`
// Used assignment submission
FileIDs []int `json:"-" url:"file_ids,omitempty"`
MediaCommentID string `json:"-" url:"media_comment_id,omitempty"`
MediaCommentType string `json:"-" url:"media_comment_type,omitempty"` // "audio" or "video"
}
// Avatars will get a list of the user's avatars.
func (u *User) Avatars() (av []Avatar, err error) {
return av, getjson(u.client, &av, nil, "/users/%d/avatars", u.ID)
}
// Avatar is the avatar data for a user.
type Avatar struct {
ID int `json:"id"`
Type string `json:"type"`
DisplayName string `json:"display_name"`
Filename string `json:"filename"`
URL string `json:"url"`
Token string `json:"token"`
ContentType string `json:"content-type"`
Size int `json:"size"`
}
// UserColor is just a hex color.
type UserColor struct {
HexCode string `json:"hexcode"`
}
// Colors will return a map of the user's custom profile colors.
func (u *User) Colors() (map[string]string, error) {
colors := make(map[string]map[string]string)
err := getjson(u.client, &colors, nil, "users/%d/colors", u.ID)
if err != nil {
return nil, err
}
return colors["custom_colors"], nil
}
// Color will get a specific color from the user's profile.
func (u *User) Color(asset string) (color *UserColor, err error) {
return color, getjson(u.client, color, nil, "users/%d/colors/%s", u.ID, asset)
}
// SetColor will update the color of the given asset to as specific hex color.
func (u *User) SetColor(asset, hexcode string) error {
path := fmt.Sprintf("users/%d/colors/%s", u.ID, asset)
if hexcode[0] == '#' {
hexcode = hexcode[1:]
}
resp, err := put(u.client, path, params{"hexcode": {hexcode}})
if err != nil {
return err
}
return resp.Body.Close()
}
func getUserFile(d doer, id int, userid interface{}, opts optEnc) (*File, error) {
f := &File{client: d}
return f, getjson(d, f, opts, "/users/%v/files/%d", userid, id)
}
func (u *User) id(s string) string {
return fmt.Sprintf(s, u.ID)
}
| Courses | identifier_name |
user.go | package canvas
import (
"fmt"
"io"
"path"
"path/filepath"
"time"
)
// User is a canvas user
type User struct {
ID int `json:"id"`
Name string `json:"name"`
Email string `json:"email"`
Bio string `json:"bio"`
SortableName string `json:"sortable_name"`
ShortName string `json:"short_name"`
SisUserID string `json:"sis_user_id"`
SisImportID int `json:"sis_import_id"`
IntegrationID string `json:"integration_id"`
CreatedAt time.Time `json:"created_at"`
LoginID string `json:"login_id"`
AvatarURL string `json:"avatar_url"`
Enrollments []Enrollment `json:"enrollments"`
Locale string `json:"locale"`
EffectiveLocale string `json:"effective_locale"`
LastLogin time.Time `json:"last_login"`
TimeZone string `json:"time_zone"`
CanUpdateAvatar bool `json:"can_update_avatar"`
Permissions struct {
CanUpdateName bool `json:"can_update_name"`
CanUpdateAvatar bool `json:"can_update_avatar"`
LimitParentAppWebAccess bool `json:"limit_parent_app_web_access"`
} `json:"permissions"`
client doer
}
// Settings will get the user's settings.
func (u *User) Settings() (settings map[string]interface{}, err error) {
// TODO: find the settings json response and use a struct not a map
return settings, getjson(u.client, &settings, nil, "/users/%d/settings", u.ID)
}
// Courses will return the user's courses.
func (u *User) Courses(opts ...Option) ([]*Course, error) {
return getCourses(u.client, u.id("/users/%d/courses"), optEnc(opts))
}
// FavoriteCourses returns the user's list of favorites courses.
func (u *User) FavoriteCourses(opts ...Option) ([]*Course, error) {
return getCourses(u.client, "/users/favorites/courses", optEnc(opts))
}
// File will get a user's file by id
func (u *User) File(id int, opts ...Option) (*File, error) {
return getUserFile(u.client, id, u.ID, opts)
}
// Files will return a channel of files.
func (u *User) Files(opts ...Option) <-chan *File {
return filesChannel(
u.client, u.id("/users/%d/files"),
ConcurrentErrorHandler, opts, nil,
)
}
// ListFiles will collect all of the users files.
func (u *User) ListFiles(opts ...Option) ([]*File, error) {
return listFiles(u.client, u.id("/users/%d/files"), nil, opts)
}
// Folders returns a channel of the user's folders.
func (u *User) Folders(opts ...Option) <-chan *Folder {
return foldersChannel(
u.client, u.id("/users/%d/folders"),
ConcurrentErrorHandler, opts, nil,
)
}
// Root will get the root folder for the user's files.
func (u *User) Root(opts ...Option) (*Folder, error) {
f := &Folder{client: u.client}
return f, getjson(u.client, f, optEnc(opts), "/users/%d/folders/root", u.ID)
}
// ListFolders will return a slice of all the user's folders
func (u *User) ListFolders(opts ...Option) ([]*Folder, error) {
return listFolders(u.client, u.id("/users/%d/folders"), nil, opts)
}
// FolderPath will split the path and return a list containing
// all of the folders in the path.
func (u *User) FolderPath(pth string) ([]*Folder, error) {
pth = path.Join(u.id("/users/%d/folders/by_path"), pth)
return folderList(u.client, pth)
}
// UploadFile will upload the contents of an io.Reader to a
// new file in the user's files and return the new file.
func (u *User) UploadFile(
filename string,
r io.Reader,
opts ...Option,
) (*File, error) {
return uploadFile(
u.client, r,
u.id("/users/%d/files"),
newFileUploadParams(filename, opts),
)
}
// CreateFolder will create a new folder.
func (u *User) CreateFolder(path string, opts ...Option) (*Folder, error) {
dir, name := filepath.Split(path)
return createFolder(
u.client, dir, name, opts,
"/users/%d/folders", u.ID,
)
}
// ContextCode returns the context code for the user.
func (u *User) ContextCode() string {
return fmt.Sprintf("user_%d", u.ID)
}
// CalendarEvents gets the user's calendar events.
func (u *User) CalendarEvents(opts ...Option) (cal []CalendarEvent, err error) {
return cal, getjson(u.client, &cal, optEnc(opts), "/users/%d/calendar_events", u.ID)
}
// Bookmarks will get the user's bookmarks
func (u *User) Bookmarks(opts ...Option) (bks []Bookmark, err error) {
return bks, getjson(u.client, &bks, optEnc(opts), "users/%d/bookmarks", u.ID)
}
// CreateBookmark will create a bookmark
func (u *User) CreateBookmark(b *Bookmark) error {
return createBookmark(u.client, u.ID, b)
}
// DeleteBookmark will delete a user's bookmark.
func (u *User) DeleteBookmark(b *Bookmark) error {
return deleteBookmark(u.client, u.ID, b.ID)
}
// Profile will make a call to get the user's profile data.
func (u *User) Profile() (p *UserProfile, err error) {
return p, getjson(u.client, p, nil, "/users/%d/profile", u.ID)
}
// UserProfile is a user's profile data.
type UserProfile struct {
ID int `json:"id"`
LoginID string `json:"login_id"`
Name string `json:"name"`
PrimaryEmail string `json:"primary_email"`
ShortName string `json:"short_name"`
SortableName string `json:"sortable_name"`
TimeZone string `json:"time_zone"`
Bio string `json:"bio"`
Title string `json:"title"`
Calendar map[string]string `json:"calendar"`
LTIUserID string `json:"lti_user_id"`
AvatarURL string `json:"avatar_url"`
EffectiveLocal string `json:"effective_local"`
IntegrationID string `json:"integration_id"`
Local string `json:"local"`
}
// GradedSubmissions gets the user's graded submissions.
func (u *User) GradedSubmissions() (subs []*Submission, err error) {
return subs, getjson(u.client, &subs, nil, "/users/%d/graded_submissions", u.ID)
}
// Submission is a submission type.
type Submission struct {
// A submission type can be any of:
// - "online_text_entry"
// - "online_url"
// - "online_upload"
// - "media_recording"
Type string `json:"submission_type" url:"submission_type"`
AssignmentID int `json:"assignment_id"`
Assignment interface{} `json:"assignment"`
Course interface{} `json:"course"`
Attempt int `json:"attempt"`
Body string `json:"body,omitempty"`
Grade string `json:"grade"`
GradeMatchesCurrentSubmission bool `json:"grade_matches_current_submission"`
HTMLURL string `json:"html_url,omitempty"`
PreviewURL string `json:"preview_url"`
Score float64 `json:"score"`
Comments interface{} `json:"submission_comments"`
SubmittedAt time.Time `json:"submitted_at"`
PostedAt time.Time `json:"posted_at"`
URL string `json:"url,omitempty"`
GraderID int `json:"grader_id"`
GradedAt time.Time `json:"graded_at"`
UserID int `json:"user_id"`
User interface{} `json:"user" url:"-"`
Late bool `json:"late"`
AssignmentVisible bool `json:"assignment_visible"`
Excused bool `json:"excused"`
Missing bool `json:"missing"`
LatePolicyStatus string `json:"late_policy_status"`
PointsDeducted float64 `json:"points_deducted"`
SecondsLate int `json:"seconds_late"`
WorkflowState string `json:"workflow_state"`
ExtraAttempts int `json:"extra_attempts"`
AnonymousID string `json:"anonymous_id"`
// Used assignment submission
FileIDs []int `json:"-" url:"file_ids,omitempty"`
MediaCommentID string `json:"-" url:"media_comment_id,omitempty"`
MediaCommentType string `json:"-" url:"media_comment_type,omitempty"` // "audio" or "video"
}
// Avatars will get a list of the user's avatars.
func (u *User) Avatars() (av []Avatar, err error) {
return av, getjson(u.client, &av, nil, "/users/%d/avatars", u.ID)
}
// Avatar is the avatar data for a user.
type Avatar struct {
ID int `json:"id"`
Type string `json:"type"`
DisplayName string `json:"display_name"`
Filename string `json:"filename"`
URL string `json:"url"`
Token string `json:"token"`
ContentType string `json:"content-type"`
Size int `json:"size"`
}
// UserColor is just a hex color.
type UserColor struct {
HexCode string `json:"hexcode"`
}
// Colors will return a map of the user's custom profile colors.
func (u *User) Colors() (map[string]string, error) {
colors := make(map[string]map[string]string)
err := getjson(u.client, &colors, nil, "users/%d/colors", u.ID)
if err != nil {
return nil, err
}
return colors["custom_colors"], nil
}
// Color will get a specific color from the user's profile.
func (u *User) Color(asset string) (color *UserColor, err error) {
return color, getjson(u.client, color, nil, "users/%d/colors/%s", u.ID, asset)
}
// SetColor will update the color of the given asset to as specific hex color.
func (u *User) SetColor(asset, hexcode string) error {
path := fmt.Sprintf("users/%d/colors/%s", u.ID, asset)
if hexcode[0] == '#' {
hexcode = hexcode[1:]
}
resp, err := put(u.client, path, params{"hexcode": {hexcode}})
if err != nil {
return err
} | return f, getjson(d, f, opts, "/users/%v/files/%d", userid, id)
}
func (u *User) id(s string) string {
return fmt.Sprintf(s, u.ID)
} | return resp.Body.Close()
}
func getUserFile(d doer, id int, userid interface{}, opts optEnc) (*File, error) {
f := &File{client: d} | random_line_split |
user.go | package canvas
import (
"fmt"
"io"
"path"
"path/filepath"
"time"
)
// User is a canvas user
type User struct {
ID int `json:"id"`
Name string `json:"name"`
Email string `json:"email"`
Bio string `json:"bio"`
SortableName string `json:"sortable_name"`
ShortName string `json:"short_name"`
SisUserID string `json:"sis_user_id"`
SisImportID int `json:"sis_import_id"`
IntegrationID string `json:"integration_id"`
CreatedAt time.Time `json:"created_at"`
LoginID string `json:"login_id"`
AvatarURL string `json:"avatar_url"`
Enrollments []Enrollment `json:"enrollments"`
Locale string `json:"locale"`
EffectiveLocale string `json:"effective_locale"`
LastLogin time.Time `json:"last_login"`
TimeZone string `json:"time_zone"`
CanUpdateAvatar bool `json:"can_update_avatar"`
Permissions struct {
CanUpdateName bool `json:"can_update_name"`
CanUpdateAvatar bool `json:"can_update_avatar"`
LimitParentAppWebAccess bool `json:"limit_parent_app_web_access"`
} `json:"permissions"`
client doer
}
// Settings will get the user's settings.
func (u *User) Settings() (settings map[string]interface{}, err error) {
// TODO: find the settings json response and use a struct not a map
return settings, getjson(u.client, &settings, nil, "/users/%d/settings", u.ID)
}
// Courses will return the user's courses.
func (u *User) Courses(opts ...Option) ([]*Course, error) {
return getCourses(u.client, u.id("/users/%d/courses"), optEnc(opts))
}
// FavoriteCourses returns the user's list of favorites courses.
func (u *User) FavoriteCourses(opts ...Option) ([]*Course, error) {
return getCourses(u.client, "/users/favorites/courses", optEnc(opts))
}
// File will get a user's file by id
func (u *User) File(id int, opts ...Option) (*File, error) {
return getUserFile(u.client, id, u.ID, opts)
}
// Files will return a channel of files.
func (u *User) Files(opts ...Option) <-chan *File {
return filesChannel(
u.client, u.id("/users/%d/files"),
ConcurrentErrorHandler, opts, nil,
)
}
// ListFiles will collect all of the users files.
func (u *User) ListFiles(opts ...Option) ([]*File, error) {
return listFiles(u.client, u.id("/users/%d/files"), nil, opts)
}
// Folders returns a channel of the user's folders.
func (u *User) Folders(opts ...Option) <-chan *Folder {
return foldersChannel(
u.client, u.id("/users/%d/folders"),
ConcurrentErrorHandler, opts, nil,
)
}
// Root will get the root folder for the user's files.
func (u *User) Root(opts ...Option) (*Folder, error) {
f := &Folder{client: u.client}
return f, getjson(u.client, f, optEnc(opts), "/users/%d/folders/root", u.ID)
}
// ListFolders will return a slice of all the user's folders
func (u *User) ListFolders(opts ...Option) ([]*Folder, error) {
return listFolders(u.client, u.id("/users/%d/folders"), nil, opts)
}
// FolderPath will split the path and return a list containing
// all of the folders in the path.
func (u *User) FolderPath(pth string) ([]*Folder, error) {
pth = path.Join(u.id("/users/%d/folders/by_path"), pth)
return folderList(u.client, pth)
}
// UploadFile will upload the contents of an io.Reader to a
// new file in the user's files and return the new file.
func (u *User) UploadFile(
filename string,
r io.Reader,
opts ...Option,
) (*File, error) {
return uploadFile(
u.client, r,
u.id("/users/%d/files"),
newFileUploadParams(filename, opts),
)
}
// CreateFolder will create a new folder.
func (u *User) CreateFolder(path string, opts ...Option) (*Folder, error) {
dir, name := filepath.Split(path)
return createFolder(
u.client, dir, name, opts,
"/users/%d/folders", u.ID,
)
}
// ContextCode returns the context code for the user.
func (u *User) ContextCode() string {
return fmt.Sprintf("user_%d", u.ID)
}
// CalendarEvents gets the user's calendar events.
func (u *User) CalendarEvents(opts ...Option) (cal []CalendarEvent, err error) {
return cal, getjson(u.client, &cal, optEnc(opts), "/users/%d/calendar_events", u.ID)
}
// Bookmarks will get the user's bookmarks
func (u *User) Bookmarks(opts ...Option) (bks []Bookmark, err error) {
return bks, getjson(u.client, &bks, optEnc(opts), "users/%d/bookmarks", u.ID)
}
// CreateBookmark will create a bookmark
func (u *User) CreateBookmark(b *Bookmark) error {
return createBookmark(u.client, u.ID, b)
}
// DeleteBookmark will delete a user's bookmark.
func (u *User) DeleteBookmark(b *Bookmark) error {
return deleteBookmark(u.client, u.ID, b.ID)
}
// Profile will make a call to get the user's profile data.
func (u *User) Profile() (p *UserProfile, err error) {
return p, getjson(u.client, p, nil, "/users/%d/profile", u.ID)
}
// UserProfile is a user's profile data.
type UserProfile struct {
ID int `json:"id"`
LoginID string `json:"login_id"`
Name string `json:"name"`
PrimaryEmail string `json:"primary_email"`
ShortName string `json:"short_name"`
SortableName string `json:"sortable_name"`
TimeZone string `json:"time_zone"`
Bio string `json:"bio"`
Title string `json:"title"`
Calendar map[string]string `json:"calendar"`
LTIUserID string `json:"lti_user_id"`
AvatarURL string `json:"avatar_url"`
EffectiveLocal string `json:"effective_local"`
IntegrationID string `json:"integration_id"`
Local string `json:"local"`
}
// GradedSubmissions gets the user's graded submissions.
func (u *User) GradedSubmissions() (subs []*Submission, err error) {
return subs, getjson(u.client, &subs, nil, "/users/%d/graded_submissions", u.ID)
}
// Submission is a submission type.
type Submission struct {
// A submission type can be any of:
// - "online_text_entry"
// - "online_url"
// - "online_upload"
// - "media_recording"
Type string `json:"submission_type" url:"submission_type"`
AssignmentID int `json:"assignment_id"`
Assignment interface{} `json:"assignment"`
Course interface{} `json:"course"`
Attempt int `json:"attempt"`
Body string `json:"body,omitempty"`
Grade string `json:"grade"`
GradeMatchesCurrentSubmission bool `json:"grade_matches_current_submission"`
HTMLURL string `json:"html_url,omitempty"`
PreviewURL string `json:"preview_url"`
Score float64 `json:"score"`
Comments interface{} `json:"submission_comments"`
SubmittedAt time.Time `json:"submitted_at"`
PostedAt time.Time `json:"posted_at"`
URL string `json:"url,omitempty"`
GraderID int `json:"grader_id"`
GradedAt time.Time `json:"graded_at"`
UserID int `json:"user_id"`
User interface{} `json:"user" url:"-"`
Late bool `json:"late"`
AssignmentVisible bool `json:"assignment_visible"`
Excused bool `json:"excused"`
Missing bool `json:"missing"`
LatePolicyStatus string `json:"late_policy_status"`
PointsDeducted float64 `json:"points_deducted"`
SecondsLate int `json:"seconds_late"`
WorkflowState string `json:"workflow_state"`
ExtraAttempts int `json:"extra_attempts"`
AnonymousID string `json:"anonymous_id"`
// Used assignment submission
FileIDs []int `json:"-" url:"file_ids,omitempty"`
MediaCommentID string `json:"-" url:"media_comment_id,omitempty"`
MediaCommentType string `json:"-" url:"media_comment_type,omitempty"` // "audio" or "video"
}
// Avatars will get a list of the user's avatars.
func (u *User) Avatars() (av []Avatar, err error) {
return av, getjson(u.client, &av, nil, "/users/%d/avatars", u.ID)
}
// Avatar is the avatar data for a user.
type Avatar struct {
ID int `json:"id"`
Type string `json:"type"`
DisplayName string `json:"display_name"`
Filename string `json:"filename"`
URL string `json:"url"`
Token string `json:"token"`
ContentType string `json:"content-type"`
Size int `json:"size"`
}
// UserColor is just a hex color.
type UserColor struct {
HexCode string `json:"hexcode"`
}
// Colors will return a map of the user's custom profile colors.
func (u *User) Colors() (map[string]string, error) {
colors := make(map[string]map[string]string)
err := getjson(u.client, &colors, nil, "users/%d/colors", u.ID)
if err != nil {
return nil, err
}
return colors["custom_colors"], nil
}
// Color will get a specific color from the user's profile.
func (u *User) Color(asset string) (color *UserColor, err error) {
return color, getjson(u.client, color, nil, "users/%d/colors/%s", u.ID, asset)
}
// SetColor will update the color of the given asset to as specific hex color.
func (u *User) SetColor(asset, hexcode string) error {
path := fmt.Sprintf("users/%d/colors/%s", u.ID, asset)
if hexcode[0] == '#' {
hexcode = hexcode[1:]
}
resp, err := put(u.client, path, params{"hexcode": {hexcode}})
if err != nil |
return resp.Body.Close()
}
func getUserFile(d doer, id int, userid interface{}, opts optEnc) (*File, error) {
f := &File{client: d}
return f, getjson(d, f, opts, "/users/%v/files/%d", userid, id)
}
func (u *User) id(s string) string {
return fmt.Sprintf(s, u.ID)
}
| {
return err
} | conditional_block |
user.go | package canvas
import (
"fmt"
"io"
"path"
"path/filepath"
"time"
)
// User is a canvas user
type User struct {
ID int `json:"id"`
Name string `json:"name"`
Email string `json:"email"`
Bio string `json:"bio"`
SortableName string `json:"sortable_name"`
ShortName string `json:"short_name"`
SisUserID string `json:"sis_user_id"`
SisImportID int `json:"sis_import_id"`
IntegrationID string `json:"integration_id"`
CreatedAt time.Time `json:"created_at"`
LoginID string `json:"login_id"`
AvatarURL string `json:"avatar_url"`
Enrollments []Enrollment `json:"enrollments"`
Locale string `json:"locale"`
EffectiveLocale string `json:"effective_locale"`
LastLogin time.Time `json:"last_login"`
TimeZone string `json:"time_zone"`
CanUpdateAvatar bool `json:"can_update_avatar"`
Permissions struct {
CanUpdateName bool `json:"can_update_name"`
CanUpdateAvatar bool `json:"can_update_avatar"`
LimitParentAppWebAccess bool `json:"limit_parent_app_web_access"`
} `json:"permissions"`
client doer
}
// Settings will get the user's settings.
func (u *User) Settings() (settings map[string]interface{}, err error) {
// TODO: find the settings json response and use a struct not a map
return settings, getjson(u.client, &settings, nil, "/users/%d/settings", u.ID)
}
// Courses will return the user's courses.
func (u *User) Courses(opts ...Option) ([]*Course, error) {
return getCourses(u.client, u.id("/users/%d/courses"), optEnc(opts))
}
// FavoriteCourses returns the user's list of favorites courses.
func (u *User) FavoriteCourses(opts ...Option) ([]*Course, error) {
return getCourses(u.client, "/users/favorites/courses", optEnc(opts))
}
// File will get a user's file by id
func (u *User) File(id int, opts ...Option) (*File, error) {
return getUserFile(u.client, id, u.ID, opts)
}
// Files will return a channel of files.
func (u *User) Files(opts ...Option) <-chan *File {
return filesChannel(
u.client, u.id("/users/%d/files"),
ConcurrentErrorHandler, opts, nil,
)
}
// ListFiles will collect all of the users files.
func (u *User) ListFiles(opts ...Option) ([]*File, error) {
return listFiles(u.client, u.id("/users/%d/files"), nil, opts)
}
// Folders returns a channel of the user's folders.
func (u *User) Folders(opts ...Option) <-chan *Folder {
return foldersChannel(
u.client, u.id("/users/%d/folders"),
ConcurrentErrorHandler, opts, nil,
)
}
// Root will get the root folder for the user's files.
func (u *User) Root(opts ...Option) (*Folder, error) {
f := &Folder{client: u.client}
return f, getjson(u.client, f, optEnc(opts), "/users/%d/folders/root", u.ID)
}
// ListFolders will return a slice of all the user's folders
func (u *User) ListFolders(opts ...Option) ([]*Folder, error) {
return listFolders(u.client, u.id("/users/%d/folders"), nil, opts)
}
// FolderPath will split the path and return a list containing
// all of the folders in the path.
func (u *User) FolderPath(pth string) ([]*Folder, error) {
pth = path.Join(u.id("/users/%d/folders/by_path"), pth)
return folderList(u.client, pth)
}
// UploadFile will upload the contents of an io.Reader to a
// new file in the user's files and return the new file.
func (u *User) UploadFile(
filename string,
r io.Reader,
opts ...Option,
) (*File, error) {
return uploadFile(
u.client, r,
u.id("/users/%d/files"),
newFileUploadParams(filename, opts),
)
}
// CreateFolder will create a new folder.
func (u *User) CreateFolder(path string, opts ...Option) (*Folder, error) {
dir, name := filepath.Split(path)
return createFolder(
u.client, dir, name, opts,
"/users/%d/folders", u.ID,
)
}
// ContextCode returns the context code for the user.
func (u *User) ContextCode() string {
return fmt.Sprintf("user_%d", u.ID)
}
// CalendarEvents gets the user's calendar events.
func (u *User) CalendarEvents(opts ...Option) (cal []CalendarEvent, err error) {
return cal, getjson(u.client, &cal, optEnc(opts), "/users/%d/calendar_events", u.ID)
}
// Bookmarks will get the user's bookmarks
func (u *User) Bookmarks(opts ...Option) (bks []Bookmark, err error) {
return bks, getjson(u.client, &bks, optEnc(opts), "users/%d/bookmarks", u.ID)
}
// CreateBookmark will create a bookmark
func (u *User) CreateBookmark(b *Bookmark) error {
return createBookmark(u.client, u.ID, b)
}
// DeleteBookmark will delete a user's bookmark.
func (u *User) DeleteBookmark(b *Bookmark) error {
return deleteBookmark(u.client, u.ID, b.ID)
}
// Profile will make a call to get the user's profile data.
func (u *User) Profile() (p *UserProfile, err error) {
return p, getjson(u.client, p, nil, "/users/%d/profile", u.ID)
}
// UserProfile is a user's profile data.
type UserProfile struct {
ID int `json:"id"`
LoginID string `json:"login_id"`
Name string `json:"name"`
PrimaryEmail string `json:"primary_email"`
ShortName string `json:"short_name"`
SortableName string `json:"sortable_name"`
TimeZone string `json:"time_zone"`
Bio string `json:"bio"`
Title string `json:"title"`
Calendar map[string]string `json:"calendar"`
LTIUserID string `json:"lti_user_id"`
AvatarURL string `json:"avatar_url"`
EffectiveLocal string `json:"effective_local"`
IntegrationID string `json:"integration_id"`
Local string `json:"local"`
}
// GradedSubmissions gets the user's graded submissions.
func (u *User) GradedSubmissions() (subs []*Submission, err error) {
return subs, getjson(u.client, &subs, nil, "/users/%d/graded_submissions", u.ID)
}
// Submission is a submission type.
type Submission struct {
// A submission type can be any of:
// - "online_text_entry"
// - "online_url"
// - "online_upload"
// - "media_recording"
Type string `json:"submission_type" url:"submission_type"`
AssignmentID int `json:"assignment_id"`
Assignment interface{} `json:"assignment"`
Course interface{} `json:"course"`
Attempt int `json:"attempt"`
Body string `json:"body,omitempty"`
Grade string `json:"grade"`
GradeMatchesCurrentSubmission bool `json:"grade_matches_current_submission"`
HTMLURL string `json:"html_url,omitempty"`
PreviewURL string `json:"preview_url"`
Score float64 `json:"score"`
Comments interface{} `json:"submission_comments"`
SubmittedAt time.Time `json:"submitted_at"`
PostedAt time.Time `json:"posted_at"`
URL string `json:"url,omitempty"`
GraderID int `json:"grader_id"`
GradedAt time.Time `json:"graded_at"`
UserID int `json:"user_id"`
User interface{} `json:"user" url:"-"`
Late bool `json:"late"`
AssignmentVisible bool `json:"assignment_visible"`
Excused bool `json:"excused"`
Missing bool `json:"missing"`
LatePolicyStatus string `json:"late_policy_status"`
PointsDeducted float64 `json:"points_deducted"`
SecondsLate int `json:"seconds_late"`
WorkflowState string `json:"workflow_state"`
ExtraAttempts int `json:"extra_attempts"`
AnonymousID string `json:"anonymous_id"`
// Used assignment submission
FileIDs []int `json:"-" url:"file_ids,omitempty"`
MediaCommentID string `json:"-" url:"media_comment_id,omitempty"`
MediaCommentType string `json:"-" url:"media_comment_type,omitempty"` // "audio" or "video"
}
// Avatars will get a list of the user's avatars.
func (u *User) Avatars() (av []Avatar, err error) {
return av, getjson(u.client, &av, nil, "/users/%d/avatars", u.ID)
}
// Avatar is the avatar data for a user.
type Avatar struct {
ID int `json:"id"`
Type string `json:"type"`
DisplayName string `json:"display_name"`
Filename string `json:"filename"`
URL string `json:"url"`
Token string `json:"token"`
ContentType string `json:"content-type"`
Size int `json:"size"`
}
// UserColor is just a hex color.
type UserColor struct {
HexCode string `json:"hexcode"`
}
// Colors will return a map of the user's custom profile colors.
func (u *User) Colors() (map[string]string, error) {
colors := make(map[string]map[string]string)
err := getjson(u.client, &colors, nil, "users/%d/colors", u.ID)
if err != nil {
return nil, err
}
return colors["custom_colors"], nil
}
// Color will get a specific color from the user's profile.
func (u *User) Color(asset string) (color *UserColor, err error) {
return color, getjson(u.client, color, nil, "users/%d/colors/%s", u.ID, asset)
}
// SetColor will update the color of the given asset to as specific hex color.
func (u *User) SetColor(asset, hexcode string) error |
func getUserFile(d doer, id int, userid interface{}, opts optEnc) (*File, error) {
f := &File{client: d}
return f, getjson(d, f, opts, "/users/%v/files/%d", userid, id)
}
func (u *User) id(s string) string {
return fmt.Sprintf(s, u.ID)
}
| {
path := fmt.Sprintf("users/%d/colors/%s", u.ID, asset)
if hexcode[0] == '#' {
hexcode = hexcode[1:]
}
resp, err := put(u.client, path, params{"hexcode": {hexcode}})
if err != nil {
return err
}
return resp.Body.Close()
} | identifier_body |
actions.py | # This files contains your custom actions which can be used to run
# custom Python code.
#
# See this guide on how to implement these action:
# https://rasa.com/docs/rasa/core/actions/#custom-actions/
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from typing import Any, Text, Dict, List
from rasa_sdk import Action, Tracker
from rasa_sdk.executor import CollectingDispatcher
from rasa_sdk.forms import FormAction
from typing import Union
from geopy.geocoders import Nominatim
import csv
import pandas as pd
import geocoder
from rasa_sdk.events import SlotSet, AllSlotsReset
import requests
import json
from random import randint
import datetime
import os
import yaml
import csv
import pickle
import pandas as pd
import numpy as np
from nltk.corpus import stopwords
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
import string
from sklearn.svm import SVC
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
import pickle
from sklearn.metrics import classification_report
names=[]
phones=[]
ages=[]
symptoms=[]
sides=[]
intensities=[]
locations=[]
class ActionForm(FormAction):
def name(self) -> Text:
self.sn=0
return "form_questions"
@staticmethod
def required_slots(tracker:Tracker) -> List[Text]:
return ['name','phone','age']#,'patient id'
def submit(
self,
dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any],
) -> List[Dict]:
dispatcher.utter_message("Great! You're registered")
return []
def slot_mappings(self) -> Dict[Text,Union[Dict, List[Dict]]]:
return{
'name': [self.from_entity(entity='name', intent='form_entry'),
self.from_text()],
'age': [self.from_entity(entity='age', intent='form_entry'),
self.from_text()],
'phone': [self.from_entity(entity='phone', intent='form_entry'),
self.from_text()]
}
class ActionForm(FormAction):
def name(self) -> Text:
self.sn=0
return "doc_form"
@staticmethod
def required_slots(tracker:Tracker) -> List[Text]:
return ['doctor']#,'patient id'
def submit(self, dispatcher: CollectingDispatcher,
tracker:Tracker, domain: Dict[Text,Any],
) -> List[Dict]:
dispatcher.utter_message(text="Processing..")
return[]
def slot_mappings(self) -> Dict[Text,Union[Dict, List[Dict]]]:
return{
'doctor': [self.from_entity(entity='doctor', intent='doc_name'),
self.from_text()]
}
class ActionForm(FormAction):
def name(self) -> Text:
return "location_form"
@staticmethod
def required_slots(tracker:Tracker) -> List[Text]:
return ['location']#,'patient id'
def submit(self, dispatcher: CollectingDispatcher,
tracker:Tracker, domain: Dict[Text,Any],
) -> List[Dict]:
location=tracker.get_slot('location')
# dictionary of lists
action_place_search()
return[]
def slot_mappings(self) -> Dict[Text,Union[Dict, List[Dict]]]:
return{
'location': [self.from_entity(entity='location', intent='location_entry'),
self.from_text()]
}
class Side(Action):
def name(self):
return 'action_side'
def run(self,dispatcher,tracker,domain):
symp=tracker.get_slot('symptom')
buttons = [{'title': 'na', 'payload': '/side{"side":"na"}'},{'title': 'left', 'payload': '/side{"side":"left"}'}, {'title': 'right', 'payload': '/side{"side":"right"}'}, {'title': 'both', 'payload': '/side{"side":"both"}'}]
dispatcher.utter_message(template='utter_ask_side',buttons=buttons)
return[]
def slot_mappings(self) -> Dict[Text,Union[Dict, List[Dict]]]:
return{
'side': [self.from_entity(entity='side', intent='side'),
self.from_text()]
}
class SymptomSearch(Action):
def name(self):
return 'symptom_search'
def run(self,dispatcher,tracker,domain):
symp=tracker.get_slot('symptom')
dispatcher.utter_message('Please choose the intensity of discomformt on a scale of 1-10: ')
buttons = [{'title': "1", 'payload': '/intensity{"intensity":"1"}'}, {'title': "2", 'payload': '/intensity{"intensity":"2"}'}, {'title': "3", 'payload': '/intensity{"intensity":"3"}'}, {'title': "4", 'payload': '/intensity{"intensity":"4"}'},{'title': "5", 'payload': '/intensity{"intensity":"5"}'}, {'title': "6", 'payload': '/intensity{"intensity":"6"}'},{'title': "7", 'payload': '/intensity{"intensity":"7"}'}, {'title': "8", 'payload': '/intensity{"intensity":"8"}'},{'title': "9", 'payload': '/intensity{"intensity":"9"}'}, {'title': "10", 'payload': '/intensity{"intensity":"10"}'}]
dispatcher.utter_button_template('utter_ask_intensity', buttons, tracker)
return []
def slot_mappings(self) -> Dict[Text,Union[Dict, List[Dict]]]:
return{
'intensity': [self.from_entity(entity='intensity', intent='intensity'),
self.from_text()]
}
class ActionIntensity(Action):
def name(self):
return 'action_intensity'
def run(self, dispatcher, tracker, domain):
intense=int(tracker.get_slot('intensity'))
side=tracker.get_slot('side')
symp=tracker.get_slot('symptom')
filename = r'C:\Users\mehak\Desktop\demobot\chatbot_model_4.sav'
loaded_model = pickle.load(open(filename, 'rb'))
probabilty= pd.DataFrame(loaded_model.predict_proba([symp]), columns=loaded_model.classes_)
probability_score=probabilty.melt()
symptom_list=[]
if intense>8:
dispatcher.utter_message("Please call 911 for emergencies")
for i in range(0,3):
num=probability_score.nlargest(3,'value')['value']
if max(num)>95:
num=probability_score.nlargest(3,'value')['variable'].index[i]
symptom_list.append(probability_score.nlargest(3,'value')['variable'][num])
buttons = [{'title': symptom_list[0], 'payload': '/picking_specialty'}]
else:
for i in range(0,3):
num=probability_score.nlargest(3,'value')['variable'].index[i]
symptom_list.append(probability_score.nlargest(3,'value')['variable'][num])
buttons = [{'title': symptom_list[0], 'payload': '/picking_specialty'},{'title': symptom_list[1] , 'payload': '/picking_specialty'},{'title': symptom_list[2] , 'payload': '/picking_specialty'}]
dispatcher.utter_button_template('utter_ask_spec', buttons, tracker)
class Summarize(Action):
def name(self) -> Text:
return 'summarize'
def run(self, dispatcher, tracker, domain):
name=tracker.get_slot('name')
phone=tracker.get_slot('phone')
age=tracker.get_slot('age')
symptom=tracker.get_slot('symptom')
intense=str(tracker.get_slot('intensity'))
side=tracker.get_slot('side')
location=tracker.get_slot('location')
names.append(name)
phones.append(phone)
ages.append(age)
symptoms.append(symptom)
locations.append(location)
intensities.append(intense)
sides.append(side)
dict = {'Name': names, 'Phone Number': phones, 'Age': ages, 'Symptoms': symptoms, 'Location':locations, 'Intensity of pain':intensities,'Location of Pain':sides}
df = pd.DataFrame(dict)
df.to_csv('Patient Data.csv', header=False, index=False)
dispatcher.utter_message("Here's a summary of the information your doctor will be provided with: \n \tName: "+str(name)+"\n \tAge: "+str(age)+"\n \tPhone: "+str(phone)+"\n \tSymptoms: "+str(symptom)+"\n \tIntensity of pain: "+str(intense)+"\n \tSide of pain (if applicable):"+str(side))
class ActionPlaceSearch(Action):
def name(self):
#define the name of the action
return 'action_place_search'
def run(self, dispatcher, tracker, domain):
#retrieve slot values
query = tracker.get_slot('amenity')
radius = 200
#retrieve google api key
with open("./ga_credentials.yml", 'r') as ymlfile:
cfg = yaml.load(ymlfile)
key = cfg['credentials']['GOOGLE_KEY']
import requests
location=tracker.get_slot('location')
geolocator = Nominatim(user_agent="demobot")
location = geolocator.geocode(location)
place = requests.get('https://maps.googleapis.com/maps/api/place/nearbysearch/json?location={},{}&radius={}&type={}&key={}'.format(location.latitude, location.longitude, radius, query, key)).json()
if len(place['results'])==0:
dispatcher.utter_message("Sorry, I didn't find anything")
return []#SlotSet('location_match', 'none')
else:
for i in place['results']:
if 'rating' and 'vicinity' in i.keys():
name = i['name']
rating = i['rating']
address = i['vicinity']
if i['opening_hours']['open_now']==True:
opening_hours = 'open'
else:
opening_hours = 'closed'
break
speech = "I found a {} called {} based on your specified parameters.".format(query, name)
dispatcher.utter_message(speech) #send the response back to the user | #SlotSet('location_match', 'one'), SlotSet('rating', rating), SlotSet('address', address), SlotSet('opening_hours', opening_hours)
class ActionHelloWorld(Action):
def name(self) -> Text:
return "action_check_number"
def run(self, dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
number=tracker.get_slot("phone")
name=tracker.get_slot("name")
if number in phones:
dispatcher.utter_message(text="You're already registered")
else:
names.append(name)
dispatcher.utter_message(text="You're added to the list!")
return []
class ActionFallback(Action):
def name(self) -> Text:
return "action_default_fallback"
def run(self, dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
dispatcher.utter_message(text="Sorry, I don't understand. Could you rephrase that?")
return []
# class ActionHelloWorld(Action):
#
# def name(self) -> Text:
# return "action_hello_world"
#
# def run(self, dispatcher: CollectingDispatcher,
# tracker: Tracker,
# domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
#
# dispatcher.utter_message(text="Hello World!")
#
# return [] | return [] #set returned details as slots | random_line_split |
actions.py | # This files contains your custom actions which can be used to run
# custom Python code.
#
# See this guide on how to implement these action:
# https://rasa.com/docs/rasa/core/actions/#custom-actions/
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from typing import Any, Text, Dict, List
from rasa_sdk import Action, Tracker
from rasa_sdk.executor import CollectingDispatcher
from rasa_sdk.forms import FormAction
from typing import Union
from geopy.geocoders import Nominatim
import csv
import pandas as pd
import geocoder
from rasa_sdk.events import SlotSet, AllSlotsReset
import requests
import json
from random import randint
import datetime
import os
import yaml
import csv
import pickle
import pandas as pd
import numpy as np
from nltk.corpus import stopwords
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
import string
from sklearn.svm import SVC
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
import pickle
from sklearn.metrics import classification_report
names=[]
phones=[]
ages=[]
symptoms=[]
sides=[]
intensities=[]
locations=[]
class ActionForm(FormAction):
def name(self) -> Text:
self.sn=0
return "form_questions"
@staticmethod
def required_slots(tracker:Tracker) -> List[Text]:
return ['name','phone','age']#,'patient id'
def | (
self,
dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any],
) -> List[Dict]:
dispatcher.utter_message("Great! You're registered")
return []
def slot_mappings(self) -> Dict[Text,Union[Dict, List[Dict]]]:
return{
'name': [self.from_entity(entity='name', intent='form_entry'),
self.from_text()],
'age': [self.from_entity(entity='age', intent='form_entry'),
self.from_text()],
'phone': [self.from_entity(entity='phone', intent='form_entry'),
self.from_text()]
}
class ActionForm(FormAction):
def name(self) -> Text:
self.sn=0
return "doc_form"
@staticmethod
def required_slots(tracker:Tracker) -> List[Text]:
return ['doctor']#,'patient id'
def submit(self, dispatcher: CollectingDispatcher,
tracker:Tracker, domain: Dict[Text,Any],
) -> List[Dict]:
dispatcher.utter_message(text="Processing..")
return[]
def slot_mappings(self) -> Dict[Text,Union[Dict, List[Dict]]]:
return{
'doctor': [self.from_entity(entity='doctor', intent='doc_name'),
self.from_text()]
}
class ActionForm(FormAction):
def name(self) -> Text:
return "location_form"
@staticmethod
def required_slots(tracker:Tracker) -> List[Text]:
return ['location']#,'patient id'
def submit(self, dispatcher: CollectingDispatcher,
tracker:Tracker, domain: Dict[Text,Any],
) -> List[Dict]:
location=tracker.get_slot('location')
# dictionary of lists
action_place_search()
return[]
def slot_mappings(self) -> Dict[Text,Union[Dict, List[Dict]]]:
return{
'location': [self.from_entity(entity='location', intent='location_entry'),
self.from_text()]
}
class Side(Action):
def name(self):
return 'action_side'
def run(self,dispatcher,tracker,domain):
symp=tracker.get_slot('symptom')
buttons = [{'title': 'na', 'payload': '/side{"side":"na"}'},{'title': 'left', 'payload': '/side{"side":"left"}'}, {'title': 'right', 'payload': '/side{"side":"right"}'}, {'title': 'both', 'payload': '/side{"side":"both"}'}]
dispatcher.utter_message(template='utter_ask_side',buttons=buttons)
return[]
def slot_mappings(self) -> Dict[Text,Union[Dict, List[Dict]]]:
return{
'side': [self.from_entity(entity='side', intent='side'),
self.from_text()]
}
class SymptomSearch(Action):
def name(self):
return 'symptom_search'
def run(self,dispatcher,tracker,domain):
symp=tracker.get_slot('symptom')
dispatcher.utter_message('Please choose the intensity of discomformt on a scale of 1-10: ')
buttons = [{'title': "1", 'payload': '/intensity{"intensity":"1"}'}, {'title': "2", 'payload': '/intensity{"intensity":"2"}'}, {'title': "3", 'payload': '/intensity{"intensity":"3"}'}, {'title': "4", 'payload': '/intensity{"intensity":"4"}'},{'title': "5", 'payload': '/intensity{"intensity":"5"}'}, {'title': "6", 'payload': '/intensity{"intensity":"6"}'},{'title': "7", 'payload': '/intensity{"intensity":"7"}'}, {'title': "8", 'payload': '/intensity{"intensity":"8"}'},{'title': "9", 'payload': '/intensity{"intensity":"9"}'}, {'title': "10", 'payload': '/intensity{"intensity":"10"}'}]
dispatcher.utter_button_template('utter_ask_intensity', buttons, tracker)
return []
def slot_mappings(self) -> Dict[Text,Union[Dict, List[Dict]]]:
return{
'intensity': [self.from_entity(entity='intensity', intent='intensity'),
self.from_text()]
}
class ActionIntensity(Action):
def name(self):
return 'action_intensity'
def run(self, dispatcher, tracker, domain):
intense=int(tracker.get_slot('intensity'))
side=tracker.get_slot('side')
symp=tracker.get_slot('symptom')
filename = r'C:\Users\mehak\Desktop\demobot\chatbot_model_4.sav'
loaded_model = pickle.load(open(filename, 'rb'))
probabilty= pd.DataFrame(loaded_model.predict_proba([symp]), columns=loaded_model.classes_)
probability_score=probabilty.melt()
symptom_list=[]
if intense>8:
dispatcher.utter_message("Please call 911 for emergencies")
for i in range(0,3):
num=probability_score.nlargest(3,'value')['value']
if max(num)>95:
num=probability_score.nlargest(3,'value')['variable'].index[i]
symptom_list.append(probability_score.nlargest(3,'value')['variable'][num])
buttons = [{'title': symptom_list[0], 'payload': '/picking_specialty'}]
else:
for i in range(0,3):
num=probability_score.nlargest(3,'value')['variable'].index[i]
symptom_list.append(probability_score.nlargest(3,'value')['variable'][num])
buttons = [{'title': symptom_list[0], 'payload': '/picking_specialty'},{'title': symptom_list[1] , 'payload': '/picking_specialty'},{'title': symptom_list[2] , 'payload': '/picking_specialty'}]
dispatcher.utter_button_template('utter_ask_spec', buttons, tracker)
class Summarize(Action):
def name(self) -> Text:
return 'summarize'
def run(self, dispatcher, tracker, domain):
name=tracker.get_slot('name')
phone=tracker.get_slot('phone')
age=tracker.get_slot('age')
symptom=tracker.get_slot('symptom')
intense=str(tracker.get_slot('intensity'))
side=tracker.get_slot('side')
location=tracker.get_slot('location')
names.append(name)
phones.append(phone)
ages.append(age)
symptoms.append(symptom)
locations.append(location)
intensities.append(intense)
sides.append(side)
dict = {'Name': names, 'Phone Number': phones, 'Age': ages, 'Symptoms': symptoms, 'Location':locations, 'Intensity of pain':intensities,'Location of Pain':sides}
df = pd.DataFrame(dict)
df.to_csv('Patient Data.csv', header=False, index=False)
dispatcher.utter_message("Here's a summary of the information your doctor will be provided with: \n \tName: "+str(name)+"\n \tAge: "+str(age)+"\n \tPhone: "+str(phone)+"\n \tSymptoms: "+str(symptom)+"\n \tIntensity of pain: "+str(intense)+"\n \tSide of pain (if applicable):"+str(side))
class ActionPlaceSearch(Action):
def name(self):
#define the name of the action
return 'action_place_search'
def run(self, dispatcher, tracker, domain):
#retrieve slot values
query = tracker.get_slot('amenity')
radius = 200
#retrieve google api key
with open("./ga_credentials.yml", 'r') as ymlfile:
cfg = yaml.load(ymlfile)
key = cfg['credentials']['GOOGLE_KEY']
import requests
location=tracker.get_slot('location')
geolocator = Nominatim(user_agent="demobot")
location = geolocator.geocode(location)
place = requests.get('https://maps.googleapis.com/maps/api/place/nearbysearch/json?location={},{}&radius={}&type={}&key={}'.format(location.latitude, location.longitude, radius, query, key)).json()
if len(place['results'])==0:
dispatcher.utter_message("Sorry, I didn't find anything")
return []#SlotSet('location_match', 'none')
else:
for i in place['results']:
if 'rating' and 'vicinity' in i.keys():
name = i['name']
rating = i['rating']
address = i['vicinity']
if i['opening_hours']['open_now']==True:
opening_hours = 'open'
else:
opening_hours = 'closed'
break
speech = "I found a {} called {} based on your specified parameters.".format(query, name)
dispatcher.utter_message(speech) #send the response back to the user
return [] #set returned details as slots
#SlotSet('location_match', 'one'), SlotSet('rating', rating), SlotSet('address', address), SlotSet('opening_hours', opening_hours)
class ActionHelloWorld(Action):
def name(self) -> Text:
return "action_check_number"
def run(self, dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
number=tracker.get_slot("phone")
name=tracker.get_slot("name")
if number in phones:
dispatcher.utter_message(text="You're already registered")
else:
names.append(name)
dispatcher.utter_message(text="You're added to the list!")
return []
class ActionFallback(Action):
def name(self) -> Text:
return "action_default_fallback"
def run(self, dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
dispatcher.utter_message(text="Sorry, I don't understand. Could you rephrase that?")
return []
# class ActionHelloWorld(Action):
#
# def name(self) -> Text:
# return "action_hello_world"
#
# def run(self, dispatcher: CollectingDispatcher,
# tracker: Tracker,
# domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
#
# dispatcher.utter_message(text="Hello World!")
#
# return []
| submit | identifier_name |
actions.py | # This files contains your custom actions which can be used to run
# custom Python code.
#
# See this guide on how to implement these action:
# https://rasa.com/docs/rasa/core/actions/#custom-actions/
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from typing import Any, Text, Dict, List
from rasa_sdk import Action, Tracker
from rasa_sdk.executor import CollectingDispatcher
from rasa_sdk.forms import FormAction
from typing import Union
from geopy.geocoders import Nominatim
import csv
import pandas as pd
import geocoder
from rasa_sdk.events import SlotSet, AllSlotsReset
import requests
import json
from random import randint
import datetime
import os
import yaml
import csv
import pickle
import pandas as pd
import numpy as np
from nltk.corpus import stopwords
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
import string
from sklearn.svm import SVC
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
import pickle
from sklearn.metrics import classification_report
names=[]
phones=[]
ages=[]
symptoms=[]
sides=[]
intensities=[]
locations=[]
class ActionForm(FormAction):
def name(self) -> Text:
self.sn=0
return "form_questions"
@staticmethod
def required_slots(tracker:Tracker) -> List[Text]:
return ['name','phone','age']#,'patient id'
def submit(
self,
dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any],
) -> List[Dict]:
dispatcher.utter_message("Great! You're registered")
return []
def slot_mappings(self) -> Dict[Text,Union[Dict, List[Dict]]]:
return{
'name': [self.from_entity(entity='name', intent='form_entry'),
self.from_text()],
'age': [self.from_entity(entity='age', intent='form_entry'),
self.from_text()],
'phone': [self.from_entity(entity='phone', intent='form_entry'),
self.from_text()]
}
class ActionForm(FormAction):
def name(self) -> Text:
self.sn=0
return "doc_form"
@staticmethod
def required_slots(tracker:Tracker) -> List[Text]:
return ['doctor']#,'patient id'
def submit(self, dispatcher: CollectingDispatcher,
tracker:Tracker, domain: Dict[Text,Any],
) -> List[Dict]:
dispatcher.utter_message(text="Processing..")
return[]
def slot_mappings(self) -> Dict[Text,Union[Dict, List[Dict]]]:
return{
'doctor': [self.from_entity(entity='doctor', intent='doc_name'),
self.from_text()]
}
class ActionForm(FormAction):
def name(self) -> Text:
return "location_form"
@staticmethod
def required_slots(tracker:Tracker) -> List[Text]:
return ['location']#,'patient id'
def submit(self, dispatcher: CollectingDispatcher,
tracker:Tracker, domain: Dict[Text,Any],
) -> List[Dict]:
location=tracker.get_slot('location')
# dictionary of lists
action_place_search()
return[]
def slot_mappings(self) -> Dict[Text,Union[Dict, List[Dict]]]:
return{
'location': [self.from_entity(entity='location', intent='location_entry'),
self.from_text()]
}
class Side(Action):
def name(self):
return 'action_side'
def run(self,dispatcher,tracker,domain):
symp=tracker.get_slot('symptom')
buttons = [{'title': 'na', 'payload': '/side{"side":"na"}'},{'title': 'left', 'payload': '/side{"side":"left"}'}, {'title': 'right', 'payload': '/side{"side":"right"}'}, {'title': 'both', 'payload': '/side{"side":"both"}'}]
dispatcher.utter_message(template='utter_ask_side',buttons=buttons)
return[]
def slot_mappings(self) -> Dict[Text,Union[Dict, List[Dict]]]:
return{
'side': [self.from_entity(entity='side', intent='side'),
self.from_text()]
}
class SymptomSearch(Action):
def name(self):
return 'symptom_search'
def run(self,dispatcher,tracker,domain):
symp=tracker.get_slot('symptom')
dispatcher.utter_message('Please choose the intensity of discomformt on a scale of 1-10: ')
buttons = [{'title': "1", 'payload': '/intensity{"intensity":"1"}'}, {'title': "2", 'payload': '/intensity{"intensity":"2"}'}, {'title': "3", 'payload': '/intensity{"intensity":"3"}'}, {'title': "4", 'payload': '/intensity{"intensity":"4"}'},{'title': "5", 'payload': '/intensity{"intensity":"5"}'}, {'title': "6", 'payload': '/intensity{"intensity":"6"}'},{'title': "7", 'payload': '/intensity{"intensity":"7"}'}, {'title': "8", 'payload': '/intensity{"intensity":"8"}'},{'title': "9", 'payload': '/intensity{"intensity":"9"}'}, {'title': "10", 'payload': '/intensity{"intensity":"10"}'}]
dispatcher.utter_button_template('utter_ask_intensity', buttons, tracker)
return []
def slot_mappings(self) -> Dict[Text,Union[Dict, List[Dict]]]:
return{
'intensity': [self.from_entity(entity='intensity', intent='intensity'),
self.from_text()]
}
class ActionIntensity(Action):
def name(self):
return 'action_intensity'
def run(self, dispatcher, tracker, domain):
intense=int(tracker.get_slot('intensity'))
side=tracker.get_slot('side')
symp=tracker.get_slot('symptom')
filename = r'C:\Users\mehak\Desktop\demobot\chatbot_model_4.sav'
loaded_model = pickle.load(open(filename, 'rb'))
probabilty= pd.DataFrame(loaded_model.predict_proba([symp]), columns=loaded_model.classes_)
probability_score=probabilty.melt()
symptom_list=[]
if intense>8:
dispatcher.utter_message("Please call 911 for emergencies")
for i in range(0,3):
num=probability_score.nlargest(3,'value')['value']
if max(num)>95:
num=probability_score.nlargest(3,'value')['variable'].index[i]
symptom_list.append(probability_score.nlargest(3,'value')['variable'][num])
buttons = [{'title': symptom_list[0], 'payload': '/picking_specialty'}]
else:
for i in range(0,3):
num=probability_score.nlargest(3,'value')['variable'].index[i]
symptom_list.append(probability_score.nlargest(3,'value')['variable'][num])
buttons = [{'title': symptom_list[0], 'payload': '/picking_specialty'},{'title': symptom_list[1] , 'payload': '/picking_specialty'},{'title': symptom_list[2] , 'payload': '/picking_specialty'}]
dispatcher.utter_button_template('utter_ask_spec', buttons, tracker)
class Summarize(Action):
def name(self) -> Text:
return 'summarize'
def run(self, dispatcher, tracker, domain):
name=tracker.get_slot('name')
phone=tracker.get_slot('phone')
age=tracker.get_slot('age')
symptom=tracker.get_slot('symptom')
intense=str(tracker.get_slot('intensity'))
side=tracker.get_slot('side')
location=tracker.get_slot('location')
names.append(name)
phones.append(phone)
ages.append(age)
symptoms.append(symptom)
locations.append(location)
intensities.append(intense)
sides.append(side)
dict = {'Name': names, 'Phone Number': phones, 'Age': ages, 'Symptoms': symptoms, 'Location':locations, 'Intensity of pain':intensities,'Location of Pain':sides}
df = pd.DataFrame(dict)
df.to_csv('Patient Data.csv', header=False, index=False)
dispatcher.utter_message("Here's a summary of the information your doctor will be provided with: \n \tName: "+str(name)+"\n \tAge: "+str(age)+"\n \tPhone: "+str(phone)+"\n \tSymptoms: "+str(symptom)+"\n \tIntensity of pain: "+str(intense)+"\n \tSide of pain (if applicable):"+str(side))
class ActionPlaceSearch(Action):
def name(self):
#define the name of the action
return 'action_place_search'
def run(self, dispatcher, tracker, domain):
#retrieve slot values
query = tracker.get_slot('amenity')
radius = 200
#retrieve google api key
with open("./ga_credentials.yml", 'r') as ymlfile:
cfg = yaml.load(ymlfile)
key = cfg['credentials']['GOOGLE_KEY']
import requests
location=tracker.get_slot('location')
geolocator = Nominatim(user_agent="demobot")
location = geolocator.geocode(location)
place = requests.get('https://maps.googleapis.com/maps/api/place/nearbysearch/json?location={},{}&radius={}&type={}&key={}'.format(location.latitude, location.longitude, radius, query, key)).json()
if len(place['results'])==0:
dispatcher.utter_message("Sorry, I didn't find anything")
return []#SlotSet('location_match', 'none')
else:
for i in place['results']:
if 'rating' and 'vicinity' in i.keys():
name = i['name']
rating = i['rating']
address = i['vicinity']
if i['opening_hours']['open_now']==True:
opening_hours = 'open'
else:
opening_hours = 'closed'
break
speech = "I found a {} called {} based on your specified parameters.".format(query, name)
dispatcher.utter_message(speech) #send the response back to the user
return [] #set returned details as slots
#SlotSet('location_match', 'one'), SlotSet('rating', rating), SlotSet('address', address), SlotSet('opening_hours', opening_hours)
class ActionHelloWorld(Action):
def name(self) -> Text:
return "action_check_number"
def run(self, dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
|
class ActionFallback(Action):
def name(self) -> Text:
return "action_default_fallback"
def run(self, dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
dispatcher.utter_message(text="Sorry, I don't understand. Could you rephrase that?")
return []
# class ActionHelloWorld(Action):
#
# def name(self) -> Text:
# return "action_hello_world"
#
# def run(self, dispatcher: CollectingDispatcher,
# tracker: Tracker,
# domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
#
# dispatcher.utter_message(text="Hello World!")
#
# return []
| number=tracker.get_slot("phone")
name=tracker.get_slot("name")
if number in phones:
dispatcher.utter_message(text="You're already registered")
else:
names.append(name)
dispatcher.utter_message(text="You're added to the list!")
return [] | identifier_body |
actions.py | # This files contains your custom actions which can be used to run
# custom Python code.
#
# See this guide on how to implement these action:
# https://rasa.com/docs/rasa/core/actions/#custom-actions/
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from typing import Any, Text, Dict, List
from rasa_sdk import Action, Tracker
from rasa_sdk.executor import CollectingDispatcher
from rasa_sdk.forms import FormAction
from typing import Union
from geopy.geocoders import Nominatim
import csv
import pandas as pd
import geocoder
from rasa_sdk.events import SlotSet, AllSlotsReset
import requests
import json
from random import randint
import datetime
import os
import yaml
import csv
import pickle
import pandas as pd
import numpy as np
from nltk.corpus import stopwords
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
import string
from sklearn.svm import SVC
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
import pickle
from sklearn.metrics import classification_report
names=[]
phones=[]
ages=[]
symptoms=[]
sides=[]
intensities=[]
locations=[]
class ActionForm(FormAction):
def name(self) -> Text:
self.sn=0
return "form_questions"
@staticmethod
def required_slots(tracker:Tracker) -> List[Text]:
return ['name','phone','age']#,'patient id'
def submit(
self,
dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any],
) -> List[Dict]:
dispatcher.utter_message("Great! You're registered")
return []
def slot_mappings(self) -> Dict[Text,Union[Dict, List[Dict]]]:
return{
'name': [self.from_entity(entity='name', intent='form_entry'),
self.from_text()],
'age': [self.from_entity(entity='age', intent='form_entry'),
self.from_text()],
'phone': [self.from_entity(entity='phone', intent='form_entry'),
self.from_text()]
}
class ActionForm(FormAction):
def name(self) -> Text:
self.sn=0
return "doc_form"
@staticmethod
def required_slots(tracker:Tracker) -> List[Text]:
return ['doctor']#,'patient id'
def submit(self, dispatcher: CollectingDispatcher,
tracker:Tracker, domain: Dict[Text,Any],
) -> List[Dict]:
dispatcher.utter_message(text="Processing..")
return[]
def slot_mappings(self) -> Dict[Text,Union[Dict, List[Dict]]]:
return{
'doctor': [self.from_entity(entity='doctor', intent='doc_name'),
self.from_text()]
}
class ActionForm(FormAction):
def name(self) -> Text:
return "location_form"
@staticmethod
def required_slots(tracker:Tracker) -> List[Text]:
return ['location']#,'patient id'
def submit(self, dispatcher: CollectingDispatcher,
tracker:Tracker, domain: Dict[Text,Any],
) -> List[Dict]:
location=tracker.get_slot('location')
# dictionary of lists
action_place_search()
return[]
def slot_mappings(self) -> Dict[Text,Union[Dict, List[Dict]]]:
return{
'location': [self.from_entity(entity='location', intent='location_entry'),
self.from_text()]
}
class Side(Action):
def name(self):
return 'action_side'
def run(self,dispatcher,tracker,domain):
symp=tracker.get_slot('symptom')
buttons = [{'title': 'na', 'payload': '/side{"side":"na"}'},{'title': 'left', 'payload': '/side{"side":"left"}'}, {'title': 'right', 'payload': '/side{"side":"right"}'}, {'title': 'both', 'payload': '/side{"side":"both"}'}]
dispatcher.utter_message(template='utter_ask_side',buttons=buttons)
return[]
def slot_mappings(self) -> Dict[Text,Union[Dict, List[Dict]]]:
return{
'side': [self.from_entity(entity='side', intent='side'),
self.from_text()]
}
class SymptomSearch(Action):
def name(self):
return 'symptom_search'
def run(self,dispatcher,tracker,domain):
symp=tracker.get_slot('symptom')
dispatcher.utter_message('Please choose the intensity of discomformt on a scale of 1-10: ')
buttons = [{'title': "1", 'payload': '/intensity{"intensity":"1"}'}, {'title': "2", 'payload': '/intensity{"intensity":"2"}'}, {'title': "3", 'payload': '/intensity{"intensity":"3"}'}, {'title': "4", 'payload': '/intensity{"intensity":"4"}'},{'title': "5", 'payload': '/intensity{"intensity":"5"}'}, {'title': "6", 'payload': '/intensity{"intensity":"6"}'},{'title': "7", 'payload': '/intensity{"intensity":"7"}'}, {'title': "8", 'payload': '/intensity{"intensity":"8"}'},{'title': "9", 'payload': '/intensity{"intensity":"9"}'}, {'title': "10", 'payload': '/intensity{"intensity":"10"}'}]
dispatcher.utter_button_template('utter_ask_intensity', buttons, tracker)
return []
def slot_mappings(self) -> Dict[Text,Union[Dict, List[Dict]]]:
return{
'intensity': [self.from_entity(entity='intensity', intent='intensity'),
self.from_text()]
}
class ActionIntensity(Action):
def name(self):
return 'action_intensity'
def run(self, dispatcher, tracker, domain):
intense=int(tracker.get_slot('intensity'))
side=tracker.get_slot('side')
symp=tracker.get_slot('symptom')
filename = r'C:\Users\mehak\Desktop\demobot\chatbot_model_4.sav'
loaded_model = pickle.load(open(filename, 'rb'))
probabilty= pd.DataFrame(loaded_model.predict_proba([symp]), columns=loaded_model.classes_)
probability_score=probabilty.melt()
symptom_list=[]
if intense>8:
dispatcher.utter_message("Please call 911 for emergencies")
for i in range(0,3):
num=probability_score.nlargest(3,'value')['value']
if max(num)>95:
|
else:
for i in range(0,3):
num=probability_score.nlargest(3,'value')['variable'].index[i]
symptom_list.append(probability_score.nlargest(3,'value')['variable'][num])
buttons = [{'title': symptom_list[0], 'payload': '/picking_specialty'},{'title': symptom_list[1] , 'payload': '/picking_specialty'},{'title': symptom_list[2] , 'payload': '/picking_specialty'}]
dispatcher.utter_button_template('utter_ask_spec', buttons, tracker)
class Summarize(Action):
def name(self) -> Text:
return 'summarize'
def run(self, dispatcher, tracker, domain):
name=tracker.get_slot('name')
phone=tracker.get_slot('phone')
age=tracker.get_slot('age')
symptom=tracker.get_slot('symptom')
intense=str(tracker.get_slot('intensity'))
side=tracker.get_slot('side')
location=tracker.get_slot('location')
names.append(name)
phones.append(phone)
ages.append(age)
symptoms.append(symptom)
locations.append(location)
intensities.append(intense)
sides.append(side)
dict = {'Name': names, 'Phone Number': phones, 'Age': ages, 'Symptoms': symptoms, 'Location':locations, 'Intensity of pain':intensities,'Location of Pain':sides}
df = pd.DataFrame(dict)
df.to_csv('Patient Data.csv', header=False, index=False)
dispatcher.utter_message("Here's a summary of the information your doctor will be provided with: \n \tName: "+str(name)+"\n \tAge: "+str(age)+"\n \tPhone: "+str(phone)+"\n \tSymptoms: "+str(symptom)+"\n \tIntensity of pain: "+str(intense)+"\n \tSide of pain (if applicable):"+str(side))
class ActionPlaceSearch(Action):
def name(self):
#define the name of the action
return 'action_place_search'
def run(self, dispatcher, tracker, domain):
#retrieve slot values
query = tracker.get_slot('amenity')
radius = 200
#retrieve google api key
with open("./ga_credentials.yml", 'r') as ymlfile:
cfg = yaml.load(ymlfile)
key = cfg['credentials']['GOOGLE_KEY']
import requests
location=tracker.get_slot('location')
geolocator = Nominatim(user_agent="demobot")
location = geolocator.geocode(location)
place = requests.get('https://maps.googleapis.com/maps/api/place/nearbysearch/json?location={},{}&radius={}&type={}&key={}'.format(location.latitude, location.longitude, radius, query, key)).json()
if len(place['results'])==0:
dispatcher.utter_message("Sorry, I didn't find anything")
return []#SlotSet('location_match', 'none')
else:
for i in place['results']:
if 'rating' and 'vicinity' in i.keys():
name = i['name']
rating = i['rating']
address = i['vicinity']
if i['opening_hours']['open_now']==True:
opening_hours = 'open'
else:
opening_hours = 'closed'
break
speech = "I found a {} called {} based on your specified parameters.".format(query, name)
dispatcher.utter_message(speech) #send the response back to the user
return [] #set returned details as slots
#SlotSet('location_match', 'one'), SlotSet('rating', rating), SlotSet('address', address), SlotSet('opening_hours', opening_hours)
class ActionHelloWorld(Action):
def name(self) -> Text:
return "action_check_number"
def run(self, dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
number=tracker.get_slot("phone")
name=tracker.get_slot("name")
if number in phones:
dispatcher.utter_message(text="You're already registered")
else:
names.append(name)
dispatcher.utter_message(text="You're added to the list!")
return []
class ActionFallback(Action):
def name(self) -> Text:
return "action_default_fallback"
def run(self, dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
dispatcher.utter_message(text="Sorry, I don't understand. Could you rephrase that?")
return []
# class ActionHelloWorld(Action):
#
# def name(self) -> Text:
# return "action_hello_world"
#
# def run(self, dispatcher: CollectingDispatcher,
# tracker: Tracker,
# domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
#
# dispatcher.utter_message(text="Hello World!")
#
# return []
| num=probability_score.nlargest(3,'value')['variable'].index[i]
symptom_list.append(probability_score.nlargest(3,'value')['variable'][num])
buttons = [{'title': symptom_list[0], 'payload': '/picking_specialty'}] | conditional_block |
suite.go | package testutil
import (
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"math/big"
"reflect"
"time"
sdkmath "cosmossdk.io/math"
"github.com/cosmos/cosmos-sdk/baseapp"
sdk "github.com/cosmos/cosmos-sdk/types"
authkeeper "github.com/cosmos/cosmos-sdk/x/auth/keeper"
authtypes "github.com/cosmos/cosmos-sdk/x/auth/types"
bankkeeper "github.com/cosmos/cosmos-sdk/x/bank/keeper"
stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types"
"github.com/ethereum/go-ethereum/accounts/abi"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
ethtypes "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/crypto"
"github.com/evmos/ethermint/crypto/ethsecp256k1"
"github.com/evmos/ethermint/server/config"
etherminttests "github.com/evmos/ethermint/tests"
etherminttypes "github.com/evmos/ethermint/types"
evmtypes "github.com/evmos/ethermint/x/evm/types"
feemarkettypes "github.com/evmos/ethermint/x/feemarket/types"
"github.com/stretchr/testify/suite"
abci "github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/crypto/tmhash"
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
tmversion "github.com/tendermint/tendermint/proto/tendermint/version"
tmtime "github.com/tendermint/tendermint/types/time"
"github.com/tendermint/tendermint/version"
"github.com/kava-labs/kava/app"
"github.com/kava-labs/kava/x/evmutil/keeper"
"github.com/kava-labs/kava/x/evmutil/types"
)
type Suite struct {
suite.Suite
App app.TestApp
Ctx sdk.Context
Address common.Address
BankKeeper bankkeeper.Keeper
AccountKeeper authkeeper.AccountKeeper
Keeper keeper.Keeper
EvmBankKeeper keeper.EvmBankKeeper
Addrs []sdk.AccAddress
EvmModuleAddr sdk.AccAddress
QueryClient types.QueryClient
QueryClientEvm evmtypes.QueryClient
Key1 *ethsecp256k1.PrivKey
Key1Addr types.InternalEVMAddress
Key2 *ethsecp256k1.PrivKey
}
func (suite *Suite) SetupTest() {
tApp := app.NewTestApp()
suite.Ctx = tApp.NewContext(true, tmproto.Header{Height: 1, Time: tmtime.Now()})
suite.App = tApp
suite.BankKeeper = tApp.GetBankKeeper()
suite.AccountKeeper = tApp.GetAccountKeeper()
suite.Keeper = tApp.GetEvmutilKeeper()
suite.EvmBankKeeper = keeper.NewEvmBankKeeper(tApp.GetEvmutilKeeper(), suite.BankKeeper, suite.AccountKeeper)
suite.EvmModuleAddr = suite.AccountKeeper.GetModuleAddress(evmtypes.ModuleName)
// test evm user keys that have no minting permissions
addr, privKey := RandomEvmAccount()
suite.Key1 = privKey
suite.Key1Addr = types.NewInternalEVMAddress(addr)
_, suite.Key2 = RandomEvmAccount()
_, addrs := app.GeneratePrivKeyAddressPairs(4)
suite.Addrs = addrs
evmGenesis := evmtypes.DefaultGenesisState()
evmGenesis.Params.EvmDenom = "akava"
feemarketGenesis := feemarkettypes.DefaultGenesisState()
feemarketGenesis.Params.EnableHeight = 1
feemarketGenesis.Params.NoBaseFee = false
cdc := suite.App.AppCodec()
coins := sdk.NewCoins(sdk.NewInt64Coin("ukava", 1000_000_000_000_000_000))
authGS := app.NewFundedGenStateWithSameCoins(cdc, coins, []sdk.AccAddress{
sdk.AccAddress(suite.Key1.PubKey().Address()),
sdk.AccAddress(suite.Key2.PubKey().Address()),
})
gs := app.GenesisState{
evmtypes.ModuleName: cdc.MustMarshalJSON(evmGenesis),
feemarkettypes.ModuleName: cdc.MustMarshalJSON(feemarketGenesis),
}
suite.App.InitializeFromGenesisStates(authGS, gs)
// consensus key - needed to set up evm module
consPriv, err := ethsecp256k1.GenerateKey()
suite.Require().NoError(err)
consAddress := sdk.ConsAddress(consPriv.PubKey().Address())
// InitializeFromGenesisStates commits first block so we start at 2 here
suite.Ctx = suite.App.NewContext(false, tmproto.Header{
Height: suite.App.LastBlockHeight() + 1,
ChainID: "kavatest_1-1",
Time: time.Now().UTC(),
ProposerAddress: consAddress.Bytes(),
Version: tmversion.Consensus{
Block: version.BlockProtocol,
},
LastBlockId: tmproto.BlockID{
Hash: tmhash.Sum([]byte("block_id")),
PartSetHeader: tmproto.PartSetHeader{
Total: 11,
Hash: tmhash.Sum([]byte("partset_header")),
},
},
AppHash: tmhash.Sum([]byte("app")),
DataHash: tmhash.Sum([]byte("data")),
EvidenceHash: tmhash.Sum([]byte("evidence")),
ValidatorsHash: tmhash.Sum([]byte("validators")),
NextValidatorsHash: tmhash.Sum([]byte("next_validators")),
ConsensusHash: tmhash.Sum([]byte("consensus")),
LastResultsHash: tmhash.Sum([]byte("last_result")),
})
// We need to set the validator as calling the EVM looks up the validator address
// https://github.com/evmos/ethermint/blob/f21592ebfe74da7590eb42ed926dae970b2a9a3f/x/evm/keeper/state_transition.go#L487
// evmkeeper.EVMConfig() will return error "failed to load evm config" if not set
acc := ðerminttypes.EthAccount{
BaseAccount: authtypes.NewBaseAccount(sdk.AccAddress(suite.Address.Bytes()), nil, 0, 0),
CodeHash: common.BytesToHash(crypto.Keccak256(nil)).String(),
}
suite.AccountKeeper.SetAccount(suite.Ctx, acc)
valAddr := sdk.ValAddress(suite.Address.Bytes())
validator, err := stakingtypes.NewValidator(valAddr, consPriv.PubKey(), stakingtypes.Description{})
suite.Require().NoError(err)
err = suite.App.GetStakingKeeper().SetValidatorByConsAddr(suite.Ctx, validator)
suite.Require().NoError(err)
suite.App.GetStakingKeeper().SetValidator(suite.Ctx, validator)
// add conversion pair for first module deployed contract to evmutil params
suite.Keeper.SetParams(suite.Ctx, types.NewParams(
types.NewConversionPairs(
types.NewConversionPair(
// First contract this module deploys
MustNewInternalEVMAddressFromString("0x15932E26f5BD4923d46a2b205191C4b5d5f43FE3"),
"erc20/usdc",
),
),
types.NewAllowedCosmosCoinERC20Tokens(),
))
queryHelper := baseapp.NewQueryServerTestHelper(suite.Ctx, suite.App.InterfaceRegistry())
evmtypes.RegisterQueryServer(queryHelper, suite.App.GetEvmKeeper())
suite.QueryClientEvm = evmtypes.NewQueryClient(queryHelper)
types.RegisterQueryServer(queryHelper, keeper.NewQueryServerImpl(suite.Keeper))
suite.QueryClient = types.NewQueryClient(queryHelper)
// We need to commit so that the ethermint feemarket beginblock runs to set the minfee
// feeMarketKeeper.GetBaseFee() will return nil otherwise
suite.Commit()
}
func (suite *Suite) Commit() {
_ = suite.App.Commit()
header := suite.Ctx.BlockHeader()
header.Height += 1
suite.App.BeginBlock(abci.RequestBeginBlock{
Header: header,
})
// update ctx
suite.Ctx = suite.App.NewContext(false, header)
}
func (suite *Suite) ModuleBalance(denom string) sdk.Int {
return suite.App.GetModuleAccountBalance(suite.Ctx, types.ModuleName, denom)
}
func (suite *Suite) FundAccountWithKava(addr sdk.AccAddress, coins sdk.Coins) {
ukava := coins.AmountOf("ukava")
if ukava.IsPositive() {
err := suite.App.FundAccount(suite.Ctx, addr, sdk.NewCoins(sdk.NewCoin("ukava", ukava)))
suite.Require().NoError(err)
}
akava := coins.AmountOf("akava")
if akava.IsPositive() {
err := suite.Keeper.SetBalance(suite.Ctx, addr, akava)
suite.Require().NoError(err)
}
}
func (suite *Suite) FundModuleAccountWithKava(moduleName string, coins sdk.Coins) {
ukava := coins.AmountOf("ukava")
if ukava.IsPositive() {
err := suite.App.FundModuleAccount(suite.Ctx, moduleName, sdk.NewCoins(sdk.NewCoin("ukava", ukava)))
suite.Require().NoError(err)
}
akava := coins.AmountOf("akava")
if akava.IsPositive() {
addr := suite.AccountKeeper.GetModuleAddress(moduleName)
err := suite.Keeper.SetBalance(suite.Ctx, addr, akava)
suite.Require().NoError(err)
}
}
func (suite *Suite) DeployERC20() types.InternalEVMAddress {
// make sure module account is created
// qq: any better ways to do this?
suite.App.FundModuleAccount(
suite.Ctx,
types.ModuleName,
sdk.NewCoins(sdk.NewCoin("ukava", sdkmath.NewInt(0))),
)
contractAddr, err := suite.Keeper.DeployTestMintableERC20Contract(suite.Ctx, "USDC", "USDC", uint8(18))
suite.Require().NoError(err)
suite.Require().Greater(len(contractAddr.Address), 0)
return contractAddr
}
func (suite *Suite) GetERC20BalanceOf(
contractAbi abi.ABI,
contractAddr types.InternalEVMAddress,
accountAddr types.InternalEVMAddress,
) *big.Int {
// Query ERC20.balanceOf()
addr := common.BytesToAddress(suite.Key1.PubKey().Address())
res, err := suite.QueryContract(
types.ERC20MintableBurnableContract.ABI,
addr,
suite.Key1,
contractAddr,
"balanceOf",
accountAddr.Address,
)
suite.Require().NoError(err)
suite.Require().Len(res, 1)
balance, ok := res[0].(*big.Int)
suite.Require().True(ok, "balanceOf should respond with *big.Int")
return balance
}
func (suite *Suite) QueryContract(
contractAbi abi.ABI,
from common.Address,
fromKey *ethsecp256k1.PrivKey,
contract types.InternalEVMAddress,
method string,
args ...interface{},
) ([]interface{}, error) {
// Pack query args
data, err := contractAbi.Pack(method, args...)
suite.Require().NoError(err)
// Send TX
res, err := suite.SendTx(contract, from, fromKey, data)
suite.Require().NoError(err)
// Check for VM errors and unpack returned data
switch res.VmError {
case vm.ErrExecutionReverted.Error():
response, err := abi.UnpackRevert(res.Ret)
suite.Require().NoError(err)
return nil, errors.New(response)
case "": // No error, continue
default:
panic(fmt.Sprintf("unhandled vm error response: %v", res.VmError))
}
// Unpack response
unpackedRes, err := contractAbi.Unpack(method, res.Ret)
suite.Require().NoErrorf(err, "failed to unpack method %v response", method)
return unpackedRes, nil
}
// SendTx submits a transaction to the block.
func (suite *Suite) SendTx(
contractAddr types.InternalEVMAddress,
from common.Address,
signerKey *ethsecp256k1.PrivKey,
transferData []byte,
) (*evmtypes.MsgEthereumTxResponse, error) {
ctx := sdk.WrapSDKContext(suite.Ctx)
chainID := suite.App.GetEvmKeeper().ChainID()
args, err := json.Marshal(&evmtypes.TransactionArgs{
To: &contractAddr.Address,
From: &from,
Data: (*hexutil.Bytes)(&transferData),
})
if err != nil {
return nil, err
}
gasRes, err := suite.QueryClientEvm.EstimateGas(ctx, &evmtypes.EthCallRequest{
Args: args,
GasCap: config.DefaultGasCap,
})
if err != nil {
return nil, err
}
nonce := suite.App.GetEvmKeeper().GetNonce(suite.Ctx, suite.Address)
baseFee := suite.App.GetFeeMarketKeeper().GetBaseFee(suite.Ctx)
suite.Require().NotNil(baseFee, "base fee is nil")
// Mint the max gas to the FeeCollector to ensure balance in case of refund
suite.MintFeeCollector(sdk.NewCoins(
sdk.NewCoin(
"ukava",
sdkmath.NewInt(baseFee.Int64()*int64(gasRes.Gas*2)),
)))
ercTransferTx := evmtypes.NewTx(
chainID,
nonce,
&contractAddr.Address,
nil, // amount
gasRes.Gas*2, // gasLimit, TODO: runs out of gas with just res.Gas, ex: estimated was 21572 but used 24814
nil, // gasPrice
suite.App.GetFeeMarketKeeper().GetBaseFee(suite.Ctx), // gasFeeCap
big.NewInt(1), // gasTipCap
transferData,
ðtypes.AccessList{}, // accesses
)
ercTransferTx.From = hex.EncodeToString(signerKey.PubKey().Address())
err = ercTransferTx.Sign(ethtypes.LatestSignerForChainID(chainID), etherminttests.NewSigner(signerKey))
if err != nil {
return nil, err
}
rsp, err := suite.App.GetEvmKeeper().EthereumTx(ctx, ercTransferTx)
if err != nil {
return nil, err
}
// Do not check vm error here since we want to check for errors later
return rsp, nil
}
func (suite *Suite) MintFeeCollector(coins sdk.Coins) {
err := suite.App.FundModuleAccount(suite.Ctx, authtypes.FeeCollectorName, coins)
suite.Require().NoError(err)
}
// GetEvents returns emitted events on the sdk context
func (suite *Suite) GetEvents() sdk.Events {
return suite.Ctx.EventManager().Events()
}
// EventsContains asserts that the expected event is in the provided events
func (suite *Suite) EventsContains(events sdk.Events, expectedEvent sdk.Event) {
foundMatch := false
var possibleFailedMatch []sdk.Attribute
expectedAttrs := attrsToMap(expectedEvent.Attributes)
for _, event := range events {
if event.Type == expectedEvent.Type {
attrs := attrsToMap(event.Attributes)
if reflect.DeepEqual(expectedAttrs, attrs) {
foundMatch = true
} else {
possibleFailedMatch = attrs
}
}
}
if !foundMatch && possibleFailedMatch != nil {
suite.ElementsMatch(expectedAttrs, possibleFailedMatch, "unmatched attributes on event of type %s", expectedEvent.Type)
} else {
suite.Truef(foundMatch, "event of type %s not found", expectedEvent.Type)
}
}
// EventsDoNotContain asserts that the event is **not** is in the provided events
func (suite *Suite) EventsDoNotContain(events sdk.Events, eventType string) {
foundMatch := false
for _, event := range events {
if event.Type == eventType {
foundMatch = true
}
}
suite.Falsef(foundMatch, "event of type %s should not be found, but was found", eventType)
}
// BigIntsEqual is a helper method for comparing the equality of two big ints
func (suite *Suite) BigIntsEqual(expected *big.Int, actual *big.Int, msg string) {
suite.Truef(expected.Cmp(actual) == 0, "%s (expected: %s, actual: %s)", msg, expected.String(), actual.String())
}
func attrsToMap(attrs []abci.EventAttribute) []sdk.Attribute {
out := []sdk.Attribute{}
for _, attr := range attrs {
out = append(out, sdk.NewAttribute(string(attr.Key), string(attr.Value)))
}
return out
}
// MustNewInternalEVMAddressFromString returns a new InternalEVMAddress from a
// hex string. This will panic if the input hex string is invalid.
func MustNewInternalEVMAddressFromString(addrStr string) types.InternalEVMAddress {
addr, err := types.NewInternalEVMAddressFromString(addrStr)
if err != nil {
panic(err)
}
return addr
}
func RandomEvmAccount() (common.Address, *ethsecp256k1.PrivKey) |
func RandomEvmAddress() common.Address {
addr, _ := RandomEvmAccount()
return addr
}
func RandomInternalEVMAddress() types.InternalEVMAddress {
return types.NewInternalEVMAddress(RandomEvmAddress())
}
| {
privKey, err := ethsecp256k1.GenerateKey()
if err != nil {
panic(err)
}
addr := common.BytesToAddress(privKey.PubKey().Address())
return addr, privKey
} | identifier_body |
suite.go | package testutil
import (
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"math/big"
"reflect"
"time"
sdkmath "cosmossdk.io/math"
"github.com/cosmos/cosmos-sdk/baseapp"
sdk "github.com/cosmos/cosmos-sdk/types"
authkeeper "github.com/cosmos/cosmos-sdk/x/auth/keeper"
authtypes "github.com/cosmos/cosmos-sdk/x/auth/types"
bankkeeper "github.com/cosmos/cosmos-sdk/x/bank/keeper"
stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types"
"github.com/ethereum/go-ethereum/accounts/abi"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
ethtypes "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/crypto"
"github.com/evmos/ethermint/crypto/ethsecp256k1"
"github.com/evmos/ethermint/server/config"
etherminttests "github.com/evmos/ethermint/tests"
etherminttypes "github.com/evmos/ethermint/types"
evmtypes "github.com/evmos/ethermint/x/evm/types"
feemarkettypes "github.com/evmos/ethermint/x/feemarket/types"
"github.com/stretchr/testify/suite"
abci "github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/crypto/tmhash"
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
tmversion "github.com/tendermint/tendermint/proto/tendermint/version"
tmtime "github.com/tendermint/tendermint/types/time"
"github.com/tendermint/tendermint/version"
"github.com/kava-labs/kava/app"
"github.com/kava-labs/kava/x/evmutil/keeper"
"github.com/kava-labs/kava/x/evmutil/types"
)
type Suite struct {
suite.Suite
App app.TestApp
Ctx sdk.Context
Address common.Address
BankKeeper bankkeeper.Keeper
AccountKeeper authkeeper.AccountKeeper
Keeper keeper.Keeper
EvmBankKeeper keeper.EvmBankKeeper
Addrs []sdk.AccAddress
EvmModuleAddr sdk.AccAddress
QueryClient types.QueryClient
QueryClientEvm evmtypes.QueryClient
Key1 *ethsecp256k1.PrivKey
Key1Addr types.InternalEVMAddress
Key2 *ethsecp256k1.PrivKey
}
func (suite *Suite) SetupTest() {
tApp := app.NewTestApp()
suite.Ctx = tApp.NewContext(true, tmproto.Header{Height: 1, Time: tmtime.Now()})
suite.App = tApp
suite.BankKeeper = tApp.GetBankKeeper()
suite.AccountKeeper = tApp.GetAccountKeeper()
suite.Keeper = tApp.GetEvmutilKeeper()
suite.EvmBankKeeper = keeper.NewEvmBankKeeper(tApp.GetEvmutilKeeper(), suite.BankKeeper, suite.AccountKeeper)
suite.EvmModuleAddr = suite.AccountKeeper.GetModuleAddress(evmtypes.ModuleName)
// test evm user keys that have no minting permissions
addr, privKey := RandomEvmAccount()
suite.Key1 = privKey
suite.Key1Addr = types.NewInternalEVMAddress(addr)
_, suite.Key2 = RandomEvmAccount()
_, addrs := app.GeneratePrivKeyAddressPairs(4)
suite.Addrs = addrs
evmGenesis := evmtypes.DefaultGenesisState()
evmGenesis.Params.EvmDenom = "akava"
feemarketGenesis := feemarkettypes.DefaultGenesisState()
feemarketGenesis.Params.EnableHeight = 1
feemarketGenesis.Params.NoBaseFee = false
cdc := suite.App.AppCodec()
coins := sdk.NewCoins(sdk.NewInt64Coin("ukava", 1000_000_000_000_000_000))
authGS := app.NewFundedGenStateWithSameCoins(cdc, coins, []sdk.AccAddress{
sdk.AccAddress(suite.Key1.PubKey().Address()),
sdk.AccAddress(suite.Key2.PubKey().Address()),
})
gs := app.GenesisState{
evmtypes.ModuleName: cdc.MustMarshalJSON(evmGenesis),
feemarkettypes.ModuleName: cdc.MustMarshalJSON(feemarketGenesis),
}
suite.App.InitializeFromGenesisStates(authGS, gs)
// consensus key - needed to set up evm module
consPriv, err := ethsecp256k1.GenerateKey()
suite.Require().NoError(err)
consAddress := sdk.ConsAddress(consPriv.PubKey().Address())
// InitializeFromGenesisStates commits first block so we start at 2 here
suite.Ctx = suite.App.NewContext(false, tmproto.Header{
Height: suite.App.LastBlockHeight() + 1,
ChainID: "kavatest_1-1",
Time: time.Now().UTC(),
ProposerAddress: consAddress.Bytes(),
Version: tmversion.Consensus{
Block: version.BlockProtocol,
},
LastBlockId: tmproto.BlockID{
Hash: tmhash.Sum([]byte("block_id")),
PartSetHeader: tmproto.PartSetHeader{
Total: 11,
Hash: tmhash.Sum([]byte("partset_header")),
},
},
AppHash: tmhash.Sum([]byte("app")),
DataHash: tmhash.Sum([]byte("data")),
EvidenceHash: tmhash.Sum([]byte("evidence")),
ValidatorsHash: tmhash.Sum([]byte("validators")),
NextValidatorsHash: tmhash.Sum([]byte("next_validators")),
ConsensusHash: tmhash.Sum([]byte("consensus")),
LastResultsHash: tmhash.Sum([]byte("last_result")),
})
// We need to set the validator as calling the EVM looks up the validator address
// https://github.com/evmos/ethermint/blob/f21592ebfe74da7590eb42ed926dae970b2a9a3f/x/evm/keeper/state_transition.go#L487
// evmkeeper.EVMConfig() will return error "failed to load evm config" if not set
acc := ðerminttypes.EthAccount{
BaseAccount: authtypes.NewBaseAccount(sdk.AccAddress(suite.Address.Bytes()), nil, 0, 0),
CodeHash: common.BytesToHash(crypto.Keccak256(nil)).String(),
}
suite.AccountKeeper.SetAccount(suite.Ctx, acc)
valAddr := sdk.ValAddress(suite.Address.Bytes())
validator, err := stakingtypes.NewValidator(valAddr, consPriv.PubKey(), stakingtypes.Description{})
suite.Require().NoError(err)
err = suite.App.GetStakingKeeper().SetValidatorByConsAddr(suite.Ctx, validator)
suite.Require().NoError(err)
suite.App.GetStakingKeeper().SetValidator(suite.Ctx, validator)
// add conversion pair for first module deployed contract to evmutil params
suite.Keeper.SetParams(suite.Ctx, types.NewParams(
types.NewConversionPairs(
types.NewConversionPair(
// First contract this module deploys
MustNewInternalEVMAddressFromString("0x15932E26f5BD4923d46a2b205191C4b5d5f43FE3"),
"erc20/usdc",
),
),
types.NewAllowedCosmosCoinERC20Tokens(),
))
queryHelper := baseapp.NewQueryServerTestHelper(suite.Ctx, suite.App.InterfaceRegistry())
evmtypes.RegisterQueryServer(queryHelper, suite.App.GetEvmKeeper())
suite.QueryClientEvm = evmtypes.NewQueryClient(queryHelper)
types.RegisterQueryServer(queryHelper, keeper.NewQueryServerImpl(suite.Keeper))
suite.QueryClient = types.NewQueryClient(queryHelper)
// We need to commit so that the ethermint feemarket beginblock runs to set the minfee
// feeMarketKeeper.GetBaseFee() will return nil otherwise
suite.Commit()
}
func (suite *Suite) Commit() {
_ = suite.App.Commit()
header := suite.Ctx.BlockHeader()
header.Height += 1
suite.App.BeginBlock(abci.RequestBeginBlock{
Header: header,
})
// update ctx
suite.Ctx = suite.App.NewContext(false, header)
}
func (suite *Suite) ModuleBalance(denom string) sdk.Int {
return suite.App.GetModuleAccountBalance(suite.Ctx, types.ModuleName, denom)
}
func (suite *Suite) FundAccountWithKava(addr sdk.AccAddress, coins sdk.Coins) {
ukava := coins.AmountOf("ukava")
if ukava.IsPositive() {
err := suite.App.FundAccount(suite.Ctx, addr, sdk.NewCoins(sdk.NewCoin("ukava", ukava)))
suite.Require().NoError(err)
}
akava := coins.AmountOf("akava")
if akava.IsPositive() {
err := suite.Keeper.SetBalance(suite.Ctx, addr, akava)
suite.Require().NoError(err)
}
}
func (suite *Suite) FundModuleAccountWithKava(moduleName string, coins sdk.Coins) {
ukava := coins.AmountOf("ukava")
if ukava.IsPositive() {
err := suite.App.FundModuleAccount(suite.Ctx, moduleName, sdk.NewCoins(sdk.NewCoin("ukava", ukava)))
suite.Require().NoError(err)
}
akava := coins.AmountOf("akava")
if akava.IsPositive() {
addr := suite.AccountKeeper.GetModuleAddress(moduleName)
err := suite.Keeper.SetBalance(suite.Ctx, addr, akava)
suite.Require().NoError(err)
}
}
func (suite *Suite) DeployERC20() types.InternalEVMAddress {
// make sure module account is created
// qq: any better ways to do this?
suite.App.FundModuleAccount(
suite.Ctx,
types.ModuleName,
sdk.NewCoins(sdk.NewCoin("ukava", sdkmath.NewInt(0))),
)
contractAddr, err := suite.Keeper.DeployTestMintableERC20Contract(suite.Ctx, "USDC", "USDC", uint8(18))
suite.Require().NoError(err)
suite.Require().Greater(len(contractAddr.Address), 0)
return contractAddr
}
func (suite *Suite) GetERC20BalanceOf(
contractAbi abi.ABI,
contractAddr types.InternalEVMAddress,
accountAddr types.InternalEVMAddress,
) *big.Int {
// Query ERC20.balanceOf()
addr := common.BytesToAddress(suite.Key1.PubKey().Address())
res, err := suite.QueryContract(
types.ERC20MintableBurnableContract.ABI,
addr,
suite.Key1,
contractAddr,
"balanceOf",
accountAddr.Address,
)
suite.Require().NoError(err)
suite.Require().Len(res, 1)
balance, ok := res[0].(*big.Int)
suite.Require().True(ok, "balanceOf should respond with *big.Int")
return balance
}
func (suite *Suite) QueryContract(
contractAbi abi.ABI,
from common.Address,
fromKey *ethsecp256k1.PrivKey,
contract types.InternalEVMAddress,
method string,
args ...interface{},
) ([]interface{}, error) {
// Pack query args
data, err := contractAbi.Pack(method, args...)
suite.Require().NoError(err)
// Send TX
res, err := suite.SendTx(contract, from, fromKey, data)
suite.Require().NoError(err)
// Check for VM errors and unpack returned data
switch res.VmError {
case vm.ErrExecutionReverted.Error():
response, err := abi.UnpackRevert(res.Ret)
suite.Require().NoError(err)
return nil, errors.New(response)
case "": // No error, continue
default:
panic(fmt.Sprintf("unhandled vm error response: %v", res.VmError))
}
// Unpack response
unpackedRes, err := contractAbi.Unpack(method, res.Ret)
suite.Require().NoErrorf(err, "failed to unpack method %v response", method)
return unpackedRes, nil
}
// SendTx submits a transaction to the block.
func (suite *Suite) SendTx(
contractAddr types.InternalEVMAddress,
from common.Address,
signerKey *ethsecp256k1.PrivKey,
transferData []byte,
) (*evmtypes.MsgEthereumTxResponse, error) {
ctx := sdk.WrapSDKContext(suite.Ctx)
chainID := suite.App.GetEvmKeeper().ChainID()
args, err := json.Marshal(&evmtypes.TransactionArgs{
To: &contractAddr.Address,
From: &from,
Data: (*hexutil.Bytes)(&transferData),
})
if err != nil {
return nil, err
}
gasRes, err := suite.QueryClientEvm.EstimateGas(ctx, &evmtypes.EthCallRequest{
Args: args,
GasCap: config.DefaultGasCap,
})
if err != nil {
return nil, err
}
nonce := suite.App.GetEvmKeeper().GetNonce(suite.Ctx, suite.Address)
baseFee := suite.App.GetFeeMarketKeeper().GetBaseFee(suite.Ctx)
suite.Require().NotNil(baseFee, "base fee is nil")
// Mint the max gas to the FeeCollector to ensure balance in case of refund
suite.MintFeeCollector(sdk.NewCoins(
sdk.NewCoin(
"ukava",
sdkmath.NewInt(baseFee.Int64()*int64(gasRes.Gas*2)),
)))
ercTransferTx := evmtypes.NewTx(
chainID,
nonce,
&contractAddr.Address,
nil, // amount
gasRes.Gas*2, // gasLimit, TODO: runs out of gas with just res.Gas, ex: estimated was 21572 but used 24814
nil, // gasPrice
suite.App.GetFeeMarketKeeper().GetBaseFee(suite.Ctx), // gasFeeCap
big.NewInt(1), // gasTipCap
transferData,
ðtypes.AccessList{}, // accesses
)
ercTransferTx.From = hex.EncodeToString(signerKey.PubKey().Address())
err = ercTransferTx.Sign(ethtypes.LatestSignerForChainID(chainID), etherminttests.NewSigner(signerKey))
if err != nil {
return nil, err
}
rsp, err := suite.App.GetEvmKeeper().EthereumTx(ctx, ercTransferTx)
if err != nil {
return nil, err
}
// Do not check vm error here since we want to check for errors later
return rsp, nil
}
func (suite *Suite) MintFeeCollector(coins sdk.Coins) {
err := suite.App.FundModuleAccount(suite.Ctx, authtypes.FeeCollectorName, coins)
suite.Require().NoError(err)
}
// GetEvents returns emitted events on the sdk context
func (suite *Suite) GetEvents() sdk.Events {
return suite.Ctx.EventManager().Events()
}
// EventsContains asserts that the expected event is in the provided events
func (suite *Suite) EventsContains(events sdk.Events, expectedEvent sdk.Event) {
foundMatch := false
var possibleFailedMatch []sdk.Attribute
expectedAttrs := attrsToMap(expectedEvent.Attributes)
for _, event := range events {
if event.Type == expectedEvent.Type {
attrs := attrsToMap(event.Attributes)
if reflect.DeepEqual(expectedAttrs, attrs) {
foundMatch = true
} else {
possibleFailedMatch = attrs
}
}
}
if !foundMatch && possibleFailedMatch != nil {
suite.ElementsMatch(expectedAttrs, possibleFailedMatch, "unmatched attributes on event of type %s", expectedEvent.Type)
} else {
suite.Truef(foundMatch, "event of type %s not found", expectedEvent.Type)
}
}
// EventsDoNotContain asserts that the event is **not** is in the provided events
func (suite *Suite) EventsDoNotContain(events sdk.Events, eventType string) {
foundMatch := false
for _, event := range events {
if event.Type == eventType {
foundMatch = true
}
}
suite.Falsef(foundMatch, "event of type %s should not be found, but was found", eventType)
}
// BigIntsEqual is a helper method for comparing the equality of two big ints
func (suite *Suite) BigIntsEqual(expected *big.Int, actual *big.Int, msg string) {
suite.Truef(expected.Cmp(actual) == 0, "%s (expected: %s, actual: %s)", msg, expected.String(), actual.String())
}
func attrsToMap(attrs []abci.EventAttribute) []sdk.Attribute {
out := []sdk.Attribute{}
for _, attr := range attrs {
out = append(out, sdk.NewAttribute(string(attr.Key), string(attr.Value)))
}
return out
}
// MustNewInternalEVMAddressFromString returns a new InternalEVMAddress from a
// hex string. This will panic if the input hex string is invalid.
func | (addrStr string) types.InternalEVMAddress {
addr, err := types.NewInternalEVMAddressFromString(addrStr)
if err != nil {
panic(err)
}
return addr
}
func RandomEvmAccount() (common.Address, *ethsecp256k1.PrivKey) {
privKey, err := ethsecp256k1.GenerateKey()
if err != nil {
panic(err)
}
addr := common.BytesToAddress(privKey.PubKey().Address())
return addr, privKey
}
func RandomEvmAddress() common.Address {
addr, _ := RandomEvmAccount()
return addr
}
func RandomInternalEVMAddress() types.InternalEVMAddress {
return types.NewInternalEVMAddress(RandomEvmAddress())
}
| MustNewInternalEVMAddressFromString | identifier_name |
suite.go | package testutil
import (
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"math/big"
"reflect"
"time"
sdkmath "cosmossdk.io/math"
"github.com/cosmos/cosmos-sdk/baseapp"
sdk "github.com/cosmos/cosmos-sdk/types"
authkeeper "github.com/cosmos/cosmos-sdk/x/auth/keeper"
authtypes "github.com/cosmos/cosmos-sdk/x/auth/types"
bankkeeper "github.com/cosmos/cosmos-sdk/x/bank/keeper"
stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types"
"github.com/ethereum/go-ethereum/accounts/abi"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
ethtypes "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/crypto"
"github.com/evmos/ethermint/crypto/ethsecp256k1"
"github.com/evmos/ethermint/server/config"
etherminttests "github.com/evmos/ethermint/tests"
etherminttypes "github.com/evmos/ethermint/types"
evmtypes "github.com/evmos/ethermint/x/evm/types"
feemarkettypes "github.com/evmos/ethermint/x/feemarket/types"
"github.com/stretchr/testify/suite" | abci "github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/crypto/tmhash"
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
tmversion "github.com/tendermint/tendermint/proto/tendermint/version"
tmtime "github.com/tendermint/tendermint/types/time"
"github.com/tendermint/tendermint/version"
"github.com/kava-labs/kava/app"
"github.com/kava-labs/kava/x/evmutil/keeper"
"github.com/kava-labs/kava/x/evmutil/types"
)
type Suite struct {
suite.Suite
App app.TestApp
Ctx sdk.Context
Address common.Address
BankKeeper bankkeeper.Keeper
AccountKeeper authkeeper.AccountKeeper
Keeper keeper.Keeper
EvmBankKeeper keeper.EvmBankKeeper
Addrs []sdk.AccAddress
EvmModuleAddr sdk.AccAddress
QueryClient types.QueryClient
QueryClientEvm evmtypes.QueryClient
Key1 *ethsecp256k1.PrivKey
Key1Addr types.InternalEVMAddress
Key2 *ethsecp256k1.PrivKey
}
func (suite *Suite) SetupTest() {
tApp := app.NewTestApp()
suite.Ctx = tApp.NewContext(true, tmproto.Header{Height: 1, Time: tmtime.Now()})
suite.App = tApp
suite.BankKeeper = tApp.GetBankKeeper()
suite.AccountKeeper = tApp.GetAccountKeeper()
suite.Keeper = tApp.GetEvmutilKeeper()
suite.EvmBankKeeper = keeper.NewEvmBankKeeper(tApp.GetEvmutilKeeper(), suite.BankKeeper, suite.AccountKeeper)
suite.EvmModuleAddr = suite.AccountKeeper.GetModuleAddress(evmtypes.ModuleName)
// test evm user keys that have no minting permissions
addr, privKey := RandomEvmAccount()
suite.Key1 = privKey
suite.Key1Addr = types.NewInternalEVMAddress(addr)
_, suite.Key2 = RandomEvmAccount()
_, addrs := app.GeneratePrivKeyAddressPairs(4)
suite.Addrs = addrs
evmGenesis := evmtypes.DefaultGenesisState()
evmGenesis.Params.EvmDenom = "akava"
feemarketGenesis := feemarkettypes.DefaultGenesisState()
feemarketGenesis.Params.EnableHeight = 1
feemarketGenesis.Params.NoBaseFee = false
cdc := suite.App.AppCodec()
coins := sdk.NewCoins(sdk.NewInt64Coin("ukava", 1000_000_000_000_000_000))
authGS := app.NewFundedGenStateWithSameCoins(cdc, coins, []sdk.AccAddress{
sdk.AccAddress(suite.Key1.PubKey().Address()),
sdk.AccAddress(suite.Key2.PubKey().Address()),
})
gs := app.GenesisState{
evmtypes.ModuleName: cdc.MustMarshalJSON(evmGenesis),
feemarkettypes.ModuleName: cdc.MustMarshalJSON(feemarketGenesis),
}
suite.App.InitializeFromGenesisStates(authGS, gs)
// consensus key - needed to set up evm module
consPriv, err := ethsecp256k1.GenerateKey()
suite.Require().NoError(err)
consAddress := sdk.ConsAddress(consPriv.PubKey().Address())
// InitializeFromGenesisStates commits first block so we start at 2 here
suite.Ctx = suite.App.NewContext(false, tmproto.Header{
Height: suite.App.LastBlockHeight() + 1,
ChainID: "kavatest_1-1",
Time: time.Now().UTC(),
ProposerAddress: consAddress.Bytes(),
Version: tmversion.Consensus{
Block: version.BlockProtocol,
},
LastBlockId: tmproto.BlockID{
Hash: tmhash.Sum([]byte("block_id")),
PartSetHeader: tmproto.PartSetHeader{
Total: 11,
Hash: tmhash.Sum([]byte("partset_header")),
},
},
AppHash: tmhash.Sum([]byte("app")),
DataHash: tmhash.Sum([]byte("data")),
EvidenceHash: tmhash.Sum([]byte("evidence")),
ValidatorsHash: tmhash.Sum([]byte("validators")),
NextValidatorsHash: tmhash.Sum([]byte("next_validators")),
ConsensusHash: tmhash.Sum([]byte("consensus")),
LastResultsHash: tmhash.Sum([]byte("last_result")),
})
// We need to set the validator as calling the EVM looks up the validator address
// https://github.com/evmos/ethermint/blob/f21592ebfe74da7590eb42ed926dae970b2a9a3f/x/evm/keeper/state_transition.go#L487
// evmkeeper.EVMConfig() will return error "failed to load evm config" if not set
acc := ðerminttypes.EthAccount{
BaseAccount: authtypes.NewBaseAccount(sdk.AccAddress(suite.Address.Bytes()), nil, 0, 0),
CodeHash: common.BytesToHash(crypto.Keccak256(nil)).String(),
}
suite.AccountKeeper.SetAccount(suite.Ctx, acc)
valAddr := sdk.ValAddress(suite.Address.Bytes())
validator, err := stakingtypes.NewValidator(valAddr, consPriv.PubKey(), stakingtypes.Description{})
suite.Require().NoError(err)
err = suite.App.GetStakingKeeper().SetValidatorByConsAddr(suite.Ctx, validator)
suite.Require().NoError(err)
suite.App.GetStakingKeeper().SetValidator(suite.Ctx, validator)
// add conversion pair for first module deployed contract to evmutil params
suite.Keeper.SetParams(suite.Ctx, types.NewParams(
types.NewConversionPairs(
types.NewConversionPair(
// First contract this module deploys
MustNewInternalEVMAddressFromString("0x15932E26f5BD4923d46a2b205191C4b5d5f43FE3"),
"erc20/usdc",
),
),
types.NewAllowedCosmosCoinERC20Tokens(),
))
queryHelper := baseapp.NewQueryServerTestHelper(suite.Ctx, suite.App.InterfaceRegistry())
evmtypes.RegisterQueryServer(queryHelper, suite.App.GetEvmKeeper())
suite.QueryClientEvm = evmtypes.NewQueryClient(queryHelper)
types.RegisterQueryServer(queryHelper, keeper.NewQueryServerImpl(suite.Keeper))
suite.QueryClient = types.NewQueryClient(queryHelper)
// We need to commit so that the ethermint feemarket beginblock runs to set the minfee
// feeMarketKeeper.GetBaseFee() will return nil otherwise
suite.Commit()
}
func (suite *Suite) Commit() {
_ = suite.App.Commit()
header := suite.Ctx.BlockHeader()
header.Height += 1
suite.App.BeginBlock(abci.RequestBeginBlock{
Header: header,
})
// update ctx
suite.Ctx = suite.App.NewContext(false, header)
}
func (suite *Suite) ModuleBalance(denom string) sdk.Int {
return suite.App.GetModuleAccountBalance(suite.Ctx, types.ModuleName, denom)
}
func (suite *Suite) FundAccountWithKava(addr sdk.AccAddress, coins sdk.Coins) {
ukava := coins.AmountOf("ukava")
if ukava.IsPositive() {
err := suite.App.FundAccount(suite.Ctx, addr, sdk.NewCoins(sdk.NewCoin("ukava", ukava)))
suite.Require().NoError(err)
}
akava := coins.AmountOf("akava")
if akava.IsPositive() {
err := suite.Keeper.SetBalance(suite.Ctx, addr, akava)
suite.Require().NoError(err)
}
}
func (suite *Suite) FundModuleAccountWithKava(moduleName string, coins sdk.Coins) {
ukava := coins.AmountOf("ukava")
if ukava.IsPositive() {
err := suite.App.FundModuleAccount(suite.Ctx, moduleName, sdk.NewCoins(sdk.NewCoin("ukava", ukava)))
suite.Require().NoError(err)
}
akava := coins.AmountOf("akava")
if akava.IsPositive() {
addr := suite.AccountKeeper.GetModuleAddress(moduleName)
err := suite.Keeper.SetBalance(suite.Ctx, addr, akava)
suite.Require().NoError(err)
}
}
func (suite *Suite) DeployERC20() types.InternalEVMAddress {
// make sure module account is created
// qq: any better ways to do this?
suite.App.FundModuleAccount(
suite.Ctx,
types.ModuleName,
sdk.NewCoins(sdk.NewCoin("ukava", sdkmath.NewInt(0))),
)
contractAddr, err := suite.Keeper.DeployTestMintableERC20Contract(suite.Ctx, "USDC", "USDC", uint8(18))
suite.Require().NoError(err)
suite.Require().Greater(len(contractAddr.Address), 0)
return contractAddr
}
func (suite *Suite) GetERC20BalanceOf(
contractAbi abi.ABI,
contractAddr types.InternalEVMAddress,
accountAddr types.InternalEVMAddress,
) *big.Int {
// Query ERC20.balanceOf()
addr := common.BytesToAddress(suite.Key1.PubKey().Address())
res, err := suite.QueryContract(
types.ERC20MintableBurnableContract.ABI,
addr,
suite.Key1,
contractAddr,
"balanceOf",
accountAddr.Address,
)
suite.Require().NoError(err)
suite.Require().Len(res, 1)
balance, ok := res[0].(*big.Int)
suite.Require().True(ok, "balanceOf should respond with *big.Int")
return balance
}
func (suite *Suite) QueryContract(
contractAbi abi.ABI,
from common.Address,
fromKey *ethsecp256k1.PrivKey,
contract types.InternalEVMAddress,
method string,
args ...interface{},
) ([]interface{}, error) {
// Pack query args
data, err := contractAbi.Pack(method, args...)
suite.Require().NoError(err)
// Send TX
res, err := suite.SendTx(contract, from, fromKey, data)
suite.Require().NoError(err)
// Check for VM errors and unpack returned data
switch res.VmError {
case vm.ErrExecutionReverted.Error():
response, err := abi.UnpackRevert(res.Ret)
suite.Require().NoError(err)
return nil, errors.New(response)
case "": // No error, continue
default:
panic(fmt.Sprintf("unhandled vm error response: %v", res.VmError))
}
// Unpack response
unpackedRes, err := contractAbi.Unpack(method, res.Ret)
suite.Require().NoErrorf(err, "failed to unpack method %v response", method)
return unpackedRes, nil
}
// SendTx submits a transaction to the block.
func (suite *Suite) SendTx(
contractAddr types.InternalEVMAddress,
from common.Address,
signerKey *ethsecp256k1.PrivKey,
transferData []byte,
) (*evmtypes.MsgEthereumTxResponse, error) {
ctx := sdk.WrapSDKContext(suite.Ctx)
chainID := suite.App.GetEvmKeeper().ChainID()
args, err := json.Marshal(&evmtypes.TransactionArgs{
To: &contractAddr.Address,
From: &from,
Data: (*hexutil.Bytes)(&transferData),
})
if err != nil {
return nil, err
}
gasRes, err := suite.QueryClientEvm.EstimateGas(ctx, &evmtypes.EthCallRequest{
Args: args,
GasCap: config.DefaultGasCap,
})
if err != nil {
return nil, err
}
nonce := suite.App.GetEvmKeeper().GetNonce(suite.Ctx, suite.Address)
baseFee := suite.App.GetFeeMarketKeeper().GetBaseFee(suite.Ctx)
suite.Require().NotNil(baseFee, "base fee is nil")
// Mint the max gas to the FeeCollector to ensure balance in case of refund
suite.MintFeeCollector(sdk.NewCoins(
sdk.NewCoin(
"ukava",
sdkmath.NewInt(baseFee.Int64()*int64(gasRes.Gas*2)),
)))
ercTransferTx := evmtypes.NewTx(
chainID,
nonce,
&contractAddr.Address,
nil, // amount
gasRes.Gas*2, // gasLimit, TODO: runs out of gas with just res.Gas, ex: estimated was 21572 but used 24814
nil, // gasPrice
suite.App.GetFeeMarketKeeper().GetBaseFee(suite.Ctx), // gasFeeCap
big.NewInt(1), // gasTipCap
transferData,
ðtypes.AccessList{}, // accesses
)
ercTransferTx.From = hex.EncodeToString(signerKey.PubKey().Address())
err = ercTransferTx.Sign(ethtypes.LatestSignerForChainID(chainID), etherminttests.NewSigner(signerKey))
if err != nil {
return nil, err
}
rsp, err := suite.App.GetEvmKeeper().EthereumTx(ctx, ercTransferTx)
if err != nil {
return nil, err
}
// Do not check vm error here since we want to check for errors later
return rsp, nil
}
func (suite *Suite) MintFeeCollector(coins sdk.Coins) {
err := suite.App.FundModuleAccount(suite.Ctx, authtypes.FeeCollectorName, coins)
suite.Require().NoError(err)
}
// GetEvents returns emitted events on the sdk context
func (suite *Suite) GetEvents() sdk.Events {
return suite.Ctx.EventManager().Events()
}
// EventsContains asserts that the expected event is in the provided events
func (suite *Suite) EventsContains(events sdk.Events, expectedEvent sdk.Event) {
foundMatch := false
var possibleFailedMatch []sdk.Attribute
expectedAttrs := attrsToMap(expectedEvent.Attributes)
for _, event := range events {
if event.Type == expectedEvent.Type {
attrs := attrsToMap(event.Attributes)
if reflect.DeepEqual(expectedAttrs, attrs) {
foundMatch = true
} else {
possibleFailedMatch = attrs
}
}
}
if !foundMatch && possibleFailedMatch != nil {
suite.ElementsMatch(expectedAttrs, possibleFailedMatch, "unmatched attributes on event of type %s", expectedEvent.Type)
} else {
suite.Truef(foundMatch, "event of type %s not found", expectedEvent.Type)
}
}
// EventsDoNotContain asserts that the event is **not** is in the provided events
func (suite *Suite) EventsDoNotContain(events sdk.Events, eventType string) {
foundMatch := false
for _, event := range events {
if event.Type == eventType {
foundMatch = true
}
}
suite.Falsef(foundMatch, "event of type %s should not be found, but was found", eventType)
}
// BigIntsEqual is a helper method for comparing the equality of two big ints
func (suite *Suite) BigIntsEqual(expected *big.Int, actual *big.Int, msg string) {
suite.Truef(expected.Cmp(actual) == 0, "%s (expected: %s, actual: %s)", msg, expected.String(), actual.String())
}
func attrsToMap(attrs []abci.EventAttribute) []sdk.Attribute {
out := []sdk.Attribute{}
for _, attr := range attrs {
out = append(out, sdk.NewAttribute(string(attr.Key), string(attr.Value)))
}
return out
}
// MustNewInternalEVMAddressFromString returns a new InternalEVMAddress from a
// hex string. This will panic if the input hex string is invalid.
func MustNewInternalEVMAddressFromString(addrStr string) types.InternalEVMAddress {
addr, err := types.NewInternalEVMAddressFromString(addrStr)
if err != nil {
panic(err)
}
return addr
}
func RandomEvmAccount() (common.Address, *ethsecp256k1.PrivKey) {
privKey, err := ethsecp256k1.GenerateKey()
if err != nil {
panic(err)
}
addr := common.BytesToAddress(privKey.PubKey().Address())
return addr, privKey
}
func RandomEvmAddress() common.Address {
addr, _ := RandomEvmAccount()
return addr
}
func RandomInternalEVMAddress() types.InternalEVMAddress {
return types.NewInternalEVMAddress(RandomEvmAddress())
} | random_line_split | |
suite.go | package testutil
import (
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"math/big"
"reflect"
"time"
sdkmath "cosmossdk.io/math"
"github.com/cosmos/cosmos-sdk/baseapp"
sdk "github.com/cosmos/cosmos-sdk/types"
authkeeper "github.com/cosmos/cosmos-sdk/x/auth/keeper"
authtypes "github.com/cosmos/cosmos-sdk/x/auth/types"
bankkeeper "github.com/cosmos/cosmos-sdk/x/bank/keeper"
stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types"
"github.com/ethereum/go-ethereum/accounts/abi"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
ethtypes "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/crypto"
"github.com/evmos/ethermint/crypto/ethsecp256k1"
"github.com/evmos/ethermint/server/config"
etherminttests "github.com/evmos/ethermint/tests"
etherminttypes "github.com/evmos/ethermint/types"
evmtypes "github.com/evmos/ethermint/x/evm/types"
feemarkettypes "github.com/evmos/ethermint/x/feemarket/types"
"github.com/stretchr/testify/suite"
abci "github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/crypto/tmhash"
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
tmversion "github.com/tendermint/tendermint/proto/tendermint/version"
tmtime "github.com/tendermint/tendermint/types/time"
"github.com/tendermint/tendermint/version"
"github.com/kava-labs/kava/app"
"github.com/kava-labs/kava/x/evmutil/keeper"
"github.com/kava-labs/kava/x/evmutil/types"
)
type Suite struct {
suite.Suite
App app.TestApp
Ctx sdk.Context
Address common.Address
BankKeeper bankkeeper.Keeper
AccountKeeper authkeeper.AccountKeeper
Keeper keeper.Keeper
EvmBankKeeper keeper.EvmBankKeeper
Addrs []sdk.AccAddress
EvmModuleAddr sdk.AccAddress
QueryClient types.QueryClient
QueryClientEvm evmtypes.QueryClient
Key1 *ethsecp256k1.PrivKey
Key1Addr types.InternalEVMAddress
Key2 *ethsecp256k1.PrivKey
}
func (suite *Suite) SetupTest() {
tApp := app.NewTestApp()
suite.Ctx = tApp.NewContext(true, tmproto.Header{Height: 1, Time: tmtime.Now()})
suite.App = tApp
suite.BankKeeper = tApp.GetBankKeeper()
suite.AccountKeeper = tApp.GetAccountKeeper()
suite.Keeper = tApp.GetEvmutilKeeper()
suite.EvmBankKeeper = keeper.NewEvmBankKeeper(tApp.GetEvmutilKeeper(), suite.BankKeeper, suite.AccountKeeper)
suite.EvmModuleAddr = suite.AccountKeeper.GetModuleAddress(evmtypes.ModuleName)
// test evm user keys that have no minting permissions
addr, privKey := RandomEvmAccount()
suite.Key1 = privKey
suite.Key1Addr = types.NewInternalEVMAddress(addr)
_, suite.Key2 = RandomEvmAccount()
_, addrs := app.GeneratePrivKeyAddressPairs(4)
suite.Addrs = addrs
evmGenesis := evmtypes.DefaultGenesisState()
evmGenesis.Params.EvmDenom = "akava"
feemarketGenesis := feemarkettypes.DefaultGenesisState()
feemarketGenesis.Params.EnableHeight = 1
feemarketGenesis.Params.NoBaseFee = false
cdc := suite.App.AppCodec()
coins := sdk.NewCoins(sdk.NewInt64Coin("ukava", 1000_000_000_000_000_000))
authGS := app.NewFundedGenStateWithSameCoins(cdc, coins, []sdk.AccAddress{
sdk.AccAddress(suite.Key1.PubKey().Address()),
sdk.AccAddress(suite.Key2.PubKey().Address()),
})
gs := app.GenesisState{
evmtypes.ModuleName: cdc.MustMarshalJSON(evmGenesis),
feemarkettypes.ModuleName: cdc.MustMarshalJSON(feemarketGenesis),
}
suite.App.InitializeFromGenesisStates(authGS, gs)
// consensus key - needed to set up evm module
consPriv, err := ethsecp256k1.GenerateKey()
suite.Require().NoError(err)
consAddress := sdk.ConsAddress(consPriv.PubKey().Address())
// InitializeFromGenesisStates commits first block so we start at 2 here
suite.Ctx = suite.App.NewContext(false, tmproto.Header{
Height: suite.App.LastBlockHeight() + 1,
ChainID: "kavatest_1-1",
Time: time.Now().UTC(),
ProposerAddress: consAddress.Bytes(),
Version: tmversion.Consensus{
Block: version.BlockProtocol,
},
LastBlockId: tmproto.BlockID{
Hash: tmhash.Sum([]byte("block_id")),
PartSetHeader: tmproto.PartSetHeader{
Total: 11,
Hash: tmhash.Sum([]byte("partset_header")),
},
},
AppHash: tmhash.Sum([]byte("app")),
DataHash: tmhash.Sum([]byte("data")),
EvidenceHash: tmhash.Sum([]byte("evidence")),
ValidatorsHash: tmhash.Sum([]byte("validators")),
NextValidatorsHash: tmhash.Sum([]byte("next_validators")),
ConsensusHash: tmhash.Sum([]byte("consensus")),
LastResultsHash: tmhash.Sum([]byte("last_result")),
})
// We need to set the validator as calling the EVM looks up the validator address
// https://github.com/evmos/ethermint/blob/f21592ebfe74da7590eb42ed926dae970b2a9a3f/x/evm/keeper/state_transition.go#L487
// evmkeeper.EVMConfig() will return error "failed to load evm config" if not set
acc := ðerminttypes.EthAccount{
BaseAccount: authtypes.NewBaseAccount(sdk.AccAddress(suite.Address.Bytes()), nil, 0, 0),
CodeHash: common.BytesToHash(crypto.Keccak256(nil)).String(),
}
suite.AccountKeeper.SetAccount(suite.Ctx, acc)
valAddr := sdk.ValAddress(suite.Address.Bytes())
validator, err := stakingtypes.NewValidator(valAddr, consPriv.PubKey(), stakingtypes.Description{})
suite.Require().NoError(err)
err = suite.App.GetStakingKeeper().SetValidatorByConsAddr(suite.Ctx, validator)
suite.Require().NoError(err)
suite.App.GetStakingKeeper().SetValidator(suite.Ctx, validator)
// add conversion pair for first module deployed contract to evmutil params
suite.Keeper.SetParams(suite.Ctx, types.NewParams(
types.NewConversionPairs(
types.NewConversionPair(
// First contract this module deploys
MustNewInternalEVMAddressFromString("0x15932E26f5BD4923d46a2b205191C4b5d5f43FE3"),
"erc20/usdc",
),
),
types.NewAllowedCosmosCoinERC20Tokens(),
))
queryHelper := baseapp.NewQueryServerTestHelper(suite.Ctx, suite.App.InterfaceRegistry())
evmtypes.RegisterQueryServer(queryHelper, suite.App.GetEvmKeeper())
suite.QueryClientEvm = evmtypes.NewQueryClient(queryHelper)
types.RegisterQueryServer(queryHelper, keeper.NewQueryServerImpl(suite.Keeper))
suite.QueryClient = types.NewQueryClient(queryHelper)
// We need to commit so that the ethermint feemarket beginblock runs to set the minfee
// feeMarketKeeper.GetBaseFee() will return nil otherwise
suite.Commit()
}
func (suite *Suite) Commit() {
_ = suite.App.Commit()
header := suite.Ctx.BlockHeader()
header.Height += 1
suite.App.BeginBlock(abci.RequestBeginBlock{
Header: header,
})
// update ctx
suite.Ctx = suite.App.NewContext(false, header)
}
func (suite *Suite) ModuleBalance(denom string) sdk.Int {
return suite.App.GetModuleAccountBalance(suite.Ctx, types.ModuleName, denom)
}
func (suite *Suite) FundAccountWithKava(addr sdk.AccAddress, coins sdk.Coins) {
ukava := coins.AmountOf("ukava")
if ukava.IsPositive() {
err := suite.App.FundAccount(suite.Ctx, addr, sdk.NewCoins(sdk.NewCoin("ukava", ukava)))
suite.Require().NoError(err)
}
akava := coins.AmountOf("akava")
if akava.IsPositive() {
err := suite.Keeper.SetBalance(suite.Ctx, addr, akava)
suite.Require().NoError(err)
}
}
func (suite *Suite) FundModuleAccountWithKava(moduleName string, coins sdk.Coins) {
ukava := coins.AmountOf("ukava")
if ukava.IsPositive() {
err := suite.App.FundModuleAccount(suite.Ctx, moduleName, sdk.NewCoins(sdk.NewCoin("ukava", ukava)))
suite.Require().NoError(err)
}
akava := coins.AmountOf("akava")
if akava.IsPositive() {
addr := suite.AccountKeeper.GetModuleAddress(moduleName)
err := suite.Keeper.SetBalance(suite.Ctx, addr, akava)
suite.Require().NoError(err)
}
}
func (suite *Suite) DeployERC20() types.InternalEVMAddress {
// make sure module account is created
// qq: any better ways to do this?
suite.App.FundModuleAccount(
suite.Ctx,
types.ModuleName,
sdk.NewCoins(sdk.NewCoin("ukava", sdkmath.NewInt(0))),
)
contractAddr, err := suite.Keeper.DeployTestMintableERC20Contract(suite.Ctx, "USDC", "USDC", uint8(18))
suite.Require().NoError(err)
suite.Require().Greater(len(contractAddr.Address), 0)
return contractAddr
}
func (suite *Suite) GetERC20BalanceOf(
contractAbi abi.ABI,
contractAddr types.InternalEVMAddress,
accountAddr types.InternalEVMAddress,
) *big.Int {
// Query ERC20.balanceOf()
addr := common.BytesToAddress(suite.Key1.PubKey().Address())
res, err := suite.QueryContract(
types.ERC20MintableBurnableContract.ABI,
addr,
suite.Key1,
contractAddr,
"balanceOf",
accountAddr.Address,
)
suite.Require().NoError(err)
suite.Require().Len(res, 1)
balance, ok := res[0].(*big.Int)
suite.Require().True(ok, "balanceOf should respond with *big.Int")
return balance
}
func (suite *Suite) QueryContract(
contractAbi abi.ABI,
from common.Address,
fromKey *ethsecp256k1.PrivKey,
contract types.InternalEVMAddress,
method string,
args ...interface{},
) ([]interface{}, error) {
// Pack query args
data, err := contractAbi.Pack(method, args...)
suite.Require().NoError(err)
// Send TX
res, err := suite.SendTx(contract, from, fromKey, data)
suite.Require().NoError(err)
// Check for VM errors and unpack returned data
switch res.VmError {
case vm.ErrExecutionReverted.Error():
response, err := abi.UnpackRevert(res.Ret)
suite.Require().NoError(err)
return nil, errors.New(response)
case "": // No error, continue
default:
panic(fmt.Sprintf("unhandled vm error response: %v", res.VmError))
}
// Unpack response
unpackedRes, err := contractAbi.Unpack(method, res.Ret)
suite.Require().NoErrorf(err, "failed to unpack method %v response", method)
return unpackedRes, nil
}
// SendTx submits a transaction to the block.
func (suite *Suite) SendTx(
contractAddr types.InternalEVMAddress,
from common.Address,
signerKey *ethsecp256k1.PrivKey,
transferData []byte,
) (*evmtypes.MsgEthereumTxResponse, error) {
ctx := sdk.WrapSDKContext(suite.Ctx)
chainID := suite.App.GetEvmKeeper().ChainID()
args, err := json.Marshal(&evmtypes.TransactionArgs{
To: &contractAddr.Address,
From: &from,
Data: (*hexutil.Bytes)(&transferData),
})
if err != nil {
return nil, err
}
gasRes, err := suite.QueryClientEvm.EstimateGas(ctx, &evmtypes.EthCallRequest{
Args: args,
GasCap: config.DefaultGasCap,
})
if err != nil {
return nil, err
}
nonce := suite.App.GetEvmKeeper().GetNonce(suite.Ctx, suite.Address)
baseFee := suite.App.GetFeeMarketKeeper().GetBaseFee(suite.Ctx)
suite.Require().NotNil(baseFee, "base fee is nil")
// Mint the max gas to the FeeCollector to ensure balance in case of refund
suite.MintFeeCollector(sdk.NewCoins(
sdk.NewCoin(
"ukava",
sdkmath.NewInt(baseFee.Int64()*int64(gasRes.Gas*2)),
)))
ercTransferTx := evmtypes.NewTx(
chainID,
nonce,
&contractAddr.Address,
nil, // amount
gasRes.Gas*2, // gasLimit, TODO: runs out of gas with just res.Gas, ex: estimated was 21572 but used 24814
nil, // gasPrice
suite.App.GetFeeMarketKeeper().GetBaseFee(suite.Ctx), // gasFeeCap
big.NewInt(1), // gasTipCap
transferData,
ðtypes.AccessList{}, // accesses
)
ercTransferTx.From = hex.EncodeToString(signerKey.PubKey().Address())
err = ercTransferTx.Sign(ethtypes.LatestSignerForChainID(chainID), etherminttests.NewSigner(signerKey))
if err != nil {
return nil, err
}
rsp, err := suite.App.GetEvmKeeper().EthereumTx(ctx, ercTransferTx)
if err != nil {
return nil, err
}
// Do not check vm error here since we want to check for errors later
return rsp, nil
}
func (suite *Suite) MintFeeCollector(coins sdk.Coins) {
err := suite.App.FundModuleAccount(suite.Ctx, authtypes.FeeCollectorName, coins)
suite.Require().NoError(err)
}
// GetEvents returns emitted events on the sdk context
func (suite *Suite) GetEvents() sdk.Events {
return suite.Ctx.EventManager().Events()
}
// EventsContains asserts that the expected event is in the provided events
func (suite *Suite) EventsContains(events sdk.Events, expectedEvent sdk.Event) {
foundMatch := false
var possibleFailedMatch []sdk.Attribute
expectedAttrs := attrsToMap(expectedEvent.Attributes)
for _, event := range events {
if event.Type == expectedEvent.Type {
attrs := attrsToMap(event.Attributes)
if reflect.DeepEqual(expectedAttrs, attrs) {
foundMatch = true
} else {
possibleFailedMatch = attrs
}
}
}
if !foundMatch && possibleFailedMatch != nil | else {
suite.Truef(foundMatch, "event of type %s not found", expectedEvent.Type)
}
}
// EventsDoNotContain asserts that the event is **not** is in the provided events
func (suite *Suite) EventsDoNotContain(events sdk.Events, eventType string) {
foundMatch := false
for _, event := range events {
if event.Type == eventType {
foundMatch = true
}
}
suite.Falsef(foundMatch, "event of type %s should not be found, but was found", eventType)
}
// BigIntsEqual is a helper method for comparing the equality of two big ints
func (suite *Suite) BigIntsEqual(expected *big.Int, actual *big.Int, msg string) {
suite.Truef(expected.Cmp(actual) == 0, "%s (expected: %s, actual: %s)", msg, expected.String(), actual.String())
}
func attrsToMap(attrs []abci.EventAttribute) []sdk.Attribute {
out := []sdk.Attribute{}
for _, attr := range attrs {
out = append(out, sdk.NewAttribute(string(attr.Key), string(attr.Value)))
}
return out
}
// MustNewInternalEVMAddressFromString returns a new InternalEVMAddress from a
// hex string. This will panic if the input hex string is invalid.
func MustNewInternalEVMAddressFromString(addrStr string) types.InternalEVMAddress {
addr, err := types.NewInternalEVMAddressFromString(addrStr)
if err != nil {
panic(err)
}
return addr
}
func RandomEvmAccount() (common.Address, *ethsecp256k1.PrivKey) {
privKey, err := ethsecp256k1.GenerateKey()
if err != nil {
panic(err)
}
addr := common.BytesToAddress(privKey.PubKey().Address())
return addr, privKey
}
func RandomEvmAddress() common.Address {
addr, _ := RandomEvmAccount()
return addr
}
func RandomInternalEVMAddress() types.InternalEVMAddress {
return types.NewInternalEVMAddress(RandomEvmAddress())
}
| {
suite.ElementsMatch(expectedAttrs, possibleFailedMatch, "unmatched attributes on event of type %s", expectedEvent.Type)
} | conditional_block |
# WAD SERVER TEST as MULTI-1.py | # WAD SERVER (DRONE JONGHAP GUWANJE SYSTEM)
# Licence Isaac Kim-leader of team RETELLIGENCE
# 2016
import threading
from socket import *
import time
import os
import random
from datetime import datetime
print('**************************************************************************')
print('* start_THE_First_WADING_Connection_VIA_RETELLIGENCE_IEZANOV_IWEN *')
print('**************************************************************************')
# make file directory
#os.makedirs('C:\\Users\\RETELLIGENCE\\Desktop\\WADING\\DB_iwen')
# open file
# file handle
#F_users_h = open('C:\\Users\\RETELLIGENCE\\Desktop\\WADING\\DB_iwen\\User_info.txt', '+')
# file read
#R_users_h = F_users_h.read()
F_users_h = open('C:\\Users\\RETELLIGENCE\\Desktop\\WADING\\DB_iwen\\WAD_pinList.txt', '+') # loading pin data from DBMS
WAD_pinList = []
R_users_h = F_users_h.read()
C_users_h = R_users_h # copy of pin data
while(1): # loading pin data for making user ID
try:
for i in range(len(C_users_h)):
if (C_users_h[i]=='#') and (C_users_h[i+1]=='#') and (C_users_h[i+2]=='#'):
WAD_pinList.append((C_users_h[i+3:i+12])
except:
break
# preset list
# 12000~24000 WAD
# 24000~36000 app
# 36000~48000 platform
Server_Rx_prot_len = 24
Server_TX_prot_len =
server_adr = getadr()
LIVE_WAD_list = []
x_code = '###WAD*#*' # Server Rx / WAD Tx
xx_code = '###WAD#*#' # Server Tx / WAD Rx
re_list = []
## WAD ##
# step 1
"""
Server_Rx_prot =
1.[9] ###WAD*#*
2.[9] ID : _ _ _ _ _ _ _ _ _
3.[6] PW : _ _ _ _ _ _
"""
# step 2n
"""
Server_Tx_prot =
1.[9] ###WAD#*#
2.[5] New Port number
3.[6] PW : _ _ _ _ _ _
4.[5] command : _ _ _ _ _
"""
# step 2n+1
"""
Server_Rx_data =
1.[9] x_code (###WAD*#*)
2.[9] ID
3.[6] PW
4.[6] status
5.[128] beacon
"""
## APP ##
## PLATFORM ##
# step 1
"""
Server_Rx_prot =
1.[9] x_code
2.[9] ID
3.[6] PW
"""
# step 2
"""
"""
def check_multi_id(id_give): # check if some one is using same id (register sequence on app)
if id_give in R_users_h:
return (1)
else:
return(0)
def getadr():
s = socket(AF_INET, SOCK_DGRAM)
s.connect(("gmail.com",80))
r = s.getsockname()[0]
s.close()
print('Server IP address : ', r)
return (r)
def check_prot(wad_info):
pass
def disconnect_all():
serverSocket.close()
sys.exit()
print("Manually disconnected..")
time.sleep(0.5)
for i in range(9)
print("Programm will automaticly quit in %s seconds.." % str(9-int(i)))
time.sleep(1)
quit()
def save_leftover():
pass
def start_socket():
server_adr = getadr()
serverSocket = socket(AF_INET,SOCK_STREAM)
serverSocket.bind((server_adr, main_port))
serverSocket.listen(1)
connectionSocket, addr = serverSocket.accept()
##
### RETELLIGENCE WADING IN--on new thread
##class WADING(threading.Tread):
##
## def run(self, WAD):
##
## # WAD[0:9] = x_code
## # WAD[9:18] = WAD id
## # WAD[18:24] = WAD pw
## # WAD[24:29] = WAD new Port number
##
## given_PW =
## State = 'protocal_initializing'
##
## ### SERVER TX PROTOCALL DATA PACKET
## serverTx_protocall = xx_code + str(private_port) + '/' + str(given_PW) + '/' + str(State) + '/'
##
## ### socket BIND
## serverSocket = socket(AF_INET,SOCK_STREAM)
## serverSocket.bind((server_adr, private_port))
## serverSocket.listen(1)
## connectionSocket, addr = serverSocket.accept()
## connectionSocket.send = (serverTx_protocall.encode())
##
## recv_data = erverSovket.recv(1024)
##
## if (recv_data[0:9] == x_code):
## print(recv_data[9:18], ' recieving')
## a
## # error => break
## # what is suspend
##
##
##
##
##start_socket()
##
##
##while(1):
## request_list = []
## input_WAD = connectionSocket.recv(2048)
## while(1):
## if input_WAD != None:
##
## # length check
## if len(input_WAD)%24 == 0:
##
## # encription check
## if input_WAD[0:9] == x_code:
## ident_info = input_WAD[9:24]
## ident_info = '[' + ident_info + ']'
## if ident_info in WAD_ident_list:
## # send to request list
## request_list.append(input_WAD[0:24])
##
## # iteration go
## input_WAD = Input_WAD[24:]
## continue
## else:
## break
##
##
##
##
## for WAD in reauest_list:
##
## print('Starting new chapter')
## # set private port
## private_port = 12000 + 1
## while(1):
## if private_port in socket_list:
## private_port += 1
## else:
## break
## socket_list.append(private_port)
##
## # WAD send new port number
## Server_Tx_p = str(xx_code + private_port)
## serverSocket.send(Server_Tx_p.encode())
##
## # WAD info + port number
## WAD = WAD + str(private_port)
##
##
## # thread starting
## print(WAD)
## th = WADNIG(WAD)
## th.start()
##
## ## NOW U MAY ENTER WAD PROTOCAL
## WELCOME TO THE WORLD WIDE ASOCCIATION OF DRONES VIA INTERCONNECTED NETWORK INTEGERATION - RETELLIGENCE IWEN IEZANOV
#if 'Name' == __Main__:
def WADING(port):
while(1):
server_adr = getadr()
serverSocket = socket(AF_INET,SOCK_STREAM)
serverSocket.bind((server_adr, port))
serverSocket.listen(1)
connectionSocket, addr = serverSocket.accept()
while(1):
in_1 = connectionSocket.recv(1024) # recieve from wad
if in_1[:9] != x_code:
printQ("!!! Unidentified connection", port, str(datetime.now())
connectionSocket.close()
break
id_1 = in_1[9:15]
printQ("$$ Port", port, 'connected for WAD :: ', id_1, str(datetime.now())
connectionSocket.send(server_Rx_wad)
def APP(port):
def Make_id_i():
global WAD_pinlist
type_WAD = 'i'
year_WAD = str((time.localtime(time.time()))[0])
pin_WAD = str(int(WAD_pinList[-1]) + 1)
if len(pin_WAD) > 4:
pin_WAD = pin_WAD[0:4]
ID_WAD = type_WAD + year_WAD[-2] + year_WAD[-1] + '0'*(4-len(pin_WAD)) + str(random.randint(11,99))
while(1):
if check_multi_id(ID_WAD) = 1:
ID_WAD = ID_WAD[0, 7] + str(random.randint(11,99))
continue
else:
while(1):
passward = input("Enter PW (6 numbers): ")
try:
passward = int(passward)
except:
continue
else:
if len(passward) != 6:
continue
elif len(passward) == 6:
break
break
print(ID_WAD)
print(passward)
stat = ('Confirm?(y/n) : ')
if stat == 'y':
write_info = '[' + ID_WAD + +', ' + str(passward) + ']'
WAD_pinList.append(pin_WAD)
server_adr = getadr()
serverSocket = socket(AF_INET,SOCK_STREAM)
serverSocket.bind((server_adr, port))
serverSocket.listen(1)
connectionSocket, addr = serverSocket.accept()
in_1 = connectionSocket.recv(1024) # recieve from app
id_1 = in_1[9:15]
printQ("$$ Port", port, 'connected for app :: ', id_1, str(datetime.now())
def PLATFORM(port):
def Make_id_s():
global WAD_pinlist
type_WAD = 's'
year_WAD = str((time.localtime(time.time()))[0])
pin_WAD = str(int(WAD_pinList[-1]) + 1)
if len(pin_WAD) > 4:
pin_WAD = pin_WAD[0:4]
ID_WAD = type_WAD + year_WAD[-2] + year_WAD[-1] + '0'*(4-len(pin_WAD)) + str(random.randint(11,99))
while(1):
if check_multi_id(ID_WAD) = 1:
ID_WAD = ID_WAD[0, 7] + str(random.randint(11,99))
continue
else:
while(1):
passward = input("Enter PW (6 numbers): ")
try:
passward = int(passward)
except:
continue
else:
if len(passward) != 6:
continue
elif len(passward) == 6:
break
break
print(ID_WAD)
print(passward)
stat = ('Confirm?(y/n) : ')
if stat == 'y':
write_info = '[' + ID_WAD + +', ' + str(passward) + ']'
F_users_h.write('\n')
F_users_h.write(write_info)
F_users_h.write('\n')
WAD_pinList.append(pin_WAD)
server_adr = getadr()
serverSocket = socket(AF_INET,SOCK_STREAM)
serverSocket.bind((server_adr, port))
serverSocket.listen(1)
connectionSocket, addr = serverSocket.accept()
in_1 = connectionSocket.recv(1024) # recieve from platform
id_1 = in_1[9:15]
printQ("$$ Port", port, 'connected for platform :: ', id_1, str(datetime.now())
def printQ(sent):
global prints
if prints == True:
print(sent)
def initial_WAD():
for i in range(12000, 24000):
WADING(i)
def initial_APP():
for j in range(12000, 24000):
APP(j)
def initial_PLATFORM():
for k in range(12000, 24000):
PLATFORM(k)
def initialize(): # called by main()
# thread for each type of connection setting
init_wad = threading.Thread(target=initial_WAD)
init_app = threading.Thread(target=initial_APP)
init_platform = threading.Thread(target=initial_PLATFORM)
# starting tread for initializing step 1
init_wad.start()
init_app.start()
init_platform.start()
def main(): # main function
global prints
initialize() # calling initializing mode
while(1):
# start managing server
stat = input('[ Server Setup : s ] [ Variable Setings : v ] [ Admin Menu : a ] [ Live view : i ] [ Emergency Proxy Shut-down : e ] ')
if (stat == 's')or(stat == 'a')or(stat == 'v'):
print('$ Comming soon...')
if stat == 'i':
while(1):
print_ask = input("$ Press 'enter' to go on")
if print_ask='':
prints = True # global var to alow system to print to consol about system flow
else:
|
time.sleep(10)
if stat == 'e':
disconnect_all()
if (__name__ == "__main__"): # modulize
main()
| prints = False
break | conditional_block |
# WAD SERVER TEST as MULTI-1.py | # WAD SERVER (DRONE JONGHAP GUWANJE SYSTEM)
# Licence Isaac Kim-leader of team RETELLIGENCE
# 2016
import threading
from socket import *
import time
import os
import random
from datetime import datetime
print('**************************************************************************')
print('* start_THE_First_WADING_Connection_VIA_RETELLIGENCE_IEZANOV_IWEN *')
print('**************************************************************************')
# make file directory
#os.makedirs('C:\\Users\\RETELLIGENCE\\Desktop\\WADING\\DB_iwen')
# open file
# file handle
#F_users_h = open('C:\\Users\\RETELLIGENCE\\Desktop\\WADING\\DB_iwen\\User_info.txt', '+')
# file read
#R_users_h = F_users_h.read()
F_users_h = open('C:\\Users\\RETELLIGENCE\\Desktop\\WADING\\DB_iwen\\WAD_pinList.txt', '+') # loading pin data from DBMS
WAD_pinList = []
R_users_h = F_users_h.read()
C_users_h = R_users_h # copy of pin data
while(1): # loading pin data for making user ID
try:
for i in range(len(C_users_h)):
if (C_users_h[i]=='#') and (C_users_h[i+1]=='#') and (C_users_h[i+2]=='#'):
WAD_pinList.append((C_users_h[i+3:i+12])
except:
break
# preset list
# 12000~24000 WAD
# 24000~36000 app
# 36000~48000 platform
Server_Rx_prot_len = 24
Server_TX_prot_len =
server_adr = getadr()
LIVE_WAD_list = []
x_code = '###WAD*#*' # Server Rx / WAD Tx
xx_code = '###WAD#*#' # Server Tx / WAD Rx
re_list = []
## WAD ##
# step 1
"""
Server_Rx_prot =
1.[9] ###WAD*#*
2.[9] ID : _ _ _ _ _ _ _ _ _
3.[6] PW : _ _ _ _ _ _
"""
# step 2n
"""
Server_Tx_prot =
1.[9] ###WAD#*#
2.[5] New Port number
3.[6] PW : _ _ _ _ _ _
4.[5] command : _ _ _ _ _
"""
# step 2n+1
"""
Server_Rx_data =
1.[9] x_code (###WAD*#*)
2.[9] ID
3.[6] PW
4.[6] status
5.[128] beacon
"""
## APP ##
## PLATFORM ##
# step 1
"""
Server_Rx_prot =
1.[9] x_code
2.[9] ID
3.[6] PW
"""
# step 2
"""
"""
def check_multi_id(id_give): # check if some one is using same id (register sequence on app)
if id_give in R_users_h:
return (1)
else:
return(0)
def getadr():
s = socket(AF_INET, SOCK_DGRAM)
s.connect(("gmail.com",80))
r = s.getsockname()[0]
s.close()
print('Server IP address : ', r)
return (r)
def check_prot(wad_info):
pass
def disconnect_all():
serverSocket.close()
sys.exit()
print("Manually disconnected..")
time.sleep(0.5)
for i in range(9)
print("Programm will automaticly quit in %s seconds.." % str(9-int(i)))
time.sleep(1)
quit()
def save_leftover():
pass
def start_socket():
server_adr = getadr()
serverSocket = socket(AF_INET,SOCK_STREAM)
serverSocket.bind((server_adr, main_port))
serverSocket.listen(1)
connectionSocket, addr = serverSocket.accept()
##
### RETELLIGENCE WADING IN--on new thread
##class WADING(threading.Tread):
##
## def run(self, WAD):
##
## # WAD[0:9] = x_code
## # WAD[9:18] = WAD id
## # WAD[18:24] = WAD pw
## # WAD[24:29] = WAD new Port number
##
## given_PW =
## State = 'protocal_initializing'
##
## ### SERVER TX PROTOCALL DATA PACKET
## serverTx_protocall = xx_code + str(private_port) + '/' + str(given_PW) + '/' + str(State) + '/'
##
## ### socket BIND
## serverSocket = socket(AF_INET,SOCK_STREAM)
## serverSocket.bind((server_adr, private_port))
## serverSocket.listen(1)
## connectionSocket, addr = serverSocket.accept()
## connectionSocket.send = (serverTx_protocall.encode())
##
## recv_data = erverSovket.recv(1024)
##
## if (recv_data[0:9] == x_code):
## print(recv_data[9:18], ' recieving')
## a
## # error => break
## # what is suspend
##
##
##
##
##start_socket()
##
##
##while(1):
## request_list = []
## input_WAD = connectionSocket.recv(2048)
## while(1):
## if input_WAD != None:
##
## # length check
## if len(input_WAD)%24 == 0:
##
## # encription check
## if input_WAD[0:9] == x_code:
## ident_info = input_WAD[9:24]
## ident_info = '[' + ident_info + ']'
## if ident_info in WAD_ident_list:
## # send to request list
## request_list.append(input_WAD[0:24])
##
## # iteration go
## input_WAD = Input_WAD[24:]
## continue
## else:
## break
##
##
##
##
## for WAD in reauest_list:
##
## print('Starting new chapter')
## # set private port
## private_port = 12000 + 1
## while(1):
## if private_port in socket_list:
## private_port += 1
## else:
## break
## socket_list.append(private_port)
##
## # WAD send new port number
## Server_Tx_p = str(xx_code + private_port)
## serverSocket.send(Server_Tx_p.encode())
##
## # WAD info + port number
## WAD = WAD + str(private_port)
##
##
## # thread starting
## print(WAD)
## th = WADNIG(WAD)
## th.start()
##
## ## NOW U MAY ENTER WAD PROTOCAL
## WELCOME TO THE WORLD WIDE ASOCCIATION OF DRONES VIA INTERCONNECTED NETWORK INTEGERATION - RETELLIGENCE IWEN IEZANOV
#if 'Name' == __Main__:
def WADING(port):
while(1):
server_adr = getadr()
serverSocket = socket(AF_INET,SOCK_STREAM)
serverSocket.bind((server_adr, port))
serverSocket.listen(1)
connectionSocket, addr = serverSocket.accept()
while(1):
in_1 = connectionSocket.recv(1024) # recieve from wad
if in_1[:9] != x_code:
printQ("!!! Unidentified connection", port, str(datetime.now())
connectionSocket.close()
break
id_1 = in_1[9:15]
printQ("$$ Port", port, 'connected for WAD :: ', id_1, str(datetime.now())
| connectionSocket.send(server_Rx_wad)
def APP(port):
def Make_id_i():
global WAD_pinlist
type_WAD = 'i'
year_WAD = str((time.localtime(time.time()))[0])
pin_WAD = str(int(WAD_pinList[-1]) + 1)
if len(pin_WAD) > 4:
pin_WAD = pin_WAD[0:4]
ID_WAD = type_WAD + year_WAD[-2] + year_WAD[-1] + '0'*(4-len(pin_WAD)) + str(random.randint(11,99))
while(1):
if check_multi_id(ID_WAD) = 1:
ID_WAD = ID_WAD[0, 7] + str(random.randint(11,99))
continue
else:
while(1):
passward = input("Enter PW (6 numbers): ")
try:
passward = int(passward)
except:
continue
else:
if len(passward) != 6:
continue
elif len(passward) == 6:
break
break
print(ID_WAD)
print(passward)
stat = ('Confirm?(y/n) : ')
if stat == 'y':
write_info = '[' + ID_WAD + +', ' + str(passward) + ']'
WAD_pinList.append(pin_WAD)
server_adr = getadr()
serverSocket = socket(AF_INET,SOCK_STREAM)
serverSocket.bind((server_adr, port))
serverSocket.listen(1)
connectionSocket, addr = serverSocket.accept()
in_1 = connectionSocket.recv(1024) # recieve from app
id_1 = in_1[9:15]
printQ("$$ Port", port, 'connected for app :: ', id_1, str(datetime.now())
def PLATFORM(port):
def Make_id_s():
global WAD_pinlist
type_WAD = 's'
year_WAD = str((time.localtime(time.time()))[0])
pin_WAD = str(int(WAD_pinList[-1]) + 1)
if len(pin_WAD) > 4:
pin_WAD = pin_WAD[0:4]
ID_WAD = type_WAD + year_WAD[-2] + year_WAD[-1] + '0'*(4-len(pin_WAD)) + str(random.randint(11,99))
while(1):
if check_multi_id(ID_WAD) = 1:
ID_WAD = ID_WAD[0, 7] + str(random.randint(11,99))
continue
else:
while(1):
passward = input("Enter PW (6 numbers): ")
try:
passward = int(passward)
except:
continue
else:
if len(passward) != 6:
continue
elif len(passward) == 6:
break
break
print(ID_WAD)
print(passward)
stat = ('Confirm?(y/n) : ')
if stat == 'y':
write_info = '[' + ID_WAD + +', ' + str(passward) + ']'
F_users_h.write('\n')
F_users_h.write(write_info)
F_users_h.write('\n')
WAD_pinList.append(pin_WAD)
server_adr = getadr()
serverSocket = socket(AF_INET,SOCK_STREAM)
serverSocket.bind((server_adr, port))
serverSocket.listen(1)
connectionSocket, addr = serverSocket.accept()
in_1 = connectionSocket.recv(1024) # recieve from platform
id_1 = in_1[9:15]
printQ("$$ Port", port, 'connected for platform :: ', id_1, str(datetime.now())
def printQ(sent):
global prints
if prints == True:
print(sent)
def initial_WAD():
for i in range(12000, 24000):
WADING(i)
def initial_APP():
for j in range(12000, 24000):
APP(j)
def initial_PLATFORM():
for k in range(12000, 24000):
PLATFORM(k)
def initialize(): # called by main()
# thread for each type of connection setting
init_wad = threading.Thread(target=initial_WAD)
init_app = threading.Thread(target=initial_APP)
init_platform = threading.Thread(target=initial_PLATFORM)
# starting tread for initializing step 1
init_wad.start()
init_app.start()
init_platform.start()
def main(): # main function
global prints
initialize() # calling initializing mode
while(1):
# start managing server
stat = input('[ Server Setup : s ] [ Variable Setings : v ] [ Admin Menu : a ] [ Live view : i ] [ Emergency Proxy Shut-down : e ] ')
if (stat == 's')or(stat == 'a')or(stat == 'v'):
print('$ Comming soon...')
if stat == 'i':
while(1):
print_ask = input("$ Press 'enter' to go on")
if print_ask='':
prints = True # global var to alow system to print to consol about system flow
else:
prints = False
break
time.sleep(10)
if stat == 'e':
disconnect_all()
if (__name__ == "__main__"): # modulize
main() | random_line_split | |
# WAD SERVER TEST as MULTI-1.py | # WAD SERVER (DRONE JONGHAP GUWANJE SYSTEM)
# Licence Isaac Kim-leader of team RETELLIGENCE
# 2016
import threading
from socket import *
import time
import os
import random
from datetime import datetime
print('**************************************************************************')
print('* start_THE_First_WADING_Connection_VIA_RETELLIGENCE_IEZANOV_IWEN *')
print('**************************************************************************')
# make file directory
#os.makedirs('C:\\Users\\RETELLIGENCE\\Desktop\\WADING\\DB_iwen')
# open file
# file handle
#F_users_h = open('C:\\Users\\RETELLIGENCE\\Desktop\\WADING\\DB_iwen\\User_info.txt', '+')
# file read
#R_users_h = F_users_h.read()
F_users_h = open('C:\\Users\\RETELLIGENCE\\Desktop\\WADING\\DB_iwen\\WAD_pinList.txt', '+') # loading pin data from DBMS
WAD_pinList = []
R_users_h = F_users_h.read()
C_users_h = R_users_h # copy of pin data
while(1): # loading pin data for making user ID
try:
for i in range(len(C_users_h)):
if (C_users_h[i]=='#') and (C_users_h[i+1]=='#') and (C_users_h[i+2]=='#'):
WAD_pinList.append((C_users_h[i+3:i+12])
except:
break
# preset list
# 12000~24000 WAD
# 24000~36000 app
# 36000~48000 platform
Server_Rx_prot_len = 24
Server_TX_prot_len =
server_adr = getadr()
LIVE_WAD_list = []
x_code = '###WAD*#*' # Server Rx / WAD Tx
xx_code = '###WAD#*#' # Server Tx / WAD Rx
re_list = []
## WAD ##
# step 1
"""
Server_Rx_prot =
1.[9] ###WAD*#*
2.[9] ID : _ _ _ _ _ _ _ _ _
3.[6] PW : _ _ _ _ _ _
"""
# step 2n
"""
Server_Tx_prot =
1.[9] ###WAD#*#
2.[5] New Port number
3.[6] PW : _ _ _ _ _ _
4.[5] command : _ _ _ _ _
"""
# step 2n+1
"""
Server_Rx_data =
1.[9] x_code (###WAD*#*)
2.[9] ID
3.[6] PW
4.[6] status
5.[128] beacon
"""
## APP ##
## PLATFORM ##
# step 1
"""
Server_Rx_prot =
1.[9] x_code
2.[9] ID
3.[6] PW
"""
# step 2
"""
"""
def check_multi_id(id_give): # check if some one is using same id (register sequence on app)
if id_give in R_users_h:
return (1)
else:
return(0)
def getadr():
s = socket(AF_INET, SOCK_DGRAM)
s.connect(("gmail.com",80))
r = s.getsockname()[0]
s.close()
print('Server IP address : ', r)
return (r)
def check_prot(wad_info):
pass
def disconnect_all():
serverSocket.close()
sys.exit()
print("Manually disconnected..")
time.sleep(0.5)
for i in range(9)
print("Programm will automaticly quit in %s seconds.." % str(9-int(i)))
time.sleep(1)
quit()
def save_leftover():
pass
def start_socket():
server_adr = getadr()
serverSocket = socket(AF_INET,SOCK_STREAM)
serverSocket.bind((server_adr, main_port))
serverSocket.listen(1)
connectionSocket, addr = serverSocket.accept()
##
### RETELLIGENCE WADING IN--on new thread
##class WADING(threading.Tread):
##
## def run(self, WAD):
##
## # WAD[0:9] = x_code
## # WAD[9:18] = WAD id
## # WAD[18:24] = WAD pw
## # WAD[24:29] = WAD new Port number
##
## given_PW =
## State = 'protocal_initializing'
##
## ### SERVER TX PROTOCALL DATA PACKET
## serverTx_protocall = xx_code + str(private_port) + '/' + str(given_PW) + '/' + str(State) + '/'
##
## ### socket BIND
## serverSocket = socket(AF_INET,SOCK_STREAM)
## serverSocket.bind((server_adr, private_port))
## serverSocket.listen(1)
## connectionSocket, addr = serverSocket.accept()
## connectionSocket.send = (serverTx_protocall.encode())
##
## recv_data = erverSovket.recv(1024)
##
## if (recv_data[0:9] == x_code):
## print(recv_data[9:18], ' recieving')
## a
## # error => break
## # what is suspend
##
##
##
##
##start_socket()
##
##
##while(1):
## request_list = []
## input_WAD = connectionSocket.recv(2048)
## while(1):
## if input_WAD != None:
##
## # length check
## if len(input_WAD)%24 == 0:
##
## # encription check
## if input_WAD[0:9] == x_code:
## ident_info = input_WAD[9:24]
## ident_info = '[' + ident_info + ']'
## if ident_info in WAD_ident_list:
## # send to request list
## request_list.append(input_WAD[0:24])
##
## # iteration go
## input_WAD = Input_WAD[24:]
## continue
## else:
## break
##
##
##
##
## for WAD in reauest_list:
##
## print('Starting new chapter')
## # set private port
## private_port = 12000 + 1
## while(1):
## if private_port in socket_list:
## private_port += 1
## else:
## break
## socket_list.append(private_port)
##
## # WAD send new port number
## Server_Tx_p = str(xx_code + private_port)
## serverSocket.send(Server_Tx_p.encode())
##
## # WAD info + port number
## WAD = WAD + str(private_port)
##
##
## # thread starting
## print(WAD)
## th = WADNIG(WAD)
## th.start()
##
## ## NOW U MAY ENTER WAD PROTOCAL
## WELCOME TO THE WORLD WIDE ASOCCIATION OF DRONES VIA INTERCONNECTED NETWORK INTEGERATION - RETELLIGENCE IWEN IEZANOV
#if 'Name' == __Main__:
def WADING(port):
while(1):
server_adr = getadr()
serverSocket = socket(AF_INET,SOCK_STREAM)
serverSocket.bind((server_adr, port))
serverSocket.listen(1)
connectionSocket, addr = serverSocket.accept()
while(1):
in_1 = connectionSocket.recv(1024) # recieve from wad
if in_1[:9] != x_code:
printQ("!!! Unidentified connection", port, str(datetime.now())
connectionSocket.close()
break
id_1 = in_1[9:15]
printQ("$$ Port", port, 'connected for WAD :: ', id_1, str(datetime.now())
connectionSocket.send(server_Rx_wad)
def APP(port):
def | ():
global WAD_pinlist
type_WAD = 'i'
year_WAD = str((time.localtime(time.time()))[0])
pin_WAD = str(int(WAD_pinList[-1]) + 1)
if len(pin_WAD) > 4:
pin_WAD = pin_WAD[0:4]
ID_WAD = type_WAD + year_WAD[-2] + year_WAD[-1] + '0'*(4-len(pin_WAD)) + str(random.randint(11,99))
while(1):
if check_multi_id(ID_WAD) = 1:
ID_WAD = ID_WAD[0, 7] + str(random.randint(11,99))
continue
else:
while(1):
passward = input("Enter PW (6 numbers): ")
try:
passward = int(passward)
except:
continue
else:
if len(passward) != 6:
continue
elif len(passward) == 6:
break
break
print(ID_WAD)
print(passward)
stat = ('Confirm?(y/n) : ')
if stat == 'y':
write_info = '[' + ID_WAD + +', ' + str(passward) + ']'
WAD_pinList.append(pin_WAD)
server_adr = getadr()
serverSocket = socket(AF_INET,SOCK_STREAM)
serverSocket.bind((server_adr, port))
serverSocket.listen(1)
connectionSocket, addr = serverSocket.accept()
in_1 = connectionSocket.recv(1024) # recieve from app
id_1 = in_1[9:15]
printQ("$$ Port", port, 'connected for app :: ', id_1, str(datetime.now())
def PLATFORM(port):
def Make_id_s():
global WAD_pinlist
type_WAD = 's'
year_WAD = str((time.localtime(time.time()))[0])
pin_WAD = str(int(WAD_pinList[-1]) + 1)
if len(pin_WAD) > 4:
pin_WAD = pin_WAD[0:4]
ID_WAD = type_WAD + year_WAD[-2] + year_WAD[-1] + '0'*(4-len(pin_WAD)) + str(random.randint(11,99))
while(1):
if check_multi_id(ID_WAD) = 1:
ID_WAD = ID_WAD[0, 7] + str(random.randint(11,99))
continue
else:
while(1):
passward = input("Enter PW (6 numbers): ")
try:
passward = int(passward)
except:
continue
else:
if len(passward) != 6:
continue
elif len(passward) == 6:
break
break
print(ID_WAD)
print(passward)
stat = ('Confirm?(y/n) : ')
if stat == 'y':
write_info = '[' + ID_WAD + +', ' + str(passward) + ']'
F_users_h.write('\n')
F_users_h.write(write_info)
F_users_h.write('\n')
WAD_pinList.append(pin_WAD)
server_adr = getadr()
serverSocket = socket(AF_INET,SOCK_STREAM)
serverSocket.bind((server_adr, port))
serverSocket.listen(1)
connectionSocket, addr = serverSocket.accept()
in_1 = connectionSocket.recv(1024) # recieve from platform
id_1 = in_1[9:15]
printQ("$$ Port", port, 'connected for platform :: ', id_1, str(datetime.now())
def printQ(sent):
global prints
if prints == True:
print(sent)
def initial_WAD():
for i in range(12000, 24000):
WADING(i)
def initial_APP():
for j in range(12000, 24000):
APP(j)
def initial_PLATFORM():
for k in range(12000, 24000):
PLATFORM(k)
def initialize(): # called by main()
# thread for each type of connection setting
init_wad = threading.Thread(target=initial_WAD)
init_app = threading.Thread(target=initial_APP)
init_platform = threading.Thread(target=initial_PLATFORM)
# starting tread for initializing step 1
init_wad.start()
init_app.start()
init_platform.start()
def main(): # main function
global prints
initialize() # calling initializing mode
while(1):
# start managing server
stat = input('[ Server Setup : s ] [ Variable Setings : v ] [ Admin Menu : a ] [ Live view : i ] [ Emergency Proxy Shut-down : e ] ')
if (stat == 's')or(stat == 'a')or(stat == 'v'):
print('$ Comming soon...')
if stat == 'i':
while(1):
print_ask = input("$ Press 'enter' to go on")
if print_ask='':
prints = True # global var to alow system to print to consol about system flow
else:
prints = False
break
time.sleep(10)
if stat == 'e':
disconnect_all()
if (__name__ == "__main__"): # modulize
main()
| Make_id_i | identifier_name |
# WAD SERVER TEST as MULTI-1.py | # WAD SERVER (DRONE JONGHAP GUWANJE SYSTEM)
# Licence Isaac Kim-leader of team RETELLIGENCE
# 2016
import threading
from socket import *
import time
import os
import random
from datetime import datetime
print('**************************************************************************')
print('* start_THE_First_WADING_Connection_VIA_RETELLIGENCE_IEZANOV_IWEN *')
print('**************************************************************************')
# make file directory
#os.makedirs('C:\\Users\\RETELLIGENCE\\Desktop\\WADING\\DB_iwen')
# open file
# file handle
#F_users_h = open('C:\\Users\\RETELLIGENCE\\Desktop\\WADING\\DB_iwen\\User_info.txt', '+')
# file read
#R_users_h = F_users_h.read()
F_users_h = open('C:\\Users\\RETELLIGENCE\\Desktop\\WADING\\DB_iwen\\WAD_pinList.txt', '+') # loading pin data from DBMS
WAD_pinList = []
R_users_h = F_users_h.read()
C_users_h = R_users_h # copy of pin data
while(1): # loading pin data for making user ID
try:
for i in range(len(C_users_h)):
if (C_users_h[i]=='#') and (C_users_h[i+1]=='#') and (C_users_h[i+2]=='#'):
WAD_pinList.append((C_users_h[i+3:i+12])
except:
break
# preset list
# 12000~24000 WAD
# 24000~36000 app
# 36000~48000 platform
Server_Rx_prot_len = 24
Server_TX_prot_len =
server_adr = getadr()
LIVE_WAD_list = []
x_code = '###WAD*#*' # Server Rx / WAD Tx
xx_code = '###WAD#*#' # Server Tx / WAD Rx
re_list = []
## WAD ##
# step 1
"""
Server_Rx_prot =
1.[9] ###WAD*#*
2.[9] ID : _ _ _ _ _ _ _ _ _
3.[6] PW : _ _ _ _ _ _
"""
# step 2n
"""
Server_Tx_prot =
1.[9] ###WAD#*#
2.[5] New Port number
3.[6] PW : _ _ _ _ _ _
4.[5] command : _ _ _ _ _
"""
# step 2n+1
"""
Server_Rx_data =
1.[9] x_code (###WAD*#*)
2.[9] ID
3.[6] PW
4.[6] status
5.[128] beacon
"""
## APP ##
## PLATFORM ##
# step 1
"""
Server_Rx_prot =
1.[9] x_code
2.[9] ID
3.[6] PW
"""
# step 2
"""
"""
def check_multi_id(id_give): # check if some one is using same id (register sequence on app)
if id_give in R_users_h:
return (1)
else:
return(0)
def getadr():
s = socket(AF_INET, SOCK_DGRAM)
s.connect(("gmail.com",80))
r = s.getsockname()[0]
s.close()
print('Server IP address : ', r)
return (r)
def check_prot(wad_info):
pass
def disconnect_all():
serverSocket.close()
sys.exit()
print("Manually disconnected..")
time.sleep(0.5)
for i in range(9)
print("Programm will automaticly quit in %s seconds.." % str(9-int(i)))
time.sleep(1)
quit()
def save_leftover():
|
def start_socket():
server_adr = getadr()
serverSocket = socket(AF_INET,SOCK_STREAM)
serverSocket.bind((server_adr, main_port))
serverSocket.listen(1)
connectionSocket, addr = serverSocket.accept()
##
### RETELLIGENCE WADING IN--on new thread
##class WADING(threading.Tread):
##
## def run(self, WAD):
##
## # WAD[0:9] = x_code
## # WAD[9:18] = WAD id
## # WAD[18:24] = WAD pw
## # WAD[24:29] = WAD new Port number
##
## given_PW =
## State = 'protocal_initializing'
##
## ### SERVER TX PROTOCALL DATA PACKET
## serverTx_protocall = xx_code + str(private_port) + '/' + str(given_PW) + '/' + str(State) + '/'
##
## ### socket BIND
## serverSocket = socket(AF_INET,SOCK_STREAM)
## serverSocket.bind((server_adr, private_port))
## serverSocket.listen(1)
## connectionSocket, addr = serverSocket.accept()
## connectionSocket.send = (serverTx_protocall.encode())
##
## recv_data = erverSovket.recv(1024)
##
## if (recv_data[0:9] == x_code):
## print(recv_data[9:18], ' recieving')
## a
## # error => break
## # what is suspend
##
##
##
##
##start_socket()
##
##
##while(1):
## request_list = []
## input_WAD = connectionSocket.recv(2048)
## while(1):
## if input_WAD != None:
##
## # length check
## if len(input_WAD)%24 == 0:
##
## # encription check
## if input_WAD[0:9] == x_code:
## ident_info = input_WAD[9:24]
## ident_info = '[' + ident_info + ']'
## if ident_info in WAD_ident_list:
## # send to request list
## request_list.append(input_WAD[0:24])
##
## # iteration go
## input_WAD = Input_WAD[24:]
## continue
## else:
## break
##
##
##
##
## for WAD in reauest_list:
##
## print('Starting new chapter')
## # set private port
## private_port = 12000 + 1
## while(1):
## if private_port in socket_list:
## private_port += 1
## else:
## break
## socket_list.append(private_port)
##
## # WAD send new port number
## Server_Tx_p = str(xx_code + private_port)
## serverSocket.send(Server_Tx_p.encode())
##
## # WAD info + port number
## WAD = WAD + str(private_port)
##
##
## # thread starting
## print(WAD)
## th = WADNIG(WAD)
## th.start()
##
## ## NOW U MAY ENTER WAD PROTOCAL
## WELCOME TO THE WORLD WIDE ASOCCIATION OF DRONES VIA INTERCONNECTED NETWORK INTEGERATION - RETELLIGENCE IWEN IEZANOV
#if 'Name' == __Main__:
def WADING(port):
while(1):
server_adr = getadr()
serverSocket = socket(AF_INET,SOCK_STREAM)
serverSocket.bind((server_adr, port))
serverSocket.listen(1)
connectionSocket, addr = serverSocket.accept()
while(1):
in_1 = connectionSocket.recv(1024) # recieve from wad
if in_1[:9] != x_code:
printQ("!!! Unidentified connection", port, str(datetime.now())
connectionSocket.close()
break
id_1 = in_1[9:15]
printQ("$$ Port", port, 'connected for WAD :: ', id_1, str(datetime.now())
connectionSocket.send(server_Rx_wad)
def APP(port):
def Make_id_i():
global WAD_pinlist
type_WAD = 'i'
year_WAD = str((time.localtime(time.time()))[0])
pin_WAD = str(int(WAD_pinList[-1]) + 1)
if len(pin_WAD) > 4:
pin_WAD = pin_WAD[0:4]
ID_WAD = type_WAD + year_WAD[-2] + year_WAD[-1] + '0'*(4-len(pin_WAD)) + str(random.randint(11,99))
while(1):
if check_multi_id(ID_WAD) = 1:
ID_WAD = ID_WAD[0, 7] + str(random.randint(11,99))
continue
else:
while(1):
passward = input("Enter PW (6 numbers): ")
try:
passward = int(passward)
except:
continue
else:
if len(passward) != 6:
continue
elif len(passward) == 6:
break
break
print(ID_WAD)
print(passward)
stat = ('Confirm?(y/n) : ')
if stat == 'y':
write_info = '[' + ID_WAD + +', ' + str(passward) + ']'
WAD_pinList.append(pin_WAD)
server_adr = getadr()
serverSocket = socket(AF_INET,SOCK_STREAM)
serverSocket.bind((server_adr, port))
serverSocket.listen(1)
connectionSocket, addr = serverSocket.accept()
in_1 = connectionSocket.recv(1024) # recieve from app
id_1 = in_1[9:15]
printQ("$$ Port", port, 'connected for app :: ', id_1, str(datetime.now())
def PLATFORM(port):
def Make_id_s():
global WAD_pinlist
type_WAD = 's'
year_WAD = str((time.localtime(time.time()))[0])
pin_WAD = str(int(WAD_pinList[-1]) + 1)
if len(pin_WAD) > 4:
pin_WAD = pin_WAD[0:4]
ID_WAD = type_WAD + year_WAD[-2] + year_WAD[-1] + '0'*(4-len(pin_WAD)) + str(random.randint(11,99))
while(1):
if check_multi_id(ID_WAD) = 1:
ID_WAD = ID_WAD[0, 7] + str(random.randint(11,99))
continue
else:
while(1):
passward = input("Enter PW (6 numbers): ")
try:
passward = int(passward)
except:
continue
else:
if len(passward) != 6:
continue
elif len(passward) == 6:
break
break
print(ID_WAD)
print(passward)
stat = ('Confirm?(y/n) : ')
if stat == 'y':
write_info = '[' + ID_WAD + +', ' + str(passward) + ']'
F_users_h.write('\n')
F_users_h.write(write_info)
F_users_h.write('\n')
WAD_pinList.append(pin_WAD)
server_adr = getadr()
serverSocket = socket(AF_INET,SOCK_STREAM)
serverSocket.bind((server_adr, port))
serverSocket.listen(1)
connectionSocket, addr = serverSocket.accept()
in_1 = connectionSocket.recv(1024) # recieve from platform
id_1 = in_1[9:15]
printQ("$$ Port", port, 'connected for platform :: ', id_1, str(datetime.now())
def printQ(sent):
global prints
if prints == True:
print(sent)
def initial_WAD():
for i in range(12000, 24000):
WADING(i)
def initial_APP():
for j in range(12000, 24000):
APP(j)
def initial_PLATFORM():
for k in range(12000, 24000):
PLATFORM(k)
def initialize(): # called by main()
# thread for each type of connection setting
init_wad = threading.Thread(target=initial_WAD)
init_app = threading.Thread(target=initial_APP)
init_platform = threading.Thread(target=initial_PLATFORM)
# starting tread for initializing step 1
init_wad.start()
init_app.start()
init_platform.start()
def main(): # main function
global prints
initialize() # calling initializing mode
while(1):
# start managing server
stat = input('[ Server Setup : s ] [ Variable Setings : v ] [ Admin Menu : a ] [ Live view : i ] [ Emergency Proxy Shut-down : e ] ')
if (stat == 's')or(stat == 'a')or(stat == 'v'):
print('$ Comming soon...')
if stat == 'i':
while(1):
print_ask = input("$ Press 'enter' to go on")
if print_ask='':
prints = True # global var to alow system to print to consol about system flow
else:
prints = False
break
time.sleep(10)
if stat == 'e':
disconnect_all()
if (__name__ == "__main__"): # modulize
main()
| pass | identifier_body |
AlfaReativo.py | import random
import sys
import math
import time
n = 2
num_section = 2
cargo = {1:"ferro", 2:"bauxita", 3:"cobre", 4:"prata"}
#print cargo[3]
# time = [0,0,0,0,0,0,0]
num_time = 1000
section = [0,0]
alpha = 3
alphaV = 20
alphaN = [0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1.0]
pAlphaN = [[0,9],[10,19],[20,29],[30,39],[40,49],[50,59],[60,69],[70,79],[80,89],[90,99]]
B = 100
tal = 10
# sorteado = random.randrange(100)
# print (sorteado)
#min mi - Ai + ci
# Input parameters
# Ai = expected arrival time of vessel i
# Decision variabels
# mi = starting time of handling of vessel i
# ci = total handliing time of vessel i
#class Vessel that has the cargo type, the length from the vessel to
#allocate in sections and the days is the handling time
T = 0.1 #tempo necessário pra um crane carrgar/descarregar uma quantidade de carga, 0.1h
cranes = 3 #numero de cranes por vessel por seção
alfa = T/cranes
class Section:
def __init__(self, name, distance, time, x, y):
self.n = name
self.d = distance
self.t = time
self.x = x
self.y = y
def get_distance(self):
return self.d
def printing(self):
print("Seção "+self.n)
class Cargo:
def __init__(self, name, rate):
self.n = name
self.r = rate
def get_rate(self):
return self.r
def printing(self):
print("Carga "+self.n)
class Vessel:
def __init__(self, name, cargo_type, lenght, arrival):
self.n = name
self.c_t = cargo_type
self.l = lenght
self.a = arrival
def printing(self):
print ("Vessel "+self.n)
#print (self.c_t, self.l, self.d)
#class Yard has the cargo type, the distance from the location to the
#berth and the capacity from the yard
class Yard:
def __init__(self,name, cargo_type, distance, capacity, x, y, ativo, time):
self.n = name
self.c_t = cargo_type
self.d = distance
self.c = capacity
self.x = x
self.y = y
self.ativo = ativo
self.time = time
def printing(self):
print ("location "+self.n)
self.c_t.printing()
print (self.d, self.c)
def dist(self, berco):
return distancia(berco, self)
#class Column has the vessel
class Column:
def __init__(self, vessel, section, yard, beta, time):
self.s = section
self.y = yard
self.beta = beta
self.alfa = alfa
self.h_t = alfa + beta #handling time = tempo de load/unload da carga + tempo de transferencia da carga
self.v = vessel
self.s_t = time #starting time
self.c_t = ""
def printing(self):
self.v.printing()
self.s.printing()
self.y.printing()
print ("Starting Time: ",self.s_t, "Handling Time: ", self.h_t)
def belongs (self, vessel):
return vessel == self.v
def distance_cargo_section(cargo, yards, section):
d = 0
t = 0
for c in yards:
if c.c_t == cargo:
d += c.d
t += 1
d += section.d/t
return d
#creating the sections
# section1 = Section("1", 10, 1, 1, 1)
# section2 = Section("2", 10, 1, 5, 1)
# # section3 = Section("3", 30, 1, 11, 1)
# sections = [section1, section2]
#15 berços
# section1 = Section("1", 10, 1,1,1)
# section2 = Section("2", 10, 1,2,1)
# section3 = Section("3", 30, 1,4,1)
# section4 = Section("4", 30, 1,6,1)
# section5 = Section("5", 45, 1,9,1)
# section6 = Section("6", 45, 1,12,1)
# section7 = Section("7", 15, 1,15,1)
# section8 = Section("8", 15, 1,19,1)
# section9 = Section("9", 25, 1,22,1)
# section10 = Section("10", 25, 1,25,1)
# section11 = Section("11", 45, 1,28,1)
# section12 = Section("12", 15, 1,30,1)
# section13 = Section("13", 30, 1,32,1)
# section14 = Section("14", 10, 1,34,1)
# section15 = Section("15", 20, 1,35,1)
# sections = [section1,section2,section3,section4,section5,section6,section7,section8,section9,section10,section11,section12,section13,section14,section15]
#10 berços
section1 = Section("1", 10, 1,1,1)
section2 = Section("2", 10, 1,2,1)
section3 = Section("3", 30, 1,4,1)
section4 = Section("4", 30, 1,6,1)
section5 = Section("5", 45, 1,9,1)
section6 = Section("6", 45, 1,12,1)
section7 = Section("7", 15, 1,15,1)
section8 = Section("8", 15, 1,19,1)
section9 = Section("9", 25, 1,22,1)
section10 = Section("10", 25, 1,25,1)
sections = [section1,section2,section3,section4,section5,section6,section7,section8,section9,section10]
#creating the type of cargos
cargo1 = Cargo("cobre", 0.7)
cargo2 = Cargo("ferro", 0.8)
cargo3 = Cargo("bauxita", 0.9)
cargos = [cargo1, cargo2, cargo3]
#creating the vessels
vessel1 = Vessel("1",cargo1, 1, 1)
vessel2 = Vessel("2",cargo2, 1, 20)
vessel3 = Vessel("3",cargo2, 1, 30)
vessel4 = Vessel("4",cargo1, 1, 30)
vessel5 = Vessel("5",cargo2, 1, 30)
vessels = [vessel1, vessel2, vessel3, vessel4, vessel5]
vessels = []
instancias = open("instances20A400","r")
f = open("saida20A400.txt","w")
while True:
nome = instancias.readline()
if nome == "":
break
lenght = int(instancias.readline().split("\n")[0])
arrival = int(instancias.readline().split("\n")[0])
cargo_name = instancias.readline().split("\n")[0]
for c in cargos:
# print(c.n)
# print(cargo_name)
if (c.n == cargo_name):
# print("entrou")
vessel = Vessel(nome, c, lenght, arrival)
vessels.append(vessel)
break
#vessel1.printing()
#creating the yards
yard_location_1 = Yard("A",cargo2, 10, 100, 1, 15, True, 1)
yard_location_2 = Yard("B",cargo1, 20, 100, 1, 30, True, 1)
yard_location_3 = Yard("C",cargo2, 30, 100, 7, 15, True, 1)
yard_location_4 = Yard("D",cargo1, 25, 100, 7, 30, False, 1)
yard_location_5 = Yard("E",cargo2, 35, 100, 1, 45, False, 1)
yard_location_6 = Yard("F",cargo1, 40, 100, 1, 60, True, 1)
yard_location_7 = Yard("G",cargo2, 50, 100, 7, 45, True, 1)
yard_location_8 = Yard("H",cargo1, 45, 100, 7, 60, True, 1)
yards = [yard_location_1, yard_location_2, yard_location_3, yard_location_4,yard_location_5,yard_location_6,yard_location_7,yard_location_8]
#yard_location_1.printing()
columns = []
for v in vessels:
for j in range(len(sections)):
for i in range(num_time):
for y in yards:
if (v.c_t == y.c_t or y.c_t.n == ""):
distance = distance_cargo_section(y.c_t, yards, sections[j])
# print (distance)
beta = distance*v.c_t.get_rate()
if(v.a <= i):
column = Column(v, sections[j], y, beta, i)
else:
column = Column(v, sections[j], y, beta, v.a)
# if(y.n == "A" or y.n == "B"):
# column.c_t = y.c_t.n
# column.y.time = column.h_t
columns.append(column)
#print columns
# for c in columns:
# c.printing()
# print("\n")
def select_yard(vessel, yards):
set_yard = []
for y in yards:
if (v.c_t == y.c_t):
set_yard.append(y)
y = random.choice(set_yard)
return y
def distancia(berco, patio):
d = math.sqrt(pow((berco.x - patio.x),2) + pow((berco.y - patio.y),2))
return d
def select_yard_best(berco, yards, cargo_type):
menor = yards[0]
#menord = distancia(berco, yards[0])
yardsort = sorted(yards,key=lambda y: y.dist(berco))
# for y in yardsort:
# print(distancia(berco,y))
# for i in range(len(yardsort)):
# if(yardsort[i].ativo == True or (y.ativo == False and y.c_t == cargo_type)):
# flag = 1
# break
flag = 0
menort = yards[0].time
menord = sys.maxsize
for y in yards:
if (menord > distancia(berco,y) and (y.ativo == True or (y.ativo == False and y.c_t == cargo_type))):
flag = 1
menord = distancia(berco,y)
menor = y
if(flag == 0):
for y in yards:
if(menort > y.time and (y.c_t == cargo_type)):
menort = y.time
menor = y
else:
menor.ativo = False
menor.c_t = cargo_type
return menor
def select_yard_random(berco,yards):
return
def compatible (column, columns):
for c in columns:
# if (c.s == column.s and c.s_t <= column.s_t and (column.s_t + column.h_t) <= (c.s_t + c.h_t)):
# return False
if(column.s != c.s):
if(column.c_t == c.c_t or c.c_t == ""):
# if(c.y == column.y):
# if(c.y.time <= (column.y.time - column.h_t)): # terminar isto, tempo de pátio
# continue
# else:
# return False
# else:
# continue
continue
if((column.s_t + column.h_t <= c.s_t) or (column.s_t >= (c.s_t + c.h_t))):
if(column.c_t == c.c_t or c.c_t == ""):
# if(c.y == column.y):
# if(c.y.time <= (column.y.time - column.h_t)):
# continue
# else:
# return False
# else:
# continue
continue
else:
return False
# elif (c.s_t == column.s_t and c.y == column.y):
# return False
return True
def reset():
for s in sections:
s.t = 1
for y in yards:
y.time = 1
def heuristic(vessels, columns):
best = []
for v in vessels:
for c in columns:
if best == []:
best.append(c)
c.c_t = v.c_t.n
break
elif c.belongs(v) and compatible(c, best):
best.append(c)
if(c.c_t == ""):
c.c_t = v.c_t.n
# c.y.time += c.h_t
break
return best
def fo(columns):
soma = 0
for c in columns:
soma += c.s_t - c.v.a + c.h_t
return soma
def new_heuristic(vessels, sections, yards, alfaqqr):
vesselsord = sorted(vessels, key=lambda vessel: vessel.a)
# for v in vessels:
# v.printing()
columns = []
menor = sections[0]
menorTime = sections[0].t
while(vesselsord != []):
#pegando o melhor navio
# v = vesselsord.pop(0)
#pegando os alpha melhores navios
melhoresVessels = []
if(int(len(vesselsord)*alfaqqr) <= 1):
alfaV = 1
else:
alfaV = int(len(vesselsord)*alfaqqr)
for i in range(alfaV):
melhoresVessels.append(vesselsord[i])
v = random.choice(melhoresVessels)
vesselsord.remove(v)
newsort = sorted(sections, key=lambda x: x.t)
# for k in newsort:
# print (k.t)
# for k in sections:
# if (menorTime > k.t):
# menor = k
# menorTime = k.t
#pegando o melhor berço
# menor = newsort[0]
# menorTime = menor.t
#pegando os alpha melhores berços
sectionRandom = []
r = int(alfaqqr*len(sections))
for i in range(r):
sectionRandom.append(newsort[i])
menor = random.choice(sectionRandom)
menorTime = menor.t
yard = select_yard_best(menor, yards, v.c_t)
distance = distance_cargo_section(yard.c_t, yards, menor)
beta = distance*v.c_t.get_rate()
# if(v.a > menorTime and v.a > yard.time):
# inicio_tempo = v.a
# elif(yard.time > menorTime and yard.time > v.a):
# inicio_tempo = yard.time
# else:
# inicio_tempo = menorTime
if(v.a > menorTime):
if(v.a > yard.time):
inicio_tempo = v.a
else:
inicio_tempo = yard.time
else:
if(menorTime > yard.time):
inicio_tempo = menorTime
else:
inicio_tempo = yard.time
# print(v.a, menorTime, yard.time )
column = Column(v, menor, yard, beta, inicio_tempo)
columns.append(column)
menor.t = column.s_t + column.h_t
yard.time = column.s_t + column.h_t #aqui
menorTime = sys.maxsize
return columns
def swap(s1, s2):
s1, s2 = s2, s1
def reset(sections, yards):
for s in sections:
s.t = 1
| n yards:
y.time = 1
def new_heuristic_2opt(vessels, sections, yards, alfaqqr):
# vesselsord = sorted(vessels, key=lambda vessel: vessel.a)
vesselsord = vessels
# for v in vessels:
# v.printing()
columns = []
menor = sections[0]
menorTime = sections[0].t
while(vesselsord != []):
#pegando o melhor navio
# v = vesselsord.pop(0)
#pegando os alpha melhores navios
melhoresVessels = []
if(int(len(vesselsord)*alfaqqr) <= 1):
alfaV = 1
else:
alfaV = int(len(vesselsord)*alfaqqr)
for i in range(alfaV):
melhoresVessels.append(vesselsord[i])
v = random.choice(melhoresVessels)
vesselsord.remove(v)
newsort = sorted(sections, key=lambda x: x.t)
# for k in newsort:
# print (k.t)
# for k in sections:
# if (menorTime > k.t):
# menor = k
# menorTime = k.t
#pegando o melhor berço
# menor = newsort[0]
# menorTime = menor.t
#pegando os alpha melhores berços
sectionRandom = []
r = int(alfaqqr*len(sections))
for i in range(r):
sectionRandom.append(newsort[i])
menor = random.choice(sectionRandom)
menorTime = menor.t
yard = select_yard_best(menor, yards, v.c_t)
distance = distance_cargo_section(yard.c_t, yards, menor)
beta = distance*v.c_t.get_rate()
# if(v.a > menorTime and v.a > yard.time):
# inicio_tempo = v.a
# elif(yard.time > menorTime and yard.time > v.a):
# inicio_tempo = yard.time
# else:
# inicio_tempo = menorTime
if(v.a > menorTime):
if(v.a > yard.time):
inicio_tempo = v.a
else:
inicio_tempo = yard.time
else:
if(menorTime > yard.time):
inicio_tempo = menorTime
else:
inicio_tempo = yard.time
# print(v.a, menorTime, yard.time )
column = Column(v, menor, yard, beta, inicio_tempo)
columns.append(column)
menor.t = column.s_t + column.h_t
yard.time = column.s_t + column.h_t #aqui
menorTime = sys.maxsize
return columns
# def trocar_berços(vessel1, vessel2, columns):
# for c1 in columns:
# for c2 in columns:
# if(c1.vessel == vessel1 and c2.vessel == vessel2):
# return true
# def fo_trocado(vessel1, vessel2):
# def troca(vessel1, vessel2):
# def busca_local(vessels, sections, yards, alpha):
# best = new_heuristic(vessels, sections, yards, alphaN[i])
# neighbor = []
# new_best = best
# for v1 in vessels:
# menor = v1
# for v2 in vessels:
# if(v1 != v2 and trocar_berços(v1,v2, best)):
# neighbor.append(v2)
# if(neighbor != []):
# for n in neighbor:
# if (fo_trocado(v1, n) < new_best):
# new_best = fo_trocado(v1, n)
# menor = n
# if(new_best < best ):
# troca(v1, menor)
# return new_best
#função que verifica se é possivel trocar berços
def trocar_berços(vessel, section, column, columns):
#salva o berço anterior
berco_anterior = column.s
#troca o berço
column.s = section
#calcula o novo handling time
distance = distance_cargo_section(column.y.c_t, yards, section)
beta = distance*vessel.c_t.get_rate()
#salva as variaveis antigas e troca o beta e handling time
old_beta = column.beta
old_h_t = column.h_t
column.beta = beta
column.h_t = column.alfa + column.beta
#calcula o tempo de inicio e fim do navio naquele berço
inicio = column.s_t
fim = column.s_t + column.h_t
#verifica se é possível encaixar a nova coluna no conjunto
for c in columns:
if(c != column and c.s == section):
if(c.s_t <= inicio and (c.s_t+c.h_t >= inicio)):
return False
elif(c.s_t <= fim and (c.s_t+c.h_t >= fim)):
return False
elif(c.s_t >= inicio and (c.s_t+c.h_t <= fim)):
return False
#Se passou pelo for quer dizer que a coluna pode ser trocada
#retorna as variaveis pros seus valores anteriores
column.s = berco_anterior
column.beta = old_beta
column.h_t = old_h_t
return True
def fo_trocado(vessel, section, column, columns):
#calculo a fo atual
fo_atual = fo(columns)
#salva o berço anterior
berco_anterior = column.s
#troca o berço
column.s = section
#calcula o novo handling time
distance = distance_cargo_section(column.y.c_t, yards, section)
beta = distance*vessel.c_t.get_rate()
#salva as variaveis antigas e troca o beta e handling time
old_beta = column.beta
old_h_t = column.h_t
column.beta = beta
column.h_t = column.alfa + column.beta
#calcula o fo da nova
fo_nova = fo(columns)
#retorna as variaveis pros seus valores anteriores
column.s = berco_anterior
column.beta = old_beta
column.h_t = old_h_t
if(fo_nova < fo_atual):
print(fo_atual, fo_nova)
return True
else:
return False
def troca(vessel, section, column, columns):
#troca o berço
column.s = section
#calcula o novo handling time
distance = distance_cargo_section(column.y.c_t, yards, section)
beta = distance*vessel.c_t.get_rate()
#atualiza o handling time
column.beta = beta
column.h_t = column.alfa + column.beta
def busca_local(vessels, sections, yards, a):
vesselscpy = vessels.copy()
best = new_heuristic(vesselscpy, sections, yards, a)
neighbor = []
new_best = best
navios = 1
colunas = 1
# for v in vessels: # para cada navio
# for c in best: #para cada coluna
# #procurando a coluna do návio
# if (c.v == v):
# for s in sections: #para cada berço
# if(c.s != s):
# #se for possível trocar o berço e se a fo dele trocado for melhor, ele troca
# if(trocar_berços(v, s, c, best) and fo_trocado(v, s, c, best)):
# troca(v, s, c, best)
# print("navios: ", navios)
# navios+=1
vesselsord = sorted(vessels, key=lambda vessel: vessel.a)
# best = new_heuristic(vessels, sections, yards, a)
# new_best = best
menor = fo(best)
reset(sections, yards)
entrou = False
menori = 0
menorj = 0
for i in range(len(vesselsord)):
for j in range(i+1,len(vesselsord)):
print("i:", i , " j:", j)
swap(vesselsord[i], vesselsord[j])
vesselscopy = vesselsord.copy()
new_best = new_heuristic_2opt(vesselscopy, sections, yards, a)
print("menor: ", menor, " novo: ", fo(new_best))
if fo(new_best) < menor:
menor = fo(new_best)
menori = i
menorj = j
entrou = True
swap(vesselsord[i], vesselsord[j])
reset(sections, yards)
if entrou:
swap(vesselsord[menori], vesselsord[menorj])
entrou = False
print("menor fo:", menor)
# best = heuristic(vessels, columns)
# best_new = new_heuristic(vessels, sections, yards, alfaqqr)
# i = 0
# for c in best:
# i += 1
# j = 0
# print("Solução: \n")
# for c in best_new:
# c.printing()
# j += 1
# print("\n")
# print(i)
# print(j)
# print("Valor da FO = ", fo(best))
# print("Valor da FO nova = ", fo(best_new))
random.seed(0)
best = []
# alfa reativo
for k in range(100):
indice = 0
medias = [0,0,0,0,0,0,0,0,0,0]
soma = [0,0,0,0,0,0,0,0,0,0]
vetor_fo = [0,0,0,0,0,0,0,0,0,0]
qi = [0,0,0,0,0,0,0,0,0,0]
for j in range(B):
sorteado = random.randrange(100)
# print (sorteado)
for i in range(10):
# print(pAlphaN[i][0])
if(pAlphaN[i][0] <= sorteado and pAlphaN[i][1] >= sorteado):
indice = i
soma[i] += 1.0
# print(alphaN[i])
# inicio = time.time()
best = new_heuristic(vessels, sections, yards, alphaN[i])
# fim = time.time()
# print(fim - inicio)
f.write("alfa = ")
f.write(str(alphaN[i]))
f.write("\n")
f.write("Valor da FO nova = ")
f.write(str(fo(best)))
f.write("\n")
vetor_fo[i] += fo(best)
reset(sections, yards)
break
# print (sorteado, alphaN[indice])
for i in range(10):
if(soma[i] == 0):
qi[i] = 0
else:
medias[i] = vetor_fo[i]/soma[i]
qi[i] = (1/medias[i])*pow(10,5)
frase = "i: " + str(i) + "soma: " + str(soma[i]) + "FO: " + str(vetor_fo[i]) + "medias: " + str(medias[i]) + "qi: " + str(qi[i])
f.write(frase)
f.write("\n")
q = 0
for i in range(10):
q += qi[i]
print(q)
novopAlphaN =[]
somaP = 0
for i in range(10):
pAlphaN[i] = qi[i]/q*100
print(pAlphaN[i])
if(qi[i] == 0):
novopAlphaN.append([somaP, somaP])
else:
novopAlphaN.append([somaP, somaP + pAlphaN[i] - 1])
somaP += pAlphaN[i]
# print(soma)
for i in range(10):
f.write(str(novopAlphaN[i]))
f.write("\n")
pAlphaN = novopAlphaN
# for a in alphaN:
# print("alpha: ", a)
# busca_local(vessels, sections, yards, a)
#fazendo a busca local
# busca_local(vessels, sections, yards, 0.1)
#criar novo laço para testar a solução viável da literatura
#continuar a tabela, valores iguais no começo e intercalando depois
#aleatoridade com berço e pátio
#tentar fazer uma função gulosa onde para cada navio verifica a combinação de berço e pátio e vê qual a melhor, ou seja,
#fazer as colunas para cada navio
#cuspir a resposta num arquivo, csv talvez, automatizar a coleta de dados
#marcar o tempo de execução do algoritmo - gprof
#implementar o alfa reativo, alfa 1 e alfa 2, um para navio e um para berço, e a intercalação dos dois.
#Escrever no papel o problema e o algoritmo de solução, heurística proposta no formato de pseudo-código
#cuspir a resposta num arquivo, csv talvez, automatizar a coleta de dados
#Testar com 10 instâncias de cada tamanho
#Automatizar a saida
#Colocar o guloso junto na tabela, e executar com 100000
#Escrever no papel o problema e o algoritmo de solução, heurística proposta no formato de pseudo-código
#Busca Local e GRASP
#busca local
| for y i | conditional_block |
AlfaReativo.py | import random
import sys
import math
import time
n = 2
num_section = 2
cargo = {1:"ferro", 2:"bauxita", 3:"cobre", 4:"prata"}
#print cargo[3]
# time = [0,0,0,0,0,0,0]
num_time = 1000
section = [0,0]
alpha = 3
alphaV = 20
alphaN = [0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1.0]
pAlphaN = [[0,9],[10,19],[20,29],[30,39],[40,49],[50,59],[60,69],[70,79],[80,89],[90,99]]
B = 100
tal = 10
# sorteado = random.randrange(100)
# print (sorteado)
#min mi - Ai + ci
# Input parameters
# Ai = expected arrival time of vessel i
# Decision variabels
# mi = starting time of handling of vessel i
# ci = total handliing time of vessel i
#class Vessel that has the cargo type, the length from the vessel to
#allocate in sections and the days is the handling time
T = 0.1 #tempo necessário pra um crane carrgar/descarregar uma quantidade de carga, 0.1h
cranes = 3 #numero de cranes por vessel por seção
alfa = T/cranes
class Section:
def __init__(self, name, distance, time, x, y):
self.n = name
self.d = distance
self.t = time
self.x = x
self.y = y
def get_distance(self):
return self.d
def printing(self):
print("Seção "+self.n)
class Cargo:
def __init__(self, name, rate):
self.n = name
self.r = rate
def get_rate(self):
retur | printing(self):
print("Carga "+self.n)
class Vessel:
def __init__(self, name, cargo_type, lenght, arrival):
self.n = name
self.c_t = cargo_type
self.l = lenght
self.a = arrival
def printing(self):
print ("Vessel "+self.n)
#print (self.c_t, self.l, self.d)
#class Yard has the cargo type, the distance from the location to the
#berth and the capacity from the yard
class Yard:
def __init__(self,name, cargo_type, distance, capacity, x, y, ativo, time):
self.n = name
self.c_t = cargo_type
self.d = distance
self.c = capacity
self.x = x
self.y = y
self.ativo = ativo
self.time = time
def printing(self):
print ("location "+self.n)
self.c_t.printing()
print (self.d, self.c)
def dist(self, berco):
return distancia(berco, self)
#class Column has the vessel
class Column:
def __init__(self, vessel, section, yard, beta, time):
self.s = section
self.y = yard
self.beta = beta
self.alfa = alfa
self.h_t = alfa + beta #handling time = tempo de load/unload da carga + tempo de transferencia da carga
self.v = vessel
self.s_t = time #starting time
self.c_t = ""
def printing(self):
self.v.printing()
self.s.printing()
self.y.printing()
print ("Starting Time: ",self.s_t, "Handling Time: ", self.h_t)
def belongs (self, vessel):
return vessel == self.v
def distance_cargo_section(cargo, yards, section):
d = 0
t = 0
for c in yards:
if c.c_t == cargo:
d += c.d
t += 1
d += section.d/t
return d
#creating the sections
# section1 = Section("1", 10, 1, 1, 1)
# section2 = Section("2", 10, 1, 5, 1)
# # section3 = Section("3", 30, 1, 11, 1)
# sections = [section1, section2]
#15 berços
# section1 = Section("1", 10, 1,1,1)
# section2 = Section("2", 10, 1,2,1)
# section3 = Section("3", 30, 1,4,1)
# section4 = Section("4", 30, 1,6,1)
# section5 = Section("5", 45, 1,9,1)
# section6 = Section("6", 45, 1,12,1)
# section7 = Section("7", 15, 1,15,1)
# section8 = Section("8", 15, 1,19,1)
# section9 = Section("9", 25, 1,22,1)
# section10 = Section("10", 25, 1,25,1)
# section11 = Section("11", 45, 1,28,1)
# section12 = Section("12", 15, 1,30,1)
# section13 = Section("13", 30, 1,32,1)
# section14 = Section("14", 10, 1,34,1)
# section15 = Section("15", 20, 1,35,1)
# sections = [section1,section2,section3,section4,section5,section6,section7,section8,section9,section10,section11,section12,section13,section14,section15]
#10 berços
section1 = Section("1", 10, 1,1,1)
section2 = Section("2", 10, 1,2,1)
section3 = Section("3", 30, 1,4,1)
section4 = Section("4", 30, 1,6,1)
section5 = Section("5", 45, 1,9,1)
section6 = Section("6", 45, 1,12,1)
section7 = Section("7", 15, 1,15,1)
section8 = Section("8", 15, 1,19,1)
section9 = Section("9", 25, 1,22,1)
section10 = Section("10", 25, 1,25,1)
sections = [section1,section2,section3,section4,section5,section6,section7,section8,section9,section10]
#creating the type of cargos
cargo1 = Cargo("cobre", 0.7)
cargo2 = Cargo("ferro", 0.8)
cargo3 = Cargo("bauxita", 0.9)
cargos = [cargo1, cargo2, cargo3]
#creating the vessels
vessel1 = Vessel("1",cargo1, 1, 1)
vessel2 = Vessel("2",cargo2, 1, 20)
vessel3 = Vessel("3",cargo2, 1, 30)
vessel4 = Vessel("4",cargo1, 1, 30)
vessel5 = Vessel("5",cargo2, 1, 30)
vessels = [vessel1, vessel2, vessel3, vessel4, vessel5]
vessels = []
instancias = open("instances20A400","r")
f = open("saida20A400.txt","w")
while True:
nome = instancias.readline()
if nome == "":
break
lenght = int(instancias.readline().split("\n")[0])
arrival = int(instancias.readline().split("\n")[0])
cargo_name = instancias.readline().split("\n")[0]
for c in cargos:
# print(c.n)
# print(cargo_name)
if (c.n == cargo_name):
# print("entrou")
vessel = Vessel(nome, c, lenght, arrival)
vessels.append(vessel)
break
#vessel1.printing()
#creating the yards
yard_location_1 = Yard("A",cargo2, 10, 100, 1, 15, True, 1)
yard_location_2 = Yard("B",cargo1, 20, 100, 1, 30, True, 1)
yard_location_3 = Yard("C",cargo2, 30, 100, 7, 15, True, 1)
yard_location_4 = Yard("D",cargo1, 25, 100, 7, 30, False, 1)
yard_location_5 = Yard("E",cargo2, 35, 100, 1, 45, False, 1)
yard_location_6 = Yard("F",cargo1, 40, 100, 1, 60, True, 1)
yard_location_7 = Yard("G",cargo2, 50, 100, 7, 45, True, 1)
yard_location_8 = Yard("H",cargo1, 45, 100, 7, 60, True, 1)
yards = [yard_location_1, yard_location_2, yard_location_3, yard_location_4,yard_location_5,yard_location_6,yard_location_7,yard_location_8]
#yard_location_1.printing()
columns = []
for v in vessels:
for j in range(len(sections)):
for i in range(num_time):
for y in yards:
if (v.c_t == y.c_t or y.c_t.n == ""):
distance = distance_cargo_section(y.c_t, yards, sections[j])
# print (distance)
beta = distance*v.c_t.get_rate()
if(v.a <= i):
column = Column(v, sections[j], y, beta, i)
else:
column = Column(v, sections[j], y, beta, v.a)
# if(y.n == "A" or y.n == "B"):
# column.c_t = y.c_t.n
# column.y.time = column.h_t
columns.append(column)
#print columns
# for c in columns:
# c.printing()
# print("\n")
def select_yard(vessel, yards):
set_yard = []
for y in yards:
if (v.c_t == y.c_t):
set_yard.append(y)
y = random.choice(set_yard)
return y
def distancia(berco, patio):
d = math.sqrt(pow((berco.x - patio.x),2) + pow((berco.y - patio.y),2))
return d
def select_yard_best(berco, yards, cargo_type):
menor = yards[0]
#menord = distancia(berco, yards[0])
yardsort = sorted(yards,key=lambda y: y.dist(berco))
# for y in yardsort:
# print(distancia(berco,y))
# for i in range(len(yardsort)):
# if(yardsort[i].ativo == True or (y.ativo == False and y.c_t == cargo_type)):
# flag = 1
# break
flag = 0
menort = yards[0].time
menord = sys.maxsize
for y in yards:
if (menord > distancia(berco,y) and (y.ativo == True or (y.ativo == False and y.c_t == cargo_type))):
flag = 1
menord = distancia(berco,y)
menor = y
if(flag == 0):
for y in yards:
if(menort > y.time and (y.c_t == cargo_type)):
menort = y.time
menor = y
else:
menor.ativo = False
menor.c_t = cargo_type
return menor
def select_yard_random(berco,yards):
return
def compatible (column, columns):
for c in columns:
# if (c.s == column.s and c.s_t <= column.s_t and (column.s_t + column.h_t) <= (c.s_t + c.h_t)):
# return False
if(column.s != c.s):
if(column.c_t == c.c_t or c.c_t == ""):
# if(c.y == column.y):
# if(c.y.time <= (column.y.time - column.h_t)): # terminar isto, tempo de pátio
# continue
# else:
# return False
# else:
# continue
continue
if((column.s_t + column.h_t <= c.s_t) or (column.s_t >= (c.s_t + c.h_t))):
if(column.c_t == c.c_t or c.c_t == ""):
# if(c.y == column.y):
# if(c.y.time <= (column.y.time - column.h_t)):
# continue
# else:
# return False
# else:
# continue
continue
else:
return False
# elif (c.s_t == column.s_t and c.y == column.y):
# return False
return True
def reset():
for s in sections:
s.t = 1
for y in yards:
y.time = 1
def heuristic(vessels, columns):
best = []
for v in vessels:
for c in columns:
if best == []:
best.append(c)
c.c_t = v.c_t.n
break
elif c.belongs(v) and compatible(c, best):
best.append(c)
if(c.c_t == ""):
c.c_t = v.c_t.n
# c.y.time += c.h_t
break
return best
def fo(columns):
soma = 0
for c in columns:
soma += c.s_t - c.v.a + c.h_t
return soma
def new_heuristic(vessels, sections, yards, alfaqqr):
vesselsord = sorted(vessels, key=lambda vessel: vessel.a)
# for v in vessels:
# v.printing()
columns = []
menor = sections[0]
menorTime = sections[0].t
while(vesselsord != []):
#pegando o melhor navio
# v = vesselsord.pop(0)
#pegando os alpha melhores navios
melhoresVessels = []
if(int(len(vesselsord)*alfaqqr) <= 1):
alfaV = 1
else:
alfaV = int(len(vesselsord)*alfaqqr)
for i in range(alfaV):
melhoresVessels.append(vesselsord[i])
v = random.choice(melhoresVessels)
vesselsord.remove(v)
newsort = sorted(sections, key=lambda x: x.t)
# for k in newsort:
# print (k.t)
# for k in sections:
# if (menorTime > k.t):
# menor = k
# menorTime = k.t
#pegando o melhor berço
# menor = newsort[0]
# menorTime = menor.t
#pegando os alpha melhores berços
sectionRandom = []
r = int(alfaqqr*len(sections))
for i in range(r):
sectionRandom.append(newsort[i])
menor = random.choice(sectionRandom)
menorTime = menor.t
yard = select_yard_best(menor, yards, v.c_t)
distance = distance_cargo_section(yard.c_t, yards, menor)
beta = distance*v.c_t.get_rate()
# if(v.a > menorTime and v.a > yard.time):
# inicio_tempo = v.a
# elif(yard.time > menorTime and yard.time > v.a):
# inicio_tempo = yard.time
# else:
# inicio_tempo = menorTime
if(v.a > menorTime):
if(v.a > yard.time):
inicio_tempo = v.a
else:
inicio_tempo = yard.time
else:
if(menorTime > yard.time):
inicio_tempo = menorTime
else:
inicio_tempo = yard.time
# print(v.a, menorTime, yard.time )
column = Column(v, menor, yard, beta, inicio_tempo)
columns.append(column)
menor.t = column.s_t + column.h_t
yard.time = column.s_t + column.h_t #aqui
menorTime = sys.maxsize
return columns
def swap(s1, s2):
s1, s2 = s2, s1
def reset(sections, yards):
for s in sections:
s.t = 1
for y in yards:
y.time = 1
def new_heuristic_2opt(vessels, sections, yards, alfaqqr):
# vesselsord = sorted(vessels, key=lambda vessel: vessel.a)
vesselsord = vessels
# for v in vessels:
# v.printing()
columns = []
menor = sections[0]
menorTime = sections[0].t
while(vesselsord != []):
#pegando o melhor navio
# v = vesselsord.pop(0)
#pegando os alpha melhores navios
melhoresVessels = []
if(int(len(vesselsord)*alfaqqr) <= 1):
alfaV = 1
else:
alfaV = int(len(vesselsord)*alfaqqr)
for i in range(alfaV):
melhoresVessels.append(vesselsord[i])
v = random.choice(melhoresVessels)
vesselsord.remove(v)
newsort = sorted(sections, key=lambda x: x.t)
# for k in newsort:
# print (k.t)
# for k in sections:
# if (menorTime > k.t):
# menor = k
# menorTime = k.t
#pegando o melhor berço
# menor = newsort[0]
# menorTime = menor.t
#pegando os alpha melhores berços
sectionRandom = []
r = int(alfaqqr*len(sections))
for i in range(r):
sectionRandom.append(newsort[i])
menor = random.choice(sectionRandom)
menorTime = menor.t
yard = select_yard_best(menor, yards, v.c_t)
distance = distance_cargo_section(yard.c_t, yards, menor)
beta = distance*v.c_t.get_rate()
# if(v.a > menorTime and v.a > yard.time):
# inicio_tempo = v.a
# elif(yard.time > menorTime and yard.time > v.a):
# inicio_tempo = yard.time
# else:
# inicio_tempo = menorTime
if(v.a > menorTime):
if(v.a > yard.time):
inicio_tempo = v.a
else:
inicio_tempo = yard.time
else:
if(menorTime > yard.time):
inicio_tempo = menorTime
else:
inicio_tempo = yard.time
# print(v.a, menorTime, yard.time )
column = Column(v, menor, yard, beta, inicio_tempo)
columns.append(column)
menor.t = column.s_t + column.h_t
yard.time = column.s_t + column.h_t #aqui
menorTime = sys.maxsize
return columns
# def trocar_berços(vessel1, vessel2, columns):
# for c1 in columns:
# for c2 in columns:
# if(c1.vessel == vessel1 and c2.vessel == vessel2):
# return true
# def fo_trocado(vessel1, vessel2):
# def troca(vessel1, vessel2):
# def busca_local(vessels, sections, yards, alpha):
# best = new_heuristic(vessels, sections, yards, alphaN[i])
# neighbor = []
# new_best = best
# for v1 in vessels:
# menor = v1
# for v2 in vessels:
# if(v1 != v2 and trocar_berços(v1,v2, best)):
# neighbor.append(v2)
# if(neighbor != []):
# for n in neighbor:
# if (fo_trocado(v1, n) < new_best):
# new_best = fo_trocado(v1, n)
# menor = n
# if(new_best < best ):
# troca(v1, menor)
# return new_best
#função que verifica se é possivel trocar berços
def trocar_berços(vessel, section, column, columns):
#salva o berço anterior
berco_anterior = column.s
#troca o berço
column.s = section
#calcula o novo handling time
distance = distance_cargo_section(column.y.c_t, yards, section)
beta = distance*vessel.c_t.get_rate()
#salva as variaveis antigas e troca o beta e handling time
old_beta = column.beta
old_h_t = column.h_t
column.beta = beta
column.h_t = column.alfa + column.beta
#calcula o tempo de inicio e fim do navio naquele berço
inicio = column.s_t
fim = column.s_t + column.h_t
#verifica se é possível encaixar a nova coluna no conjunto
for c in columns:
if(c != column and c.s == section):
if(c.s_t <= inicio and (c.s_t+c.h_t >= inicio)):
return False
elif(c.s_t <= fim and (c.s_t+c.h_t >= fim)):
return False
elif(c.s_t >= inicio and (c.s_t+c.h_t <= fim)):
return False
#Se passou pelo for quer dizer que a coluna pode ser trocada
#retorna as variaveis pros seus valores anteriores
column.s = berco_anterior
column.beta = old_beta
column.h_t = old_h_t
return True
def fo_trocado(vessel, section, column, columns):
#calculo a fo atual
fo_atual = fo(columns)
#salva o berço anterior
berco_anterior = column.s
#troca o berço
column.s = section
#calcula o novo handling time
distance = distance_cargo_section(column.y.c_t, yards, section)
beta = distance*vessel.c_t.get_rate()
#salva as variaveis antigas e troca o beta e handling time
old_beta = column.beta
old_h_t = column.h_t
column.beta = beta
column.h_t = column.alfa + column.beta
#calcula o fo da nova
fo_nova = fo(columns)
#retorna as variaveis pros seus valores anteriores
column.s = berco_anterior
column.beta = old_beta
column.h_t = old_h_t
if(fo_nova < fo_atual):
print(fo_atual, fo_nova)
return True
else:
return False
def troca(vessel, section, column, columns):
#troca o berço
column.s = section
#calcula o novo handling time
distance = distance_cargo_section(column.y.c_t, yards, section)
beta = distance*vessel.c_t.get_rate()
#atualiza o handling time
column.beta = beta
column.h_t = column.alfa + column.beta
def busca_local(vessels, sections, yards, a):
vesselscpy = vessels.copy()
best = new_heuristic(vesselscpy, sections, yards, a)
neighbor = []
new_best = best
navios = 1
colunas = 1
# for v in vessels: # para cada navio
# for c in best: #para cada coluna
# #procurando a coluna do návio
# if (c.v == v):
# for s in sections: #para cada berço
# if(c.s != s):
# #se for possível trocar o berço e se a fo dele trocado for melhor, ele troca
# if(trocar_berços(v, s, c, best) and fo_trocado(v, s, c, best)):
# troca(v, s, c, best)
# print("navios: ", navios)
# navios+=1
vesselsord = sorted(vessels, key=lambda vessel: vessel.a)
# best = new_heuristic(vessels, sections, yards, a)
# new_best = best
menor = fo(best)
reset(sections, yards)
entrou = False
menori = 0
menorj = 0
for i in range(len(vesselsord)):
for j in range(i+1,len(vesselsord)):
print("i:", i , " j:", j)
swap(vesselsord[i], vesselsord[j])
vesselscopy = vesselsord.copy()
new_best = new_heuristic_2opt(vesselscopy, sections, yards, a)
print("menor: ", menor, " novo: ", fo(new_best))
if fo(new_best) < menor:
menor = fo(new_best)
menori = i
menorj = j
entrou = True
swap(vesselsord[i], vesselsord[j])
reset(sections, yards)
if entrou:
swap(vesselsord[menori], vesselsord[menorj])
entrou = False
print("menor fo:", menor)
# best = heuristic(vessels, columns)
# best_new = new_heuristic(vessels, sections, yards, alfaqqr)
# i = 0
# for c in best:
# i += 1
# j = 0
# print("Solução: \n")
# for c in best_new:
# c.printing()
# j += 1
# print("\n")
# print(i)
# print(j)
# print("Valor da FO = ", fo(best))
# print("Valor da FO nova = ", fo(best_new))
random.seed(0)
best = []
# alfa reativo
for k in range(100):
indice = 0
medias = [0,0,0,0,0,0,0,0,0,0]
soma = [0,0,0,0,0,0,0,0,0,0]
vetor_fo = [0,0,0,0,0,0,0,0,0,0]
qi = [0,0,0,0,0,0,0,0,0,0]
for j in range(B):
sorteado = random.randrange(100)
# print (sorteado)
for i in range(10):
# print(pAlphaN[i][0])
if(pAlphaN[i][0] <= sorteado and pAlphaN[i][1] >= sorteado):
indice = i
soma[i] += 1.0
# print(alphaN[i])
# inicio = time.time()
best = new_heuristic(vessels, sections, yards, alphaN[i])
# fim = time.time()
# print(fim - inicio)
f.write("alfa = ")
f.write(str(alphaN[i]))
f.write("\n")
f.write("Valor da FO nova = ")
f.write(str(fo(best)))
f.write("\n")
vetor_fo[i] += fo(best)
reset(sections, yards)
break
# print (sorteado, alphaN[indice])
for i in range(10):
if(soma[i] == 0):
qi[i] = 0
else:
medias[i] = vetor_fo[i]/soma[i]
qi[i] = (1/medias[i])*pow(10,5)
frase = "i: " + str(i) + "soma: " + str(soma[i]) + "FO: " + str(vetor_fo[i]) + "medias: " + str(medias[i]) + "qi: " + str(qi[i])
f.write(frase)
f.write("\n")
q = 0
for i in range(10):
q += qi[i]
print(q)
novopAlphaN =[]
somaP = 0
for i in range(10):
pAlphaN[i] = qi[i]/q*100
print(pAlphaN[i])
if(qi[i] == 0):
novopAlphaN.append([somaP, somaP])
else:
novopAlphaN.append([somaP, somaP + pAlphaN[i] - 1])
somaP += pAlphaN[i]
# print(soma)
for i in range(10):
f.write(str(novopAlphaN[i]))
f.write("\n")
pAlphaN = novopAlphaN
# for a in alphaN:
# print("alpha: ", a)
# busca_local(vessels, sections, yards, a)
#fazendo a busca local
# busca_local(vessels, sections, yards, 0.1)
#criar novo laço para testar a solução viável da literatura
#continuar a tabela, valores iguais no começo e intercalando depois
#aleatoridade com berço e pátio
#tentar fazer uma função gulosa onde para cada navio verifica a combinação de berço e pátio e vê qual a melhor, ou seja,
#fazer as colunas para cada navio
#cuspir a resposta num arquivo, csv talvez, automatizar a coleta de dados
#marcar o tempo de execução do algoritmo - gprof
#implementar o alfa reativo, alfa 1 e alfa 2, um para navio e um para berço, e a intercalação dos dois.
#Escrever no papel o problema e o algoritmo de solução, heurística proposta no formato de pseudo-código
#cuspir a resposta num arquivo, csv talvez, automatizar a coleta de dados
#Testar com 10 instâncias de cada tamanho
#Automatizar a saida
#Colocar o guloso junto na tabela, e executar com 100000
#Escrever no papel o problema e o algoritmo de solução, heurística proposta no formato de pseudo-código
#Busca Local e GRASP
#busca local
| n self.r
def | identifier_body |
AlfaReativo.py | import random
import sys
import math
import time
n = 2
num_section = 2
cargo = {1:"ferro", 2:"bauxita", 3:"cobre", 4:"prata"}
#print cargo[3]
# time = [0,0,0,0,0,0,0]
num_time = 1000
section = [0,0]
alpha = 3
alphaV = 20
alphaN = [0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1.0]
pAlphaN = [[0,9],[10,19],[20,29],[30,39],[40,49],[50,59],[60,69],[70,79],[80,89],[90,99]]
B = 100
tal = 10
# sorteado = random.randrange(100)
# print (sorteado)
#min mi - Ai + ci
# Input parameters
# Ai = expected arrival time of vessel i
# Decision variabels
# mi = starting time of handling of vessel i
# ci = total handliing time of vessel i
#class Vessel that has the cargo type, the length from the vessel to
#allocate in sections and the days is the handling time
T = 0.1 #tempo necessário pra um crane carrgar/descarregar uma quantidade de carga, 0.1h
cranes = 3 #numero de cranes por vessel por seção
alfa = T/cranes
class Section:
def __init__(self, name, distance, time, x, y):
self.n = name
self.d = distance
self.t = time
self.x = x
self.y = y
def get_distance(self):
return self.d
def printing(self):
print("Seção "+self.n)
class Cargo:
def __init__(self, name, rate):
self.n = name
self.r = rate
def get_rate(self):
return self.r
def printing(self):
print("Carga "+self.n)
class Vessel:
def __init__(self, name, cargo_type, lenght, arrival):
self.n = name
self.c_t = cargo_type
self.l = lenght
self.a = arrival
def printing(self):
print ("Vessel "+self.n)
#print (self.c_t, self.l, self.d)
#class Yard has the cargo type, the distance from the location to the
#berth and the capacity from the yard
class Yard:
def __init__(self,name, cargo_type, distance, capacity, x, y, ativo, time):
self.n = name
self.c_t = cargo_type
self.d = distance
self.c = capacity
self.x = x
self.y = y
self.ativo = ativo
self.time = time
def printing(self):
print ("location "+self.n)
self.c_t.printing()
print (self.d, self.c)
def dist(self, berco):
return distancia(berco, self)
#class Column has the vessel
class Column:
def __init__(self, vessel, section, yard, beta, time):
self.s = section
self.y = yard
self.beta = beta
self.alfa = alfa
self.h_t = alfa + beta #handling time = tempo de load/unload da carga + tempo de transferencia da carga
self.v = vessel
self.s_t = time #starting time
self.c_t = ""
def printing(self):
self.v.printing()
self.s.printing()
self.y.printing()
print ("Starting Time: ",self.s_t, "Handling Time: ", self.h_t)
def belongs (self, vessel):
return vessel == self.v
def distance_cargo_section(cargo, yards, section):
d = 0
t = 0
for c in yards:
if c.c_t == cargo:
d += c.d
t += 1
d += section.d/t
return d
#creating the sections
# section1 = Section("1", 10, 1, 1, 1)
# section2 = Section("2", 10, 1, 5, 1)
# # section3 = Section("3", 30, 1, 11, 1)
# sections = [section1, section2]
#15 berços
# section1 = Section("1", 10, 1,1,1)
# section2 = Section("2", 10, 1,2,1)
# section3 = Section("3", 30, 1,4,1)
# section4 = Section("4", 30, 1,6,1)
# section5 = Section("5", 45, 1,9,1)
# section6 = Section("6", 45, 1,12,1)
# section7 = Section("7", 15, 1,15,1)
# section8 = Section("8", 15, 1,19,1)
# section9 = Section("9", 25, 1,22,1)
# section10 = Section("10", 25, 1,25,1)
# section11 = Section("11", 45, 1,28,1)
# section12 = Section("12", 15, 1,30,1)
# section13 = Section("13", 30, 1,32,1)
# section14 = Section("14", 10, 1,34,1)
# section15 = Section("15", 20, 1,35,1)
# sections = [section1,section2,section3,section4,section5,section6,section7,section8,section9,section10,section11,section12,section13,section14,section15]
#10 berços
section1 = Section("1", 10, 1,1,1)
section2 = Section("2", 10, 1,2,1)
section3 = Section("3", 30, 1,4,1)
section4 = Section("4", 30, 1,6,1)
section5 = Section("5", 45, 1,9,1)
section6 = Section("6", 45, 1,12,1)
section7 = Section("7", 15, 1,15,1)
section8 = Section("8", 15, 1,19,1)
section9 = Section("9", 25, 1,22,1)
section10 = Section("10", 25, 1,25,1)
sections = [section1,section2,section3,section4,section5,section6,section7,section8,section9,section10]
#creating the type of cargos
cargo1 = Cargo("cobre", 0.7)
cargo2 = Cargo("ferro", 0.8)
cargo3 = Cargo("bauxita", 0.9)
cargos = [cargo1, cargo2, cargo3]
#creating the vessels
vessel1 = Vessel("1",cargo1, 1, 1)
vessel2 = Vessel("2",cargo2, 1, 20)
vessel3 = Vessel("3",cargo2, 1, 30)
vessel4 = Vessel("4",cargo1, 1, 30)
vessel5 = Vessel("5",cargo2, 1, 30)
vessels = [vessel1, vessel2, vessel3, vessel4, vessel5]
vessels = []
instancias = open("instances20A400","r")
f = open("saida20A400.txt","w")
while True:
nome = instancias.readline()
if nome == "":
break
lenght = int(instancias.readline().split("\n")[0])
arrival = int(instancias.readline().split("\n")[0])
cargo_name = instancias.readline().split("\n")[0]
for c in cargos:
# print(c.n)
# print(cargo_name)
if (c.n == cargo_name):
# print("entrou")
vessel = Vessel(nome, c, lenght, arrival)
vessels.append(vessel)
break
#vessel1.printing()
#creating the yards
yard_location_1 = Yard("A",cargo2, 10, 100, 1, 15, True, 1)
yard_location_2 = Yard("B",cargo1, 20, 100, 1, 30, True, 1)
yard_location_3 = Yard("C",cargo2, 30, 100, 7, 15, True, 1)
yard_location_4 = Yard("D",cargo1, 25, 100, 7, 30, False, 1)
yard_location_5 = Yard("E",cargo2, 35, 100, 1, 45, False, 1)
yard_location_6 = Yard("F",cargo1, 40, 100, 1, 60, True, 1)
yard_location_7 = Yard("G",cargo2, 50, 100, 7, 45, True, 1)
yard_location_8 = Yard("H",cargo1, 45, 100, 7, 60, True, 1)
yards = [yard_location_1, yard_location_2, yard_location_3, yard_location_4,yard_location_5,yard_location_6,yard_location_7,yard_location_8]
#yard_location_1.printing()
columns = []
for v in vessels:
for j in range(len(sections)):
for i in range(num_time):
for y in yards:
if (v.c_t == y.c_t or y.c_t.n == ""):
distance = distance_cargo_section(y.c_t, yards, sections[j])
# print (distance)
beta = distance*v.c_t.get_rate()
if(v.a <= i):
column = Column(v, sections[j], y, beta, i)
else:
column = Column(v, sections[j], y, beta, v.a)
# if(y.n == "A" or y.n == "B"):
# column.c_t = y.c_t.n
# column.y.time = column.h_t
columns.append(column)
#print columns
# for c in columns:
# c.printing()
# print("\n")
def select_yard(vessel, yards):
set_yard = []
for y in yards:
if (v.c_t == y.c_t):
set_yard.append(y)
y = random.choice(set_yard)
return y
def distancia(berco, patio):
d = math.sqrt(pow((berco.x - patio.x),2) + pow((berco.y - patio.y),2))
return d
def select_yard_best(berco, yards, cargo_type):
menor = yards[0]
#menord = distancia(berco, yards[0])
yardsort = sorted(yards,key=lambda y: y.dist(berco))
# for y in yardsort:
# print(distancia(berco,y))
# for i in range(len(yardsort)):
# if(yardsort[i].ativo == True or (y.ativo == False and y.c_t == cargo_type)):
# flag = 1
# break
flag = 0
menort = yards[0].time
menord = sys.maxsize
for y in yards:
if (menord > distancia(berco,y) and (y.ativo == True or (y.ativo == False and y.c_t == cargo_type))):
flag = 1
menord = distancia(berco,y)
menor = y
if(flag == 0):
for y in yards:
if(menort > y.time and (y.c_t == cargo_type)):
menort = y.time
menor = y
else:
menor.ativo = False
menor.c_t = cargo_type
return menor
def select_yard_random(berco,yards):
return
def compatible (column, columns):
for c in columns:
# if (c.s == column.s and c.s_t <= column.s_t and (column.s_t + column.h_t) <= (c.s_t + c.h_t)):
# return False
if(column.s != c.s):
if(column.c_t == c.c_t or c.c_t == ""):
# if(c.y == column.y):
# if(c.y.time <= (column.y.time - column.h_t)): # terminar isto, tempo de pátio
# continue
# else:
# return False
# else:
# continue
continue
if((column.s_t + column.h_t <= c.s_t) or (column.s_t >= (c.s_t + c.h_t))):
if(column.c_t == c.c_t or c.c_t == ""):
# if(c.y == column.y):
# if(c.y.time <= (column.y.time - column.h_t)):
# continue
# else:
# return False
# else:
# continue
continue
else:
return False
# elif (c.s_t == column.s_t and c.y == column.y):
# return False
return True
def reset():
for s in sections:
s.t = 1
for y in yards:
y.time = 1
def heuristic(vessels, columns):
best = []
for v in vessels:
for c in columns:
if best == []:
best.append(c)
c.c_t = v.c_t.n
break
elif c.belongs(v) and compatible(c, best):
best.append(c)
if(c.c_t == ""):
c.c_t = v.c_t.n
# c.y.time += c.h_t
break
return best
def fo(columns):
soma = 0
for c in columns:
soma += c.s_t - c.v.a + c.h_t
return soma
def new_heuristic(vessels, sections, yards, alfaqqr):
vesselsord = sorted(vessels, key=lambda vessel: vessel.a)
# for v in vessels:
# v.printing()
columns = []
menor = sections[0]
menorTime = sections[0].t
while(vesselsord != []):
#pegando o melhor navio
# v = vesselsord.pop(0)
#pegando os alpha melhores navios
melhoresVessels = []
if(int(len(vesselsord)*alfaqqr) <= 1):
alfaV = 1
else:
alfaV = int(len(vesselsord)*alfaqqr)
for i in range(alfaV):
melhoresVessels.append(vesselsord[i])
v = random.choice(melhoresVessels)
vesselsord.remove(v)
newsort = sorted(sections, key=lambda x: x.t)
# for k in newsort:
# print (k.t)
# for k in sections:
# if (menorTime > k.t):
# menor = k
# menorTime = k.t
#pegando o melhor berço
# menor = newsort[0]
# menorTime = menor.t
#pegando os alpha melhores berços
sectionRandom = []
r = int(alfaqqr*len(sections))
for i in range(r):
sectionRandom.append(newsort[i])
menor = random.choice(sectionRandom)
menorTime = menor.t
yard = select_yard_best(menor, yards, v.c_t)
distance = distance_cargo_section(yard.c_t, yards, menor)
beta = distance*v.c_t.get_rate()
# if(v.a > menorTime and v.a > yard.time):
# inicio_tempo = v.a
# elif(yard.time > menorTime and yard.time > v.a):
# inicio_tempo = yard.time
# else:
# inicio_tempo = menorTime
if(v.a > menorTime):
if(v.a > yard.time):
inicio_tempo = v.a
else:
inicio_tempo = yard.time
else:
if(menorTime > yard.time):
inicio_tempo = menorTime
else:
inicio_tempo = yard.time
# print(v.a, menorTime, yard.time )
column = Column(v, menor, yard, beta, inicio_tempo)
columns.append(column)
menor.t = column.s_t + column.h_t
yard.time = column.s_t + column.h_t #aqui
menorTime = sys.maxsize
return columns
def swap(s1, s2):
s1, s2 = s2, s1
def reset(sections, yards):
for s in sections: | # vesselsord = sorted(vessels, key=lambda vessel: vessel.a)
vesselsord = vessels
# for v in vessels:
# v.printing()
columns = []
menor = sections[0]
menorTime = sections[0].t
while(vesselsord != []):
#pegando o melhor navio
# v = vesselsord.pop(0)
#pegando os alpha melhores navios
melhoresVessels = []
if(int(len(vesselsord)*alfaqqr) <= 1):
alfaV = 1
else:
alfaV = int(len(vesselsord)*alfaqqr)
for i in range(alfaV):
melhoresVessels.append(vesselsord[i])
v = random.choice(melhoresVessels)
vesselsord.remove(v)
newsort = sorted(sections, key=lambda x: x.t)
# for k in newsort:
# print (k.t)
# for k in sections:
# if (menorTime > k.t):
# menor = k
# menorTime = k.t
#pegando o melhor berço
# menor = newsort[0]
# menorTime = menor.t
#pegando os alpha melhores berços
sectionRandom = []
r = int(alfaqqr*len(sections))
for i in range(r):
sectionRandom.append(newsort[i])
menor = random.choice(sectionRandom)
menorTime = menor.t
yard = select_yard_best(menor, yards, v.c_t)
distance = distance_cargo_section(yard.c_t, yards, menor)
beta = distance*v.c_t.get_rate()
# if(v.a > menorTime and v.a > yard.time):
# inicio_tempo = v.a
# elif(yard.time > menorTime and yard.time > v.a):
# inicio_tempo = yard.time
# else:
# inicio_tempo = menorTime
if(v.a > menorTime):
if(v.a > yard.time):
inicio_tempo = v.a
else:
inicio_tempo = yard.time
else:
if(menorTime > yard.time):
inicio_tempo = menorTime
else:
inicio_tempo = yard.time
# print(v.a, menorTime, yard.time )
column = Column(v, menor, yard, beta, inicio_tempo)
columns.append(column)
menor.t = column.s_t + column.h_t
yard.time = column.s_t + column.h_t #aqui
menorTime = sys.maxsize
return columns
# def trocar_berços(vessel1, vessel2, columns):
# for c1 in columns:
# for c2 in columns:
# if(c1.vessel == vessel1 and c2.vessel == vessel2):
# return true
# def fo_trocado(vessel1, vessel2):
# def troca(vessel1, vessel2):
# def busca_local(vessels, sections, yards, alpha):
# best = new_heuristic(vessels, sections, yards, alphaN[i])
# neighbor = []
# new_best = best
# for v1 in vessels:
# menor = v1
# for v2 in vessels:
# if(v1 != v2 and trocar_berços(v1,v2, best)):
# neighbor.append(v2)
# if(neighbor != []):
# for n in neighbor:
# if (fo_trocado(v1, n) < new_best):
# new_best = fo_trocado(v1, n)
# menor = n
# if(new_best < best ):
# troca(v1, menor)
# return new_best
#função que verifica se é possivel trocar berços
def trocar_berços(vessel, section, column, columns):
#salva o berço anterior
berco_anterior = column.s
#troca o berço
column.s = section
#calcula o novo handling time
distance = distance_cargo_section(column.y.c_t, yards, section)
beta = distance*vessel.c_t.get_rate()
#salva as variaveis antigas e troca o beta e handling time
old_beta = column.beta
old_h_t = column.h_t
column.beta = beta
column.h_t = column.alfa + column.beta
#calcula o tempo de inicio e fim do navio naquele berço
inicio = column.s_t
fim = column.s_t + column.h_t
#verifica se é possível encaixar a nova coluna no conjunto
for c in columns:
if(c != column and c.s == section):
if(c.s_t <= inicio and (c.s_t+c.h_t >= inicio)):
return False
elif(c.s_t <= fim and (c.s_t+c.h_t >= fim)):
return False
elif(c.s_t >= inicio and (c.s_t+c.h_t <= fim)):
return False
#Se passou pelo for quer dizer que a coluna pode ser trocada
#retorna as variaveis pros seus valores anteriores
column.s = berco_anterior
column.beta = old_beta
column.h_t = old_h_t
return True
def fo_trocado(vessel, section, column, columns):
#calculo a fo atual
fo_atual = fo(columns)
#salva o berço anterior
berco_anterior = column.s
#troca o berço
column.s = section
#calcula o novo handling time
distance = distance_cargo_section(column.y.c_t, yards, section)
beta = distance*vessel.c_t.get_rate()
#salva as variaveis antigas e troca o beta e handling time
old_beta = column.beta
old_h_t = column.h_t
column.beta = beta
column.h_t = column.alfa + column.beta
#calcula o fo da nova
fo_nova = fo(columns)
#retorna as variaveis pros seus valores anteriores
column.s = berco_anterior
column.beta = old_beta
column.h_t = old_h_t
if(fo_nova < fo_atual):
print(fo_atual, fo_nova)
return True
else:
return False
def troca(vessel, section, column, columns):
#troca o berço
column.s = section
#calcula o novo handling time
distance = distance_cargo_section(column.y.c_t, yards, section)
beta = distance*vessel.c_t.get_rate()
#atualiza o handling time
column.beta = beta
column.h_t = column.alfa + column.beta
def busca_local(vessels, sections, yards, a):
vesselscpy = vessels.copy()
best = new_heuristic(vesselscpy, sections, yards, a)
neighbor = []
new_best = best
navios = 1
colunas = 1
# for v in vessels: # para cada navio
# for c in best: #para cada coluna
# #procurando a coluna do návio
# if (c.v == v):
# for s in sections: #para cada berço
# if(c.s != s):
# #se for possível trocar o berço e se a fo dele trocado for melhor, ele troca
# if(trocar_berços(v, s, c, best) and fo_trocado(v, s, c, best)):
# troca(v, s, c, best)
# print("navios: ", navios)
# navios+=1
vesselsord = sorted(vessels, key=lambda vessel: vessel.a)
# best = new_heuristic(vessels, sections, yards, a)
# new_best = best
menor = fo(best)
reset(sections, yards)
entrou = False
menori = 0
menorj = 0
for i in range(len(vesselsord)):
for j in range(i+1,len(vesselsord)):
print("i:", i , " j:", j)
swap(vesselsord[i], vesselsord[j])
vesselscopy = vesselsord.copy()
new_best = new_heuristic_2opt(vesselscopy, sections, yards, a)
print("menor: ", menor, " novo: ", fo(new_best))
if fo(new_best) < menor:
menor = fo(new_best)
menori = i
menorj = j
entrou = True
swap(vesselsord[i], vesselsord[j])
reset(sections, yards)
if entrou:
swap(vesselsord[menori], vesselsord[menorj])
entrou = False
print("menor fo:", menor)
# best = heuristic(vessels, columns)
# best_new = new_heuristic(vessels, sections, yards, alfaqqr)
# i = 0
# for c in best:
# i += 1
# j = 0
# print("Solução: \n")
# for c in best_new:
# c.printing()
# j += 1
# print("\n")
# print(i)
# print(j)
# print("Valor da FO = ", fo(best))
# print("Valor da FO nova = ", fo(best_new))
random.seed(0)
best = []
# alfa reativo
for k in range(100):
indice = 0
medias = [0,0,0,0,0,0,0,0,0,0]
soma = [0,0,0,0,0,0,0,0,0,0]
vetor_fo = [0,0,0,0,0,0,0,0,0,0]
qi = [0,0,0,0,0,0,0,0,0,0]
for j in range(B):
sorteado = random.randrange(100)
# print (sorteado)
for i in range(10):
# print(pAlphaN[i][0])
if(pAlphaN[i][0] <= sorteado and pAlphaN[i][1] >= sorteado):
indice = i
soma[i] += 1.0
# print(alphaN[i])
# inicio = time.time()
best = new_heuristic(vessels, sections, yards, alphaN[i])
# fim = time.time()
# print(fim - inicio)
f.write("alfa = ")
f.write(str(alphaN[i]))
f.write("\n")
f.write("Valor da FO nova = ")
f.write(str(fo(best)))
f.write("\n")
vetor_fo[i] += fo(best)
reset(sections, yards)
break
# print (sorteado, alphaN[indice])
for i in range(10):
if(soma[i] == 0):
qi[i] = 0
else:
medias[i] = vetor_fo[i]/soma[i]
qi[i] = (1/medias[i])*pow(10,5)
frase = "i: " + str(i) + "soma: " + str(soma[i]) + "FO: " + str(vetor_fo[i]) + "medias: " + str(medias[i]) + "qi: " + str(qi[i])
f.write(frase)
f.write("\n")
q = 0
for i in range(10):
q += qi[i]
print(q)
novopAlphaN =[]
somaP = 0
for i in range(10):
pAlphaN[i] = qi[i]/q*100
print(pAlphaN[i])
if(qi[i] == 0):
novopAlphaN.append([somaP, somaP])
else:
novopAlphaN.append([somaP, somaP + pAlphaN[i] - 1])
somaP += pAlphaN[i]
# print(soma)
for i in range(10):
f.write(str(novopAlphaN[i]))
f.write("\n")
pAlphaN = novopAlphaN
# for a in alphaN:
# print("alpha: ", a)
# busca_local(vessels, sections, yards, a)
#fazendo a busca local
# busca_local(vessels, sections, yards, 0.1)
#criar novo laço para testar a solução viável da literatura
#continuar a tabela, valores iguais no começo e intercalando depois
#aleatoridade com berço e pátio
#tentar fazer uma função gulosa onde para cada navio verifica a combinação de berço e pátio e vê qual a melhor, ou seja,
#fazer as colunas para cada navio
#cuspir a resposta num arquivo, csv talvez, automatizar a coleta de dados
#marcar o tempo de execução do algoritmo - gprof
#implementar o alfa reativo, alfa 1 e alfa 2, um para navio e um para berço, e a intercalação dos dois.
#Escrever no papel o problema e o algoritmo de solução, heurística proposta no formato de pseudo-código
#cuspir a resposta num arquivo, csv talvez, automatizar a coleta de dados
#Testar com 10 instâncias de cada tamanho
#Automatizar a saida
#Colocar o guloso junto na tabela, e executar com 100000
#Escrever no papel o problema e o algoritmo de solução, heurística proposta no formato de pseudo-código
#Busca Local e GRASP
#busca local | s.t = 1
for y in yards:
y.time = 1
def new_heuristic_2opt(vessels, sections, yards, alfaqqr): | random_line_split |
AlfaReativo.py | import random
import sys
import math
import time
n = 2
num_section = 2
cargo = {1:"ferro", 2:"bauxita", 3:"cobre", 4:"prata"}
#print cargo[3]
# time = [0,0,0,0,0,0,0]
num_time = 1000
section = [0,0]
alpha = 3
alphaV = 20
alphaN = [0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1.0]
pAlphaN = [[0,9],[10,19],[20,29],[30,39],[40,49],[50,59],[60,69],[70,79],[80,89],[90,99]]
B = 100
tal = 10
# sorteado = random.randrange(100)
# print (sorteado)
#min mi - Ai + ci
# Input parameters
# Ai = expected arrival time of vessel i
# Decision variabels
# mi = starting time of handling of vessel i
# ci = total handliing time of vessel i
#class Vessel that has the cargo type, the length from the vessel to
#allocate in sections and the days is the handling time
T = 0.1 #tempo necessário pra um crane carrgar/descarregar uma quantidade de carga, 0.1h
cranes = 3 #numero de cranes por vessel por seção
alfa = T/cranes
class Section:
def __init__(self, name, distance, time, x, y):
self.n = name
self.d = distance
self.t = time
self.x = x
self.y = y
def get_distance(self):
return self.d
def printing(self):
print("Seção "+self.n)
class Cargo:
def __init__(self, name, rate):
self.n = name
self.r = rate
def get_rate(self):
return self.r
def printing(self):
print("Carga "+self.n)
class Vessel:
def __init__(self, name, cargo_type, lenght, arrival):
self.n = name
self.c_t = cargo_type
self.l = lenght
self.a = arrival
def printing(self):
print ("Vessel "+self.n)
#print (self.c_t, self.l, self.d)
#class Yard has the cargo type, the distance from the location to the
#berth and the capacity from the yard
class Yard:
def __init__(self,name, cargo_type, distance, capacity, x, y, ativo, time):
self.n = name
self.c_t = cargo_type
self.d = distance
self.c = capacity
self.x = x
self.y = y
self.ativo = ativo
self.time = time
def printing(self):
print ("location "+self.n)
self.c_t.printing()
print (self.d, self.c)
def dist(self, berco):
return distancia(berco, self)
#class Column has the vessel
class Colum | f __init__(self, vessel, section, yard, beta, time):
self.s = section
self.y = yard
self.beta = beta
self.alfa = alfa
self.h_t = alfa + beta #handling time = tempo de load/unload da carga + tempo de transferencia da carga
self.v = vessel
self.s_t = time #starting time
self.c_t = ""
def printing(self):
self.v.printing()
self.s.printing()
self.y.printing()
print ("Starting Time: ",self.s_t, "Handling Time: ", self.h_t)
def belongs (self, vessel):
return vessel == self.v
def distance_cargo_section(cargo, yards, section):
d = 0
t = 0
for c in yards:
if c.c_t == cargo:
d += c.d
t += 1
d += section.d/t
return d
#creating the sections
# section1 = Section("1", 10, 1, 1, 1)
# section2 = Section("2", 10, 1, 5, 1)
# # section3 = Section("3", 30, 1, 11, 1)
# sections = [section1, section2]
#15 berços
# section1 = Section("1", 10, 1,1,1)
# section2 = Section("2", 10, 1,2,1)
# section3 = Section("3", 30, 1,4,1)
# section4 = Section("4", 30, 1,6,1)
# section5 = Section("5", 45, 1,9,1)
# section6 = Section("6", 45, 1,12,1)
# section7 = Section("7", 15, 1,15,1)
# section8 = Section("8", 15, 1,19,1)
# section9 = Section("9", 25, 1,22,1)
# section10 = Section("10", 25, 1,25,1)
# section11 = Section("11", 45, 1,28,1)
# section12 = Section("12", 15, 1,30,1)
# section13 = Section("13", 30, 1,32,1)
# section14 = Section("14", 10, 1,34,1)
# section15 = Section("15", 20, 1,35,1)
# sections = [section1,section2,section3,section4,section5,section6,section7,section8,section9,section10,section11,section12,section13,section14,section15]
#10 berços
section1 = Section("1", 10, 1,1,1)
section2 = Section("2", 10, 1,2,1)
section3 = Section("3", 30, 1,4,1)
section4 = Section("4", 30, 1,6,1)
section5 = Section("5", 45, 1,9,1)
section6 = Section("6", 45, 1,12,1)
section7 = Section("7", 15, 1,15,1)
section8 = Section("8", 15, 1,19,1)
section9 = Section("9", 25, 1,22,1)
section10 = Section("10", 25, 1,25,1)
sections = [section1,section2,section3,section4,section5,section6,section7,section8,section9,section10]
#creating the type of cargos
cargo1 = Cargo("cobre", 0.7)
cargo2 = Cargo("ferro", 0.8)
cargo3 = Cargo("bauxita", 0.9)
cargos = [cargo1, cargo2, cargo3]
#creating the vessels
vessel1 = Vessel("1",cargo1, 1, 1)
vessel2 = Vessel("2",cargo2, 1, 20)
vessel3 = Vessel("3",cargo2, 1, 30)
vessel4 = Vessel("4",cargo1, 1, 30)
vessel5 = Vessel("5",cargo2, 1, 30)
vessels = [vessel1, vessel2, vessel3, vessel4, vessel5]
vessels = []
instancias = open("instances20A400","r")
f = open("saida20A400.txt","w")
while True:
nome = instancias.readline()
if nome == "":
break
lenght = int(instancias.readline().split("\n")[0])
arrival = int(instancias.readline().split("\n")[0])
cargo_name = instancias.readline().split("\n")[0]
for c in cargos:
# print(c.n)
# print(cargo_name)
if (c.n == cargo_name):
# print("entrou")
vessel = Vessel(nome, c, lenght, arrival)
vessels.append(vessel)
break
#vessel1.printing()
#creating the yards
yard_location_1 = Yard("A",cargo2, 10, 100, 1, 15, True, 1)
yard_location_2 = Yard("B",cargo1, 20, 100, 1, 30, True, 1)
yard_location_3 = Yard("C",cargo2, 30, 100, 7, 15, True, 1)
yard_location_4 = Yard("D",cargo1, 25, 100, 7, 30, False, 1)
yard_location_5 = Yard("E",cargo2, 35, 100, 1, 45, False, 1)
yard_location_6 = Yard("F",cargo1, 40, 100, 1, 60, True, 1)
yard_location_7 = Yard("G",cargo2, 50, 100, 7, 45, True, 1)
yard_location_8 = Yard("H",cargo1, 45, 100, 7, 60, True, 1)
yards = [yard_location_1, yard_location_2, yard_location_3, yard_location_4,yard_location_5,yard_location_6,yard_location_7,yard_location_8]
#yard_location_1.printing()
columns = []
for v in vessels:
for j in range(len(sections)):
for i in range(num_time):
for y in yards:
if (v.c_t == y.c_t or y.c_t.n == ""):
distance = distance_cargo_section(y.c_t, yards, sections[j])
# print (distance)
beta = distance*v.c_t.get_rate()
if(v.a <= i):
column = Column(v, sections[j], y, beta, i)
else:
column = Column(v, sections[j], y, beta, v.a)
# if(y.n == "A" or y.n == "B"):
# column.c_t = y.c_t.n
# column.y.time = column.h_t
columns.append(column)
#print columns
# for c in columns:
# c.printing()
# print("\n")
def select_yard(vessel, yards):
set_yard = []
for y in yards:
if (v.c_t == y.c_t):
set_yard.append(y)
y = random.choice(set_yard)
return y
def distancia(berco, patio):
d = math.sqrt(pow((berco.x - patio.x),2) + pow((berco.y - patio.y),2))
return d
def select_yard_best(berco, yards, cargo_type):
menor = yards[0]
#menord = distancia(berco, yards[0])
yardsort = sorted(yards,key=lambda y: y.dist(berco))
# for y in yardsort:
# print(distancia(berco,y))
# for i in range(len(yardsort)):
# if(yardsort[i].ativo == True or (y.ativo == False and y.c_t == cargo_type)):
# flag = 1
# break
flag = 0
menort = yards[0].time
menord = sys.maxsize
for y in yards:
if (menord > distancia(berco,y) and (y.ativo == True or (y.ativo == False and y.c_t == cargo_type))):
flag = 1
menord = distancia(berco,y)
menor = y
if(flag == 0):
for y in yards:
if(menort > y.time and (y.c_t == cargo_type)):
menort = y.time
menor = y
else:
menor.ativo = False
menor.c_t = cargo_type
return menor
def select_yard_random(berco,yards):
return
def compatible (column, columns):
for c in columns:
# if (c.s == column.s and c.s_t <= column.s_t and (column.s_t + column.h_t) <= (c.s_t + c.h_t)):
# return False
if(column.s != c.s):
if(column.c_t == c.c_t or c.c_t == ""):
# if(c.y == column.y):
# if(c.y.time <= (column.y.time - column.h_t)): # terminar isto, tempo de pátio
# continue
# else:
# return False
# else:
# continue
continue
if((column.s_t + column.h_t <= c.s_t) or (column.s_t >= (c.s_t + c.h_t))):
if(column.c_t == c.c_t or c.c_t == ""):
# if(c.y == column.y):
# if(c.y.time <= (column.y.time - column.h_t)):
# continue
# else:
# return False
# else:
# continue
continue
else:
return False
# elif (c.s_t == column.s_t and c.y == column.y):
# return False
return True
def reset():
for s in sections:
s.t = 1
for y in yards:
y.time = 1
def heuristic(vessels, columns):
best = []
for v in vessels:
for c in columns:
if best == []:
best.append(c)
c.c_t = v.c_t.n
break
elif c.belongs(v) and compatible(c, best):
best.append(c)
if(c.c_t == ""):
c.c_t = v.c_t.n
# c.y.time += c.h_t
break
return best
def fo(columns):
soma = 0
for c in columns:
soma += c.s_t - c.v.a + c.h_t
return soma
def new_heuristic(vessels, sections, yards, alfaqqr):
vesselsord = sorted(vessels, key=lambda vessel: vessel.a)
# for v in vessels:
# v.printing()
columns = []
menor = sections[0]
menorTime = sections[0].t
while(vesselsord != []):
#pegando o melhor navio
# v = vesselsord.pop(0)
#pegando os alpha melhores navios
melhoresVessels = []
if(int(len(vesselsord)*alfaqqr) <= 1):
alfaV = 1
else:
alfaV = int(len(vesselsord)*alfaqqr)
for i in range(alfaV):
melhoresVessels.append(vesselsord[i])
v = random.choice(melhoresVessels)
vesselsord.remove(v)
newsort = sorted(sections, key=lambda x: x.t)
# for k in newsort:
# print (k.t)
# for k in sections:
# if (menorTime > k.t):
# menor = k
# menorTime = k.t
#pegando o melhor berço
# menor = newsort[0]
# menorTime = menor.t
#pegando os alpha melhores berços
sectionRandom = []
r = int(alfaqqr*len(sections))
for i in range(r):
sectionRandom.append(newsort[i])
menor = random.choice(sectionRandom)
menorTime = menor.t
yard = select_yard_best(menor, yards, v.c_t)
distance = distance_cargo_section(yard.c_t, yards, menor)
beta = distance*v.c_t.get_rate()
# if(v.a > menorTime and v.a > yard.time):
# inicio_tempo = v.a
# elif(yard.time > menorTime and yard.time > v.a):
# inicio_tempo = yard.time
# else:
# inicio_tempo = menorTime
if(v.a > menorTime):
if(v.a > yard.time):
inicio_tempo = v.a
else:
inicio_tempo = yard.time
else:
if(menorTime > yard.time):
inicio_tempo = menorTime
else:
inicio_tempo = yard.time
# print(v.a, menorTime, yard.time )
column = Column(v, menor, yard, beta, inicio_tempo)
columns.append(column)
menor.t = column.s_t + column.h_t
yard.time = column.s_t + column.h_t #aqui
menorTime = sys.maxsize
return columns
def swap(s1, s2):
s1, s2 = s2, s1
def reset(sections, yards):
for s in sections:
s.t = 1
for y in yards:
y.time = 1
def new_heuristic_2opt(vessels, sections, yards, alfaqqr):
# vesselsord = sorted(vessels, key=lambda vessel: vessel.a)
vesselsord = vessels
# for v in vessels:
# v.printing()
columns = []
menor = sections[0]
menorTime = sections[0].t
while(vesselsord != []):
#pegando o melhor navio
# v = vesselsord.pop(0)
#pegando os alpha melhores navios
melhoresVessels = []
if(int(len(vesselsord)*alfaqqr) <= 1):
alfaV = 1
else:
alfaV = int(len(vesselsord)*alfaqqr)
for i in range(alfaV):
melhoresVessels.append(vesselsord[i])
v = random.choice(melhoresVessels)
vesselsord.remove(v)
newsort = sorted(sections, key=lambda x: x.t)
# for k in newsort:
# print (k.t)
# for k in sections:
# if (menorTime > k.t):
# menor = k
# menorTime = k.t
#pegando o melhor berço
# menor = newsort[0]
# menorTime = menor.t
#pegando os alpha melhores berços
sectionRandom = []
r = int(alfaqqr*len(sections))
for i in range(r):
sectionRandom.append(newsort[i])
menor = random.choice(sectionRandom)
menorTime = menor.t
yard = select_yard_best(menor, yards, v.c_t)
distance = distance_cargo_section(yard.c_t, yards, menor)
beta = distance*v.c_t.get_rate()
# if(v.a > menorTime and v.a > yard.time):
# inicio_tempo = v.a
# elif(yard.time > menorTime and yard.time > v.a):
# inicio_tempo = yard.time
# else:
# inicio_tempo = menorTime
if(v.a > menorTime):
if(v.a > yard.time):
inicio_tempo = v.a
else:
inicio_tempo = yard.time
else:
if(menorTime > yard.time):
inicio_tempo = menorTime
else:
inicio_tempo = yard.time
# print(v.a, menorTime, yard.time )
column = Column(v, menor, yard, beta, inicio_tempo)
columns.append(column)
menor.t = column.s_t + column.h_t
yard.time = column.s_t + column.h_t #aqui
menorTime = sys.maxsize
return columns
# def trocar_berços(vessel1, vessel2, columns):
# for c1 in columns:
# for c2 in columns:
# if(c1.vessel == vessel1 and c2.vessel == vessel2):
# return true
# def fo_trocado(vessel1, vessel2):
# def troca(vessel1, vessel2):
# def busca_local(vessels, sections, yards, alpha):
# best = new_heuristic(vessels, sections, yards, alphaN[i])
# neighbor = []
# new_best = best
# for v1 in vessels:
# menor = v1
# for v2 in vessels:
# if(v1 != v2 and trocar_berços(v1,v2, best)):
# neighbor.append(v2)
# if(neighbor != []):
# for n in neighbor:
# if (fo_trocado(v1, n) < new_best):
# new_best = fo_trocado(v1, n)
# menor = n
# if(new_best < best ):
# troca(v1, menor)
# return new_best
#função que verifica se é possivel trocar berços
def trocar_berços(vessel, section, column, columns):
#salva o berço anterior
berco_anterior = column.s
#troca o berço
column.s = section
#calcula o novo handling time
distance = distance_cargo_section(column.y.c_t, yards, section)
beta = distance*vessel.c_t.get_rate()
#salva as variaveis antigas e troca o beta e handling time
old_beta = column.beta
old_h_t = column.h_t
column.beta = beta
column.h_t = column.alfa + column.beta
#calcula o tempo de inicio e fim do navio naquele berço
inicio = column.s_t
fim = column.s_t + column.h_t
#verifica se é possível encaixar a nova coluna no conjunto
for c in columns:
if(c != column and c.s == section):
if(c.s_t <= inicio and (c.s_t+c.h_t >= inicio)):
return False
elif(c.s_t <= fim and (c.s_t+c.h_t >= fim)):
return False
elif(c.s_t >= inicio and (c.s_t+c.h_t <= fim)):
return False
#Se passou pelo for quer dizer que a coluna pode ser trocada
#retorna as variaveis pros seus valores anteriores
column.s = berco_anterior
column.beta = old_beta
column.h_t = old_h_t
return True
def fo_trocado(vessel, section, column, columns):
#calculo a fo atual
fo_atual = fo(columns)
#salva o berço anterior
berco_anterior = column.s
#troca o berço
column.s = section
#calcula o novo handling time
distance = distance_cargo_section(column.y.c_t, yards, section)
beta = distance*vessel.c_t.get_rate()
#salva as variaveis antigas e troca o beta e handling time
old_beta = column.beta
old_h_t = column.h_t
column.beta = beta
column.h_t = column.alfa + column.beta
#calcula o fo da nova
fo_nova = fo(columns)
#retorna as variaveis pros seus valores anteriores
column.s = berco_anterior
column.beta = old_beta
column.h_t = old_h_t
if(fo_nova < fo_atual):
print(fo_atual, fo_nova)
return True
else:
return False
def troca(vessel, section, column, columns):
#troca o berço
column.s = section
#calcula o novo handling time
distance = distance_cargo_section(column.y.c_t, yards, section)
beta = distance*vessel.c_t.get_rate()
#atualiza o handling time
column.beta = beta
column.h_t = column.alfa + column.beta
def busca_local(vessels, sections, yards, a):
vesselscpy = vessels.copy()
best = new_heuristic(vesselscpy, sections, yards, a)
neighbor = []
new_best = best
navios = 1
colunas = 1
# for v in vessels: # para cada navio
# for c in best: #para cada coluna
# #procurando a coluna do návio
# if (c.v == v):
# for s in sections: #para cada berço
# if(c.s != s):
# #se for possível trocar o berço e se a fo dele trocado for melhor, ele troca
# if(trocar_berços(v, s, c, best) and fo_trocado(v, s, c, best)):
# troca(v, s, c, best)
# print("navios: ", navios)
# navios+=1
vesselsord = sorted(vessels, key=lambda vessel: vessel.a)
# best = new_heuristic(vessels, sections, yards, a)
# new_best = best
menor = fo(best)
reset(sections, yards)
entrou = False
menori = 0
menorj = 0
for i in range(len(vesselsord)):
for j in range(i+1,len(vesselsord)):
print("i:", i , " j:", j)
swap(vesselsord[i], vesselsord[j])
vesselscopy = vesselsord.copy()
new_best = new_heuristic_2opt(vesselscopy, sections, yards, a)
print("menor: ", menor, " novo: ", fo(new_best))
if fo(new_best) < menor:
menor = fo(new_best)
menori = i
menorj = j
entrou = True
swap(vesselsord[i], vesselsord[j])
reset(sections, yards)
if entrou:
swap(vesselsord[menori], vesselsord[menorj])
entrou = False
print("menor fo:", menor)
# best = heuristic(vessels, columns)
# best_new = new_heuristic(vessels, sections, yards, alfaqqr)
# i = 0
# for c in best:
# i += 1
# j = 0
# print("Solução: \n")
# for c in best_new:
# c.printing()
# j += 1
# print("\n")
# print(i)
# print(j)
# print("Valor da FO = ", fo(best))
# print("Valor da FO nova = ", fo(best_new))
random.seed(0)
best = []
# alfa reativo
for k in range(100):
indice = 0
medias = [0,0,0,0,0,0,0,0,0,0]
soma = [0,0,0,0,0,0,0,0,0,0]
vetor_fo = [0,0,0,0,0,0,0,0,0,0]
qi = [0,0,0,0,0,0,0,0,0,0]
for j in range(B):
sorteado = random.randrange(100)
# print (sorteado)
for i in range(10):
# print(pAlphaN[i][0])
if(pAlphaN[i][0] <= sorteado and pAlphaN[i][1] >= sorteado):
indice = i
soma[i] += 1.0
# print(alphaN[i])
# inicio = time.time()
best = new_heuristic(vessels, sections, yards, alphaN[i])
# fim = time.time()
# print(fim - inicio)
f.write("alfa = ")
f.write(str(alphaN[i]))
f.write("\n")
f.write("Valor da FO nova = ")
f.write(str(fo(best)))
f.write("\n")
vetor_fo[i] += fo(best)
reset(sections, yards)
break
# print (sorteado, alphaN[indice])
for i in range(10):
if(soma[i] == 0):
qi[i] = 0
else:
medias[i] = vetor_fo[i]/soma[i]
qi[i] = (1/medias[i])*pow(10,5)
frase = "i: " + str(i) + "soma: " + str(soma[i]) + "FO: " + str(vetor_fo[i]) + "medias: " + str(medias[i]) + "qi: " + str(qi[i])
f.write(frase)
f.write("\n")
q = 0
for i in range(10):
q += qi[i]
print(q)
novopAlphaN =[]
somaP = 0
for i in range(10):
pAlphaN[i] = qi[i]/q*100
print(pAlphaN[i])
if(qi[i] == 0):
novopAlphaN.append([somaP, somaP])
else:
novopAlphaN.append([somaP, somaP + pAlphaN[i] - 1])
somaP += pAlphaN[i]
# print(soma)
for i in range(10):
f.write(str(novopAlphaN[i]))
f.write("\n")
pAlphaN = novopAlphaN
# for a in alphaN:
# print("alpha: ", a)
# busca_local(vessels, sections, yards, a)
#fazendo a busca local
# busca_local(vessels, sections, yards, 0.1)
#criar novo laço para testar a solução viável da literatura
#continuar a tabela, valores iguais no começo e intercalando depois
#aleatoridade com berço e pátio
#tentar fazer uma função gulosa onde para cada navio verifica a combinação de berço e pátio e vê qual a melhor, ou seja,
#fazer as colunas para cada navio
#cuspir a resposta num arquivo, csv talvez, automatizar a coleta de dados
#marcar o tempo de execução do algoritmo - gprof
#implementar o alfa reativo, alfa 1 e alfa 2, um para navio e um para berço, e a intercalação dos dois.
#Escrever no papel o problema e o algoritmo de solução, heurística proposta no formato de pseudo-código
#cuspir a resposta num arquivo, csv talvez, automatizar a coleta de dados
#Testar com 10 instâncias de cada tamanho
#Automatizar a saida
#Colocar o guloso junto na tabela, e executar com 100000
#Escrever no papel o problema e o algoritmo de solução, heurística proposta no formato de pseudo-código
#Busca Local e GRASP
#busca local
| n:
de | identifier_name |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.