file_name large_stringlengths 4 140 | prefix large_stringlengths 0 39k | suffix large_stringlengths 0 36.1k | middle large_stringlengths 0 29.4k | fim_type large_stringclasses 4
values |
|---|---|---|---|---|
keyword_plan.pb.go | // Code generated by protoc-gen-go. DO NOT EDIT.
// source: google/ads/googleads/v2/resources/keyword_plan.proto
package resources
import (
fmt "fmt"
math "math"
proto "github.com/golang/protobuf/proto"
wrappers "github.com/golang/protobuf/ptypes/wrappers"
common "google.golang.org/genproto/googleapis/ads/googleads/v2/common"
enums "google.golang.org/genproto/googleapis/ads/googleads/v2/enums"
_ "google.golang.org/genproto/googleapis/api/annotations"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
// A Keyword Planner plan.
// Max number of saved keyword plans: 10000.
// It's possible to remove plans if limit is reached.
type KeywordPlan struct {
// The resource name of the Keyword Planner plan.
// KeywordPlan resource names have the form:
//
// `customers/{customer_id}/keywordPlans/{kp_plan_id}`
ResourceName string `protobuf:"bytes,1,opt,name=resource_name,json=resourceName,proto3" json:"resource_name,omitempty"`
// The ID of the keyword plan.
Id *wrappers.Int64Value `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"`
// The name of the keyword plan.
//
// This field is required and should not be empty when creating new keyword
// plans.
Name *wrappers.StringValue `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"`
// The date period used for forecasting the plan.
ForecastPeriod *KeywordPlanForecastPeriod `protobuf:"bytes,4,opt,name=forecast_period,json=forecastPeriod,proto3" json:"forecast_period,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *KeywordPlan) Reset() { *m = KeywordPlan{} }
func (m *KeywordPlan) String() string { return proto.CompactTextString(m) }
func (*KeywordPlan) ProtoMessage() {}
func (*KeywordPlan) Descriptor() ([]byte, []int) {
return fileDescriptor_db2ef87e79a4b462, []int{0}
}
func (m *KeywordPlan) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_KeywordPlan.Unmarshal(m, b)
}
func (m *KeywordPlan) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_KeywordPlan.Marshal(b, m, deterministic)
}
func (m *KeywordPlan) XXX_Merge(src proto.Message) {
xxx_messageInfo_KeywordPlan.Merge(m, src)
}
func (m *KeywordPlan) XXX_Size() int |
func (m *KeywordPlan) XXX_DiscardUnknown() {
xxx_messageInfo_KeywordPlan.DiscardUnknown(m)
}
var xxx_messageInfo_KeywordPlan proto.InternalMessageInfo
func (m *KeywordPlan) GetResourceName() string {
if m != nil {
return m.ResourceName
}
return ""
}
func (m *KeywordPlan) GetId() *wrappers.Int64Value {
if m != nil {
return m.Id
}
return nil
}
func (m *KeywordPlan) GetName() *wrappers.StringValue {
if m != nil {
return m.Name
}
return nil
}
func (m *KeywordPlan) GetForecastPeriod() *KeywordPlanForecastPeriod {
if m != nil {
return m.ForecastPeriod
}
return nil
}
// The forecasting period associated with the keyword plan.
type KeywordPlanForecastPeriod struct {
// Required. The date used for forecasting the Plan.
//
// Types that are valid to be assigned to Interval:
// *KeywordPlanForecastPeriod_DateInterval
// *KeywordPlanForecastPeriod_DateRange
Interval isKeywordPlanForecastPeriod_Interval `protobuf_oneof:"interval"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *KeywordPlanForecastPeriod) Reset() { *m = KeywordPlanForecastPeriod{} }
func (m *KeywordPlanForecastPeriod) String() string { return proto.CompactTextString(m) }
func (*KeywordPlanForecastPeriod) ProtoMessage() {}
func (*KeywordPlanForecastPeriod) Descriptor() ([]byte, []int) {
return fileDescriptor_db2ef87e79a4b462, []int{1}
}
func (m *KeywordPlanForecastPeriod) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_KeywordPlanForecastPeriod.Unmarshal(m, b)
}
func (m *KeywordPlanForecastPeriod) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_KeywordPlanForecastPeriod.Marshal(b, m, deterministic)
}
func (m *KeywordPlanForecastPeriod) XXX_Merge(src proto.Message) {
xxx_messageInfo_KeywordPlanForecastPeriod.Merge(m, src)
}
func (m *KeywordPlanForecastPeriod) XXX_Size() int {
return xxx_messageInfo_KeywordPlanForecastPeriod.Size(m)
}
func (m *KeywordPlanForecastPeriod) XXX_DiscardUnknown() {
xxx_messageInfo_KeywordPlanForecastPeriod.DiscardUnknown(m)
}
var xxx_messageInfo_KeywordPlanForecastPeriod proto.InternalMessageInfo
type isKeywordPlanForecastPeriod_Interval interface {
isKeywordPlanForecastPeriod_Interval()
}
type KeywordPlanForecastPeriod_DateInterval struct {
DateInterval enums.KeywordPlanForecastIntervalEnum_KeywordPlanForecastInterval `protobuf:"varint,1,opt,name=date_interval,json=dateInterval,proto3,enum=google.ads.googleads.v2.enums.KeywordPlanForecastIntervalEnum_KeywordPlanForecastInterval,oneof"`
}
type KeywordPlanForecastPeriod_DateRange struct {
DateRange *common.DateRange `protobuf:"bytes,2,opt,name=date_range,json=dateRange,proto3,oneof"`
}
func (*KeywordPlanForecastPeriod_DateInterval) isKeywordPlanForecastPeriod_Interval() {}
func (*KeywordPlanForecastPeriod_DateRange) isKeywordPlanForecastPeriod_Interval() {}
func (m *KeywordPlanForecastPeriod) GetInterval() isKeywordPlanForecastPeriod_Interval {
if m != nil {
return m.Interval
}
return nil
}
func (m *KeywordPlanForecastPeriod) GetDateInterval() enums.KeywordPlanForecastIntervalEnum_KeywordPlanForecastInterval {
if x, ok := m.GetInterval().(*KeywordPlanForecastPeriod_DateInterval); ok {
return x.DateInterval
}
return enums.KeywordPlanForecastIntervalEnum_UNSPECIFIED
}
func (m *KeywordPlanForecastPeriod) GetDateRange() *common.DateRange {
if x, ok := m.GetInterval().(*KeywordPlanForecastPeriod_DateRange); ok {
return x.DateRange
}
return nil
}
// XXX_OneofWrappers is for the internal use of the proto package.
func (*KeywordPlanForecastPeriod) XXX_OneofWrappers() []interface{} {
return []interface{}{
(*KeywordPlanForecastPeriod_DateInterval)(nil),
(*KeywordPlanForecastPeriod_DateRange)(nil),
}
}
func init() {
proto.RegisterType((*KeywordPlan)(nil), "google.ads.googleads.v2.resources.KeywordPlan")
proto.RegisterType((*KeywordPlanForecastPeriod)(nil), "google.ads.googleads.v2.resources.KeywordPlanForecastPeriod")
}
func init() {
proto.RegisterFile("google/ads/googleads/v2/resources/keyword_plan.proto", fileDescriptor_db2ef87e79a4b462)
}
var fileDescriptor_db2ef87e79a4b462 = []byte{
// 485 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x93, 0xdd, 0x6a, 0xd4, 0x40,
0x14, 0xc7, 0x9b, 0xb4, 0x88, 0x9d, 0x7e, 0x28, 0xb9, 0x5a, 0x6b, 0x91, 0xb6, 0x52, 0xa8, 0x0a,
0x13, 0x89, 0xc5, 0x8b, 0xe8, 0x4d, 0x16, 0xb5, 0x1f, 0x82, 0x2c, 0x11, 0xf6, 0xa2, 0x2c, 0x2c,
0xd3, 0x9d, 0xb3, 0x21, 0x98, 0xcc, 0x84, 0x99, 0xc9, 0x16, 0x2f, 0x7d, 0x15, 0x2f, 0x7d, 0x14,
0x1f, 0xc5, 0x17, 0xd0, 0x1b, 0x41, 0x32, 0x5f, 0xb4, 0xd8, 0x74, 0xef, 0xce, 0xd9, 0xf9, 0x9d,
0xff, 0xff, 0x7c, 0x64, 0xd1, 0x71, 0xc1, 0x79, 0x51, 0x41, 0x4c, 0xa8, 0x8c, 0x4d, 0xd8, 0x45,
0x8b, 0x24, 0x16, 0x20, 0x79, 0x2b, 0x66, 0x20, 0xe3, 0x2f, 0xf0, 0xf5, 0x8a, 0x0b, 0x3a, 0x6d,
0x2a, 0xc2, 0x70, 0x23, 0xb8, 0xe2, 0xd1, 0xbe, 0x41, 0x31, 0xa1, 0x12, 0xfb, 0x2a, 0xbc, 0x48,
0xb0, 0xaf, 0xda, 0x79, 0xde, 0x27, 0x3c, 0xe3, 0x75, 0xcd, 0x59, 0x4c, 0x89, 0x02, 0x69, 0xe4,
0x76, 0x86, 0x7d, 0x2c, 0xb0, 0xb6, 0xbe, 0xd9, 0xc0, 0x74, 0xce, 0x05, 0xcc, 0x88, 0x54, 0xd3,
0x92, 0x29, 0x10, 0x0b, 0x52, 0x59, 0x8d, 0x27, 0x56, 0x43, 0x67, 0x97, 0xed, 0x3c, 0xbe, 0x12,
0xa4, 0x69, 0x40, 0x38, 0x8f, 0x5d, 0xe7, 0xd1, 0x94, 0x31, 0x61, 0x8c, 0x2b, 0xa2, 0x4a, 0xce,
0xec, 0xeb, 0xc1, 0x9f, 0x00, 0x6d, 0x7c, 0x34, 0x36, 0xa3, 0x8a, 0xb0, 0xe8, 0x29, 0xda, 0x72,
0xa3, 0x4c, 0x19, 0xa9, 0x61, 0x10, 0xec, 0x05, 0x47, 0xeb, 0xf9, 0xa6, 0xfb, 0xf1, 0x13, 0xa9,
0x21, 0x7a, 0x81, 0xc2, 0x92, 0x0e, 0xc2, 0xbd, 0xe0, 0x68, 0x23, 0x79, 0x6c, 0xf7, 0x80, 0x9d,
0x3f, 0x3e, 0x63, 0xea, 0xf5, 0xf1, 0x98, 0x54, 0x2d, 0xe4, 0x61, 0x49, 0xa3, 0x97, 0x68, 0x4d,
0x0b, 0xad, 0x6a, 0x7c, 0xf7, 0x3f, 0xfc, 0xb3, 0x12, 0x25, 0x2b, 0x0c, 0xaf, 0xc9, 0x08, 0xd0,
0x03, 0x3f, 0x6c, 0x03, 0xa2, 0xe4, 0x74, 0xb0, 0xa6, 0x8b, 0xdf, 0xe2, 0xa5, 0xeb, 0xc7, 0xd7,
0x86, 0xf9, 0x60, 0x45, 0x46, 0x5a, 0x23, 0xdf, 0x9e, 0xdf, 0xc8, 0x0f, 0x7e, 0x07, 0xe8, 0x51,
0x2f, 0x1d, 0x7d, 0x0b, 0xd0, 0x56, 0x77, 0x2a, 0xbf, 0x6e, 0xbd, 0x89, 0xed, 0xe4, 0xa2, 0xb7,
0x07, 0x7d, 0xb3, 0xdb, 0xfc, 0xcf, 0xac, 0xc2, 0x7b, 0xd6, 0xd6, 0x77, 0xbd, 0x9f, 0xae, 0xe4,
0x9b, 0x9d, 0xa5, 0xcb, 0xa3, 0x73, 0x84, 0x74, 0x0b, 0x82, 0xb0, 0x02, 0xec, 0xbe, 0x9f, 0xf5,
0xfa, 0x9b, 0xef, 0x0b, 0xbf, 0x23, 0x0a, 0xf2, 0xae, 0xe0, 0x74, 0x25, 0x5f, 0xa7, 0x2e, 0x19,
0x22, 0x74, 0xdf, 0x4d, 0x32, 0xfc, 0x1b, 0xa0, 0xc3, 0x19, 0xaf, 0x97, 0x6f, 0x73, 0xf8, 0xf0,
0x5a, 0xbb, 0xa3, 0xee, 0x62, 0xa3, 0xe0, 0xe2, 0xdc, 0x96, 0x15, 0xbc, 0x22, 0xac, 0xc0, 0x5c,
0x14, 0x71, 0x01, 0x4c, 0xdf, 0xd3, 0x7d, 0xc4, 0x4d, 0x29, 0xef, 0xf8, 0x63, 0xbd, 0xf1, 0xd1,
0xf7, 0x70, 0xf5, 0x24, 0xcb, 0x7e, 0x84, 0xfb, 0x27, 0x46, 0x32, 0xa3, 0x12, 0x9b, 0xb0, 0x8b,
0xc6, 0x09, 0xce, 0x1d, 0xf9, 0xd3, 0x31, 0x93, 0x8c, 0xca, 0x89, 0x67, 0x26, 0xe3, 0x64, 0xe2,
0x99, 0x5f, 0xe1, 0xa1, 0x79, 0x48, 0xd3, 0x8c, 0xca, 0x34, 0xf5, 0x54, 0x9a, 0x8e, 0x93, 0x34,
0xf5, 0xdc, 0xe5, 0x3d, 0xdd, 0xec, 0xab, 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x0c, 0x35, 0x7d,
0x8b, 0x04, 0x04, 0x00, 0x00,
}
| {
return xxx_messageInfo_KeywordPlan.Size(m)
} | identifier_body |
keyword_plan.pb.go | // Code generated by protoc-gen-go. DO NOT EDIT.
// source: google/ads/googleads/v2/resources/keyword_plan.proto
package resources
import (
fmt "fmt"
math "math"
proto "github.com/golang/protobuf/proto"
wrappers "github.com/golang/protobuf/ptypes/wrappers"
common "google.golang.org/genproto/googleapis/ads/googleads/v2/common"
enums "google.golang.org/genproto/googleapis/ads/googleads/v2/enums"
_ "google.golang.org/genproto/googleapis/api/annotations"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
// A Keyword Planner plan.
// Max number of saved keyword plans: 10000.
// It's possible to remove plans if limit is reached.
type KeywordPlan struct {
// The resource name of the Keyword Planner plan.
// KeywordPlan resource names have the form:
//
// `customers/{customer_id}/keywordPlans/{kp_plan_id}`
ResourceName string `protobuf:"bytes,1,opt,name=resource_name,json=resourceName,proto3" json:"resource_name,omitempty"`
// The ID of the keyword plan.
Id *wrappers.Int64Value `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"`
// The name of the keyword plan.
//
// This field is required and should not be empty when creating new keyword
// plans.
Name *wrappers.StringValue `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"`
// The date period used for forecasting the plan.
ForecastPeriod *KeywordPlanForecastPeriod `protobuf:"bytes,4,opt,name=forecast_period,json=forecastPeriod,proto3" json:"forecast_period,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *KeywordPlan) Reset() { *m = KeywordPlan{} }
func (m *KeywordPlan) String() string { return proto.CompactTextString(m) }
func (*KeywordPlan) ProtoMessage() {}
func (*KeywordPlan) Descriptor() ([]byte, []int) {
return fileDescriptor_db2ef87e79a4b462, []int{0}
}
func (m *KeywordPlan) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_KeywordPlan.Unmarshal(m, b)
}
func (m *KeywordPlan) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_KeywordPlan.Marshal(b, m, deterministic)
}
func (m *KeywordPlan) XXX_Merge(src proto.Message) {
xxx_messageInfo_KeywordPlan.Merge(m, src)
}
func (m *KeywordPlan) XXX_Size() int {
return xxx_messageInfo_KeywordPlan.Size(m)
}
func (m *KeywordPlan) XXX_DiscardUnknown() {
xxx_messageInfo_KeywordPlan.DiscardUnknown(m)
}
var xxx_messageInfo_KeywordPlan proto.InternalMessageInfo
func (m *KeywordPlan) GetResourceName() string {
if m != nil {
return m.ResourceName
}
return ""
}
func (m *KeywordPlan) GetId() *wrappers.Int64Value {
if m != nil {
return m.Id
}
return nil
}
func (m *KeywordPlan) GetName() *wrappers.StringValue {
if m != nil {
return m.Name
}
return nil
}
func (m *KeywordPlan) GetForecastPeriod() *KeywordPlanForecastPeriod {
if m != nil |
return nil
}
// The forecasting period associated with the keyword plan.
type KeywordPlanForecastPeriod struct {
// Required. The date used for forecasting the Plan.
//
// Types that are valid to be assigned to Interval:
// *KeywordPlanForecastPeriod_DateInterval
// *KeywordPlanForecastPeriod_DateRange
Interval isKeywordPlanForecastPeriod_Interval `protobuf_oneof:"interval"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *KeywordPlanForecastPeriod) Reset() { *m = KeywordPlanForecastPeriod{} }
func (m *KeywordPlanForecastPeriod) String() string { return proto.CompactTextString(m) }
func (*KeywordPlanForecastPeriod) ProtoMessage() {}
func (*KeywordPlanForecastPeriod) Descriptor() ([]byte, []int) {
return fileDescriptor_db2ef87e79a4b462, []int{1}
}
func (m *KeywordPlanForecastPeriod) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_KeywordPlanForecastPeriod.Unmarshal(m, b)
}
func (m *KeywordPlanForecastPeriod) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_KeywordPlanForecastPeriod.Marshal(b, m, deterministic)
}
func (m *KeywordPlanForecastPeriod) XXX_Merge(src proto.Message) {
xxx_messageInfo_KeywordPlanForecastPeriod.Merge(m, src)
}
func (m *KeywordPlanForecastPeriod) XXX_Size() int {
return xxx_messageInfo_KeywordPlanForecastPeriod.Size(m)
}
func (m *KeywordPlanForecastPeriod) XXX_DiscardUnknown() {
xxx_messageInfo_KeywordPlanForecastPeriod.DiscardUnknown(m)
}
var xxx_messageInfo_KeywordPlanForecastPeriod proto.InternalMessageInfo
type isKeywordPlanForecastPeriod_Interval interface {
isKeywordPlanForecastPeriod_Interval()
}
type KeywordPlanForecastPeriod_DateInterval struct {
DateInterval enums.KeywordPlanForecastIntervalEnum_KeywordPlanForecastInterval `protobuf:"varint,1,opt,name=date_interval,json=dateInterval,proto3,enum=google.ads.googleads.v2.enums.KeywordPlanForecastIntervalEnum_KeywordPlanForecastInterval,oneof"`
}
type KeywordPlanForecastPeriod_DateRange struct {
DateRange *common.DateRange `protobuf:"bytes,2,opt,name=date_range,json=dateRange,proto3,oneof"`
}
func (*KeywordPlanForecastPeriod_DateInterval) isKeywordPlanForecastPeriod_Interval() {}
func (*KeywordPlanForecastPeriod_DateRange) isKeywordPlanForecastPeriod_Interval() {}
func (m *KeywordPlanForecastPeriod) GetInterval() isKeywordPlanForecastPeriod_Interval {
if m != nil {
return m.Interval
}
return nil
}
func (m *KeywordPlanForecastPeriod) GetDateInterval() enums.KeywordPlanForecastIntervalEnum_KeywordPlanForecastInterval {
if x, ok := m.GetInterval().(*KeywordPlanForecastPeriod_DateInterval); ok {
return x.DateInterval
}
return enums.KeywordPlanForecastIntervalEnum_UNSPECIFIED
}
func (m *KeywordPlanForecastPeriod) GetDateRange() *common.DateRange {
if x, ok := m.GetInterval().(*KeywordPlanForecastPeriod_DateRange); ok {
return x.DateRange
}
return nil
}
// XXX_OneofWrappers is for the internal use of the proto package.
func (*KeywordPlanForecastPeriod) XXX_OneofWrappers() []interface{} {
return []interface{}{
(*KeywordPlanForecastPeriod_DateInterval)(nil),
(*KeywordPlanForecastPeriod_DateRange)(nil),
}
}
func init() {
proto.RegisterType((*KeywordPlan)(nil), "google.ads.googleads.v2.resources.KeywordPlan")
proto.RegisterType((*KeywordPlanForecastPeriod)(nil), "google.ads.googleads.v2.resources.KeywordPlanForecastPeriod")
}
func init() {
proto.RegisterFile("google/ads/googleads/v2/resources/keyword_plan.proto", fileDescriptor_db2ef87e79a4b462)
}
var fileDescriptor_db2ef87e79a4b462 = []byte{
// 485 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x93, 0xdd, 0x6a, 0xd4, 0x40,
0x14, 0xc7, 0x9b, 0xb4, 0x88, 0x9d, 0x7e, 0x28, 0xb9, 0x5a, 0x6b, 0x91, 0xb6, 0x52, 0xa8, 0x0a,
0x13, 0x89, 0xc5, 0x8b, 0xe8, 0x4d, 0x16, 0xb5, 0x1f, 0x82, 0x2c, 0x11, 0xf6, 0xa2, 0x2c, 0x2c,
0xd3, 0x9d, 0xb3, 0x21, 0x98, 0xcc, 0x84, 0x99, 0xc9, 0x16, 0x2f, 0x7d, 0x15, 0x2f, 0x7d, 0x14,
0x1f, 0xc5, 0x17, 0xd0, 0x1b, 0x41, 0x32, 0x5f, 0xb4, 0xd8, 0x74, 0xef, 0xce, 0xd9, 0xf9, 0x9d,
0xff, 0xff, 0x7c, 0x64, 0xd1, 0x71, 0xc1, 0x79, 0x51, 0x41, 0x4c, 0xa8, 0x8c, 0x4d, 0xd8, 0x45,
0x8b, 0x24, 0x16, 0x20, 0x79, 0x2b, 0x66, 0x20, 0xe3, 0x2f, 0xf0, 0xf5, 0x8a, 0x0b, 0x3a, 0x6d,
0x2a, 0xc2, 0x70, 0x23, 0xb8, 0xe2, 0xd1, 0xbe, 0x41, 0x31, 0xa1, 0x12, 0xfb, 0x2a, 0xbc, 0x48,
0xb0, 0xaf, 0xda, 0x79, 0xde, 0x27, 0x3c, 0xe3, 0x75, 0xcd, 0x59, 0x4c, 0x89, 0x02, 0x69, 0xe4,
0x76, 0x86, 0x7d, 0x2c, 0xb0, 0xb6, 0xbe, 0xd9, 0xc0, 0x74, 0xce, 0x05, 0xcc, 0x88, 0x54, 0xd3,
0x92, 0x29, 0x10, 0x0b, 0x52, 0x59, 0x8d, 0x27, 0x56, 0x43, 0x67, 0x97, 0xed, 0x3c, 0xbe, 0x12,
0xa4, 0x69, 0x40, 0x38, 0x8f, 0x5d, 0xe7, 0xd1, 0x94, 0x31, 0x61, 0x8c, 0x2b, 0xa2, 0x4a, 0xce,
0xec, 0xeb, 0xc1, 0x9f, 0x00, 0x6d, 0x7c, 0x34, 0x36, 0xa3, 0x8a, 0xb0, 0xe8, 0x29, 0xda, 0x72,
0xa3, 0x4c, 0x19, 0xa9, 0x61, 0x10, 0xec, 0x05, 0x47, 0xeb, 0xf9, 0xa6, 0xfb, 0xf1, 0x13, 0xa9,
0x21, 0x7a, 0x81, 0xc2, 0x92, 0x0e, 0xc2, 0xbd, 0xe0, 0x68, 0x23, 0x79, 0x6c, 0xf7, 0x80, 0x9d,
0x3f, 0x3e, 0x63, 0xea, 0xf5, 0xf1, 0x98, 0x54, 0x2d, 0xe4, 0x61, 0x49, 0xa3, 0x97, 0x68, 0x4d,
0x0b, 0xad, 0x6a, 0x7c, 0xf7, 0x3f, 0xfc, 0xb3, 0x12, 0x25, 0x2b, 0x0c, 0xaf, 0xc9, 0x08, 0xd0,
0x03, 0x3f, 0x6c, 0x03, 0xa2, 0xe4, 0x74, 0xb0, 0xa6, 0x8b, 0xdf, 0xe2, 0xa5, 0xeb, 0xc7, 0xd7,
0x86, 0xf9, 0x60, 0x45, 0x46, 0x5a, 0x23, 0xdf, 0x9e, 0xdf, 0xc8, 0x0f, 0x7e, 0x07, 0xe8, 0x51,
0x2f, 0x1d, 0x7d, 0x0b, 0xd0, 0x56, 0x77, 0x2a, 0xbf, 0x6e, 0xbd, 0x89, 0xed, 0xe4, 0xa2, 0xb7,
0x07, 0x7d, 0xb3, 0xdb, 0xfc, 0xcf, 0xac, 0xc2, 0x7b, 0xd6, 0xd6, 0x77, 0xbd, 0x9f, 0xae, 0xe4,
0x9b, 0x9d, 0xa5, 0xcb, 0xa3, 0x73, 0x84, 0x74, 0x0b, 0x82, 0xb0, 0x02, 0xec, 0xbe, 0x9f, 0xf5,
0xfa, 0x9b, 0xef, 0x0b, 0xbf, 0x23, 0x0a, 0xf2, 0xae, 0xe0, 0x74, 0x25, 0x5f, 0xa7, 0x2e, 0x19,
0x22, 0x74, 0xdf, 0x4d, 0x32, 0xfc, 0x1b, 0xa0, 0xc3, 0x19, 0xaf, 0x97, 0x6f, 0x73, 0xf8, 0xf0,
0x5a, 0xbb, 0xa3, 0xee, 0x62, 0xa3, 0xe0, 0xe2, 0xdc, 0x96, 0x15, 0xbc, 0x22, 0xac, 0xc0, 0x5c,
0x14, 0x71, 0x01, 0x4c, 0xdf, 0xd3, 0x7d, 0xc4, 0x4d, 0x29, 0xef, 0xf8, 0x63, 0xbd, 0xf1, 0xd1,
0xf7, 0x70, 0xf5, 0x24, 0xcb, 0x7e, 0x84, 0xfb, 0x27, 0x46, 0x32, 0xa3, 0x12, 0x9b, 0xb0, 0x8b,
0xc6, 0x09, 0xce, 0x1d, 0xf9, 0xd3, 0x31, 0x93, 0x8c, 0xca, 0x89, 0x67, 0x26, 0xe3, 0x64, 0xe2,
0x99, 0x5f, 0xe1, 0xa1, 0x79, 0x48, 0xd3, 0x8c, 0xca, 0x34, 0xf5, 0x54, 0x9a, 0x8e, 0x93, 0x34,
0xf5, 0xdc, 0xe5, 0x3d, 0xdd, 0xec, 0xab, 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x0c, 0x35, 0x7d,
0x8b, 0x04, 0x04, 0x00, 0x00,
}
| {
return m.ForecastPeriod
} | conditional_block |
keyword_plan.pb.go | // Code generated by protoc-gen-go. DO NOT EDIT.
// source: google/ads/googleads/v2/resources/keyword_plan.proto
package resources
import (
fmt "fmt"
math "math"
proto "github.com/golang/protobuf/proto"
wrappers "github.com/golang/protobuf/ptypes/wrappers"
common "google.golang.org/genproto/googleapis/ads/googleads/v2/common"
enums "google.golang.org/genproto/googleapis/ads/googleads/v2/enums"
_ "google.golang.org/genproto/googleapis/api/annotations"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
// A Keyword Planner plan.
// Max number of saved keyword plans: 10000.
// It's possible to remove plans if limit is reached.
type KeywordPlan struct {
// The resource name of the Keyword Planner plan.
// KeywordPlan resource names have the form:
//
// `customers/{customer_id}/keywordPlans/{kp_plan_id}`
ResourceName string `protobuf:"bytes,1,opt,name=resource_name,json=resourceName,proto3" json:"resource_name,omitempty"`
// The ID of the keyword plan.
Id *wrappers.Int64Value `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"`
// The name of the keyword plan.
//
// This field is required and should not be empty when creating new keyword
// plans.
Name *wrappers.StringValue `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"`
// The date period used for forecasting the plan.
ForecastPeriod *KeywordPlanForecastPeriod `protobuf:"bytes,4,opt,name=forecast_period,json=forecastPeriod,proto3" json:"forecast_period,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *KeywordPlan) Reset() { *m = KeywordPlan{} }
func (m *KeywordPlan) String() string { return proto.CompactTextString(m) }
func (*KeywordPlan) ProtoMessage() {}
func (*KeywordPlan) Descriptor() ([]byte, []int) {
return fileDescriptor_db2ef87e79a4b462, []int{0}
}
func (m *KeywordPlan) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_KeywordPlan.Unmarshal(m, b)
}
func (m *KeywordPlan) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_KeywordPlan.Marshal(b, m, deterministic)
}
func (m *KeywordPlan) XXX_Merge(src proto.Message) {
xxx_messageInfo_KeywordPlan.Merge(m, src)
}
func (m *KeywordPlan) XXX_Size() int {
return xxx_messageInfo_KeywordPlan.Size(m)
}
func (m *KeywordPlan) XXX_DiscardUnknown() {
xxx_messageInfo_KeywordPlan.DiscardUnknown(m)
}
var xxx_messageInfo_KeywordPlan proto.InternalMessageInfo
func (m *KeywordPlan) GetResourceName() string {
if m != nil {
return m.ResourceName
}
return ""
}
func (m *KeywordPlan) GetId() *wrappers.Int64Value {
if m != nil {
return m.Id
}
return nil
}
func (m *KeywordPlan) GetName() *wrappers.StringValue {
if m != nil {
return m.Name
}
return nil
}
func (m *KeywordPlan) GetForecastPeriod() *KeywordPlanForecastPeriod {
if m != nil {
return m.ForecastPeriod
}
return nil
}
// The forecasting period associated with the keyword plan.
type KeywordPlanForecastPeriod struct {
// Required. The date used for forecasting the Plan.
//
// Types that are valid to be assigned to Interval:
// *KeywordPlanForecastPeriod_DateInterval
// *KeywordPlanForecastPeriod_DateRange
Interval isKeywordPlanForecastPeriod_Interval `protobuf_oneof:"interval"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *KeywordPlanForecastPeriod) Reset() { *m = KeywordPlanForecastPeriod{} }
func (m *KeywordPlanForecastPeriod) String() string { return proto.CompactTextString(m) }
func (*KeywordPlanForecastPeriod) ProtoMessage() {}
func (*KeywordPlanForecastPeriod) Descriptor() ([]byte, []int) {
return fileDescriptor_db2ef87e79a4b462, []int{1}
}
func (m *KeywordPlanForecastPeriod) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_KeywordPlanForecastPeriod.Unmarshal(m, b)
}
func (m *KeywordPlanForecastPeriod) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_KeywordPlanForecastPeriod.Marshal(b, m, deterministic)
}
func (m *KeywordPlanForecastPeriod) XXX_Merge(src proto.Message) {
xxx_messageInfo_KeywordPlanForecastPeriod.Merge(m, src)
}
func (m *KeywordPlanForecastPeriod) XXX_Size() int {
return xxx_messageInfo_KeywordPlanForecastPeriod.Size(m)
}
func (m *KeywordPlanForecastPeriod) XXX_DiscardUnknown() {
xxx_messageInfo_KeywordPlanForecastPeriod.DiscardUnknown(m)
}
var xxx_messageInfo_KeywordPlanForecastPeriod proto.InternalMessageInfo
type isKeywordPlanForecastPeriod_Interval interface {
isKeywordPlanForecastPeriod_Interval()
}
type KeywordPlanForecastPeriod_DateInterval struct {
DateInterval enums.KeywordPlanForecastIntervalEnum_KeywordPlanForecastInterval `protobuf:"varint,1,opt,name=date_interval,json=dateInterval,proto3,enum=google.ads.googleads.v2.enums.KeywordPlanForecastIntervalEnum_KeywordPlanForecastInterval,oneof"`
}
type KeywordPlanForecastPeriod_DateRange struct {
DateRange *common.DateRange `protobuf:"bytes,2,opt,name=date_range,json=dateRange,proto3,oneof"`
}
func (*KeywordPlanForecastPeriod_DateInterval) | () {}
func (*KeywordPlanForecastPeriod_DateRange) isKeywordPlanForecastPeriod_Interval() {}
func (m *KeywordPlanForecastPeriod) GetInterval() isKeywordPlanForecastPeriod_Interval {
if m != nil {
return m.Interval
}
return nil
}
func (m *KeywordPlanForecastPeriod) GetDateInterval() enums.KeywordPlanForecastIntervalEnum_KeywordPlanForecastInterval {
if x, ok := m.GetInterval().(*KeywordPlanForecastPeriod_DateInterval); ok {
return x.DateInterval
}
return enums.KeywordPlanForecastIntervalEnum_UNSPECIFIED
}
func (m *KeywordPlanForecastPeriod) GetDateRange() *common.DateRange {
if x, ok := m.GetInterval().(*KeywordPlanForecastPeriod_DateRange); ok {
return x.DateRange
}
return nil
}
// XXX_OneofWrappers is for the internal use of the proto package.
func (*KeywordPlanForecastPeriod) XXX_OneofWrappers() []interface{} {
return []interface{}{
(*KeywordPlanForecastPeriod_DateInterval)(nil),
(*KeywordPlanForecastPeriod_DateRange)(nil),
}
}
func init() {
proto.RegisterType((*KeywordPlan)(nil), "google.ads.googleads.v2.resources.KeywordPlan")
proto.RegisterType((*KeywordPlanForecastPeriod)(nil), "google.ads.googleads.v2.resources.KeywordPlanForecastPeriod")
}
func init() {
proto.RegisterFile("google/ads/googleads/v2/resources/keyword_plan.proto", fileDescriptor_db2ef87e79a4b462)
}
var fileDescriptor_db2ef87e79a4b462 = []byte{
// 485 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x93, 0xdd, 0x6a, 0xd4, 0x40,
0x14, 0xc7, 0x9b, 0xb4, 0x88, 0x9d, 0x7e, 0x28, 0xb9, 0x5a, 0x6b, 0x91, 0xb6, 0x52, 0xa8, 0x0a,
0x13, 0x89, 0xc5, 0x8b, 0xe8, 0x4d, 0x16, 0xb5, 0x1f, 0x82, 0x2c, 0x11, 0xf6, 0xa2, 0x2c, 0x2c,
0xd3, 0x9d, 0xb3, 0x21, 0x98, 0xcc, 0x84, 0x99, 0xc9, 0x16, 0x2f, 0x7d, 0x15, 0x2f, 0x7d, 0x14,
0x1f, 0xc5, 0x17, 0xd0, 0x1b, 0x41, 0x32, 0x5f, 0xb4, 0xd8, 0x74, 0xef, 0xce, 0xd9, 0xf9, 0x9d,
0xff, 0xff, 0x7c, 0x64, 0xd1, 0x71, 0xc1, 0x79, 0x51, 0x41, 0x4c, 0xa8, 0x8c, 0x4d, 0xd8, 0x45,
0x8b, 0x24, 0x16, 0x20, 0x79, 0x2b, 0x66, 0x20, 0xe3, 0x2f, 0xf0, 0xf5, 0x8a, 0x0b, 0x3a, 0x6d,
0x2a, 0xc2, 0x70, 0x23, 0xb8, 0xe2, 0xd1, 0xbe, 0x41, 0x31, 0xa1, 0x12, 0xfb, 0x2a, 0xbc, 0x48,
0xb0, 0xaf, 0xda, 0x79, 0xde, 0x27, 0x3c, 0xe3, 0x75, 0xcd, 0x59, 0x4c, 0x89, 0x02, 0x69, 0xe4,
0x76, 0x86, 0x7d, 0x2c, 0xb0, 0xb6, 0xbe, 0xd9, 0xc0, 0x74, 0xce, 0x05, 0xcc, 0x88, 0x54, 0xd3,
0x92, 0x29, 0x10, 0x0b, 0x52, 0x59, 0x8d, 0x27, 0x56, 0x43, 0x67, 0x97, 0xed, 0x3c, 0xbe, 0x12,
0xa4, 0x69, 0x40, 0x38, 0x8f, 0x5d, 0xe7, 0xd1, 0x94, 0x31, 0x61, 0x8c, 0x2b, 0xa2, 0x4a, 0xce,
0xec, 0xeb, 0xc1, 0x9f, 0x00, 0x6d, 0x7c, 0x34, 0x36, 0xa3, 0x8a, 0xb0, 0xe8, 0x29, 0xda, 0x72,
0xa3, 0x4c, 0x19, 0xa9, 0x61, 0x10, 0xec, 0x05, 0x47, 0xeb, 0xf9, 0xa6, 0xfb, 0xf1, 0x13, 0xa9,
0x21, 0x7a, 0x81, 0xc2, 0x92, 0x0e, 0xc2, 0xbd, 0xe0, 0x68, 0x23, 0x79, 0x6c, 0xf7, 0x80, 0x9d,
0x3f, 0x3e, 0x63, 0xea, 0xf5, 0xf1, 0x98, 0x54, 0x2d, 0xe4, 0x61, 0x49, 0xa3, 0x97, 0x68, 0x4d,
0x0b, 0xad, 0x6a, 0x7c, 0xf7, 0x3f, 0xfc, 0xb3, 0x12, 0x25, 0x2b, 0x0c, 0xaf, 0xc9, 0x08, 0xd0,
0x03, 0x3f, 0x6c, 0x03, 0xa2, 0xe4, 0x74, 0xb0, 0xa6, 0x8b, 0xdf, 0xe2, 0xa5, 0xeb, 0xc7, 0xd7,
0x86, 0xf9, 0x60, 0x45, 0x46, 0x5a, 0x23, 0xdf, 0x9e, 0xdf, 0xc8, 0x0f, 0x7e, 0x07, 0xe8, 0x51,
0x2f, 0x1d, 0x7d, 0x0b, 0xd0, 0x56, 0x77, 0x2a, 0xbf, 0x6e, 0xbd, 0x89, 0xed, 0xe4, 0xa2, 0xb7,
0x07, 0x7d, 0xb3, 0xdb, 0xfc, 0xcf, 0xac, 0xc2, 0x7b, 0xd6, 0xd6, 0x77, 0xbd, 0x9f, 0xae, 0xe4,
0x9b, 0x9d, 0xa5, 0xcb, 0xa3, 0x73, 0x84, 0x74, 0x0b, 0x82, 0xb0, 0x02, 0xec, 0xbe, 0x9f, 0xf5,
0xfa, 0x9b, 0xef, 0x0b, 0xbf, 0x23, 0x0a, 0xf2, 0xae, 0xe0, 0x74, 0x25, 0x5f, 0xa7, 0x2e, 0x19,
0x22, 0x74, 0xdf, 0x4d, 0x32, 0xfc, 0x1b, 0xa0, 0xc3, 0x19, 0xaf, 0x97, 0x6f, 0x73, 0xf8, 0xf0,
0x5a, 0xbb, 0xa3, 0xee, 0x62, 0xa3, 0xe0, 0xe2, 0xdc, 0x96, 0x15, 0xbc, 0x22, 0xac, 0xc0, 0x5c,
0x14, 0x71, 0x01, 0x4c, 0xdf, 0xd3, 0x7d, 0xc4, 0x4d, 0x29, 0xef, 0xf8, 0x63, 0xbd, 0xf1, 0xd1,
0xf7, 0x70, 0xf5, 0x24, 0xcb, 0x7e, 0x84, 0xfb, 0x27, 0x46, 0x32, 0xa3, 0x12, 0x9b, 0xb0, 0x8b,
0xc6, 0x09, 0xce, 0x1d, 0xf9, 0xd3, 0x31, 0x93, 0x8c, 0xca, 0x89, 0x67, 0x26, 0xe3, 0x64, 0xe2,
0x99, 0x5f, 0xe1, 0xa1, 0x79, 0x48, 0xd3, 0x8c, 0xca, 0x34, 0xf5, 0x54, 0x9a, 0x8e, 0x93, 0x34,
0xf5, 0xdc, 0xe5, 0x3d, 0xdd, 0xec, 0xab, 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x0c, 0x35, 0x7d,
0x8b, 0x04, 0x04, 0x00, 0x00,
}
| isKeywordPlanForecastPeriod_Interval | identifier_name |
keyword_plan.pb.go | // Code generated by protoc-gen-go. DO NOT EDIT.
// source: google/ads/googleads/v2/resources/keyword_plan.proto
package resources
import (
fmt "fmt"
math "math"
proto "github.com/golang/protobuf/proto"
wrappers "github.com/golang/protobuf/ptypes/wrappers"
common "google.golang.org/genproto/googleapis/ads/googleads/v2/common"
enums "google.golang.org/genproto/googleapis/ads/googleads/v2/enums"
_ "google.golang.org/genproto/googleapis/api/annotations"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
// A Keyword Planner plan.
// Max number of saved keyword plans: 10000.
// It's possible to remove plans if limit is reached.
type KeywordPlan struct {
// The resource name of the Keyword Planner plan.
// KeywordPlan resource names have the form:
//
// `customers/{customer_id}/keywordPlans/{kp_plan_id}`
ResourceName string `protobuf:"bytes,1,opt,name=resource_name,json=resourceName,proto3" json:"resource_name,omitempty"`
// The ID of the keyword plan.
Id *wrappers.Int64Value `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"`
// The name of the keyword plan.
//
// This field is required and should not be empty when creating new keyword
// plans.
Name *wrappers.StringValue `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"`
// The date period used for forecasting the plan.
ForecastPeriod *KeywordPlanForecastPeriod `protobuf:"bytes,4,opt,name=forecast_period,json=forecastPeriod,proto3" json:"forecast_period,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *KeywordPlan) Reset() { *m = KeywordPlan{} }
func (m *KeywordPlan) String() string { return proto.CompactTextString(m) }
func (*KeywordPlan) ProtoMessage() {}
func (*KeywordPlan) Descriptor() ([]byte, []int) {
return fileDescriptor_db2ef87e79a4b462, []int{0}
}
func (m *KeywordPlan) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_KeywordPlan.Unmarshal(m, b)
}
func (m *KeywordPlan) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
| xxx_messageInfo_KeywordPlan.Merge(m, src)
}
func (m *KeywordPlan) XXX_Size() int {
return xxx_messageInfo_KeywordPlan.Size(m)
}
func (m *KeywordPlan) XXX_DiscardUnknown() {
xxx_messageInfo_KeywordPlan.DiscardUnknown(m)
}
var xxx_messageInfo_KeywordPlan proto.InternalMessageInfo
func (m *KeywordPlan) GetResourceName() string {
if m != nil {
return m.ResourceName
}
return ""
}
func (m *KeywordPlan) GetId() *wrappers.Int64Value {
if m != nil {
return m.Id
}
return nil
}
func (m *KeywordPlan) GetName() *wrappers.StringValue {
if m != nil {
return m.Name
}
return nil
}
func (m *KeywordPlan) GetForecastPeriod() *KeywordPlanForecastPeriod {
if m != nil {
return m.ForecastPeriod
}
return nil
}
// The forecasting period associated with the keyword plan.
type KeywordPlanForecastPeriod struct {
// Required. The date used for forecasting the Plan.
//
// Types that are valid to be assigned to Interval:
// *KeywordPlanForecastPeriod_DateInterval
// *KeywordPlanForecastPeriod_DateRange
Interval isKeywordPlanForecastPeriod_Interval `protobuf_oneof:"interval"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *KeywordPlanForecastPeriod) Reset() { *m = KeywordPlanForecastPeriod{} }
func (m *KeywordPlanForecastPeriod) String() string { return proto.CompactTextString(m) }
func (*KeywordPlanForecastPeriod) ProtoMessage() {}
func (*KeywordPlanForecastPeriod) Descriptor() ([]byte, []int) {
return fileDescriptor_db2ef87e79a4b462, []int{1}
}
func (m *KeywordPlanForecastPeriod) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_KeywordPlanForecastPeriod.Unmarshal(m, b)
}
func (m *KeywordPlanForecastPeriod) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_KeywordPlanForecastPeriod.Marshal(b, m, deterministic)
}
func (m *KeywordPlanForecastPeriod) XXX_Merge(src proto.Message) {
xxx_messageInfo_KeywordPlanForecastPeriod.Merge(m, src)
}
func (m *KeywordPlanForecastPeriod) XXX_Size() int {
return xxx_messageInfo_KeywordPlanForecastPeriod.Size(m)
}
func (m *KeywordPlanForecastPeriod) XXX_DiscardUnknown() {
xxx_messageInfo_KeywordPlanForecastPeriod.DiscardUnknown(m)
}
var xxx_messageInfo_KeywordPlanForecastPeriod proto.InternalMessageInfo
type isKeywordPlanForecastPeriod_Interval interface {
isKeywordPlanForecastPeriod_Interval()
}
type KeywordPlanForecastPeriod_DateInterval struct {
DateInterval enums.KeywordPlanForecastIntervalEnum_KeywordPlanForecastInterval `protobuf:"varint,1,opt,name=date_interval,json=dateInterval,proto3,enum=google.ads.googleads.v2.enums.KeywordPlanForecastIntervalEnum_KeywordPlanForecastInterval,oneof"`
}
type KeywordPlanForecastPeriod_DateRange struct {
DateRange *common.DateRange `protobuf:"bytes,2,opt,name=date_range,json=dateRange,proto3,oneof"`
}
func (*KeywordPlanForecastPeriod_DateInterval) isKeywordPlanForecastPeriod_Interval() {}
func (*KeywordPlanForecastPeriod_DateRange) isKeywordPlanForecastPeriod_Interval() {}
func (m *KeywordPlanForecastPeriod) GetInterval() isKeywordPlanForecastPeriod_Interval {
if m != nil {
return m.Interval
}
return nil
}
func (m *KeywordPlanForecastPeriod) GetDateInterval() enums.KeywordPlanForecastIntervalEnum_KeywordPlanForecastInterval {
if x, ok := m.GetInterval().(*KeywordPlanForecastPeriod_DateInterval); ok {
return x.DateInterval
}
return enums.KeywordPlanForecastIntervalEnum_UNSPECIFIED
}
func (m *KeywordPlanForecastPeriod) GetDateRange() *common.DateRange {
if x, ok := m.GetInterval().(*KeywordPlanForecastPeriod_DateRange); ok {
return x.DateRange
}
return nil
}
// XXX_OneofWrappers is for the internal use of the proto package.
func (*KeywordPlanForecastPeriod) XXX_OneofWrappers() []interface{} {
return []interface{}{
(*KeywordPlanForecastPeriod_DateInterval)(nil),
(*KeywordPlanForecastPeriod_DateRange)(nil),
}
}
func init() {
proto.RegisterType((*KeywordPlan)(nil), "google.ads.googleads.v2.resources.KeywordPlan")
proto.RegisterType((*KeywordPlanForecastPeriod)(nil), "google.ads.googleads.v2.resources.KeywordPlanForecastPeriod")
}
func init() {
proto.RegisterFile("google/ads/googleads/v2/resources/keyword_plan.proto", fileDescriptor_db2ef87e79a4b462)
}
var fileDescriptor_db2ef87e79a4b462 = []byte{
// 485 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x93, 0xdd, 0x6a, 0xd4, 0x40,
0x14, 0xc7, 0x9b, 0xb4, 0x88, 0x9d, 0x7e, 0x28, 0xb9, 0x5a, 0x6b, 0x91, 0xb6, 0x52, 0xa8, 0x0a,
0x13, 0x89, 0xc5, 0x8b, 0xe8, 0x4d, 0x16, 0xb5, 0x1f, 0x82, 0x2c, 0x11, 0xf6, 0xa2, 0x2c, 0x2c,
0xd3, 0x9d, 0xb3, 0x21, 0x98, 0xcc, 0x84, 0x99, 0xc9, 0x16, 0x2f, 0x7d, 0x15, 0x2f, 0x7d, 0x14,
0x1f, 0xc5, 0x17, 0xd0, 0x1b, 0x41, 0x32, 0x5f, 0xb4, 0xd8, 0x74, 0xef, 0xce, 0xd9, 0xf9, 0x9d,
0xff, 0xff, 0x7c, 0x64, 0xd1, 0x71, 0xc1, 0x79, 0x51, 0x41, 0x4c, 0xa8, 0x8c, 0x4d, 0xd8, 0x45,
0x8b, 0x24, 0x16, 0x20, 0x79, 0x2b, 0x66, 0x20, 0xe3, 0x2f, 0xf0, 0xf5, 0x8a, 0x0b, 0x3a, 0x6d,
0x2a, 0xc2, 0x70, 0x23, 0xb8, 0xe2, 0xd1, 0xbe, 0x41, 0x31, 0xa1, 0x12, 0xfb, 0x2a, 0xbc, 0x48,
0xb0, 0xaf, 0xda, 0x79, 0xde, 0x27, 0x3c, 0xe3, 0x75, 0xcd, 0x59, 0x4c, 0x89, 0x02, 0x69, 0xe4,
0x76, 0x86, 0x7d, 0x2c, 0xb0, 0xb6, 0xbe, 0xd9, 0xc0, 0x74, 0xce, 0x05, 0xcc, 0x88, 0x54, 0xd3,
0x92, 0x29, 0x10, 0x0b, 0x52, 0x59, 0x8d, 0x27, 0x56, 0x43, 0x67, 0x97, 0xed, 0x3c, 0xbe, 0x12,
0xa4, 0x69, 0x40, 0x38, 0x8f, 0x5d, 0xe7, 0xd1, 0x94, 0x31, 0x61, 0x8c, 0x2b, 0xa2, 0x4a, 0xce,
0xec, 0xeb, 0xc1, 0x9f, 0x00, 0x6d, 0x7c, 0x34, 0x36, 0xa3, 0x8a, 0xb0, 0xe8, 0x29, 0xda, 0x72,
0xa3, 0x4c, 0x19, 0xa9, 0x61, 0x10, 0xec, 0x05, 0x47, 0xeb, 0xf9, 0xa6, 0xfb, 0xf1, 0x13, 0xa9,
0x21, 0x7a, 0x81, 0xc2, 0x92, 0x0e, 0xc2, 0xbd, 0xe0, 0x68, 0x23, 0x79, 0x6c, 0xf7, 0x80, 0x9d,
0x3f, 0x3e, 0x63, 0xea, 0xf5, 0xf1, 0x98, 0x54, 0x2d, 0xe4, 0x61, 0x49, 0xa3, 0x97, 0x68, 0x4d,
0x0b, 0xad, 0x6a, 0x7c, 0xf7, 0x3f, 0xfc, 0xb3, 0x12, 0x25, 0x2b, 0x0c, 0xaf, 0xc9, 0x08, 0xd0,
0x03, 0x3f, 0x6c, 0x03, 0xa2, 0xe4, 0x74, 0xb0, 0xa6, 0x8b, 0xdf, 0xe2, 0xa5, 0xeb, 0xc7, 0xd7,
0x86, 0xf9, 0x60, 0x45, 0x46, 0x5a, 0x23, 0xdf, 0x9e, 0xdf, 0xc8, 0x0f, 0x7e, 0x07, 0xe8, 0x51,
0x2f, 0x1d, 0x7d, 0x0b, 0xd0, 0x56, 0x77, 0x2a, 0xbf, 0x6e, 0xbd, 0x89, 0xed, 0xe4, 0xa2, 0xb7,
0x07, 0x7d, 0xb3, 0xdb, 0xfc, 0xcf, 0xac, 0xc2, 0x7b, 0xd6, 0xd6, 0x77, 0xbd, 0x9f, 0xae, 0xe4,
0x9b, 0x9d, 0xa5, 0xcb, 0xa3, 0x73, 0x84, 0x74, 0x0b, 0x82, 0xb0, 0x02, 0xec, 0xbe, 0x9f, 0xf5,
0xfa, 0x9b, 0xef, 0x0b, 0xbf, 0x23, 0x0a, 0xf2, 0xae, 0xe0, 0x74, 0x25, 0x5f, 0xa7, 0x2e, 0x19,
0x22, 0x74, 0xdf, 0x4d, 0x32, 0xfc, 0x1b, 0xa0, 0xc3, 0x19, 0xaf, 0x97, 0x6f, 0x73, 0xf8, 0xf0,
0x5a, 0xbb, 0xa3, 0xee, 0x62, 0xa3, 0xe0, 0xe2, 0xdc, 0x96, 0x15, 0xbc, 0x22, 0xac, 0xc0, 0x5c,
0x14, 0x71, 0x01, 0x4c, 0xdf, 0xd3, 0x7d, 0xc4, 0x4d, 0x29, 0xef, 0xf8, 0x63, 0xbd, 0xf1, 0xd1,
0xf7, 0x70, 0xf5, 0x24, 0xcb, 0x7e, 0x84, 0xfb, 0x27, 0x46, 0x32, 0xa3, 0x12, 0x9b, 0xb0, 0x8b,
0xc6, 0x09, 0xce, 0x1d, 0xf9, 0xd3, 0x31, 0x93, 0x8c, 0xca, 0x89, 0x67, 0x26, 0xe3, 0x64, 0xe2,
0x99, 0x5f, 0xe1, 0xa1, 0x79, 0x48, 0xd3, 0x8c, 0xca, 0x34, 0xf5, 0x54, 0x9a, 0x8e, 0x93, 0x34,
0xf5, 0xdc, 0xe5, 0x3d, 0xdd, 0xec, 0xab, 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x0c, 0x35, 0x7d,
0x8b, 0x04, 0x04, 0x00, 0x00,
} | return xxx_messageInfo_KeywordPlan.Marshal(b, m, deterministic)
}
func (m *KeywordPlan) XXX_Merge(src proto.Message) {
| random_line_split |
main.go | package main
// this broker enables connection from batsim to on HPC and one Big Data
// Analytics (BDA) schedulers like presented here:
//
// /--- HPC
// BATSIM --- BROKER --
// \--- BDA
//
// Workload is split using the ``_hpc.json`` or ``_bda.json``suffix on
// the workload filename given to batsim.
//
// It also implements bebida prolog/epilog
import (
zmq "github.com/pebbe/zmq4"
//"time"
"encoding/json"
"fmt"
//"os"
"math"
"strings"
)
// Declare this as global to use only two message buffer for the whole code
var jmsg BatMessage
var msg []byte
var hpc_reply_json []byte
var bda_reply_json []byte
var err error
// Message from the Batsim protocole
type BatMessage struct {
Now float64 `json:"now"`
Events []Event `json:"events"`
}
type Event struct {
Timestamp float64 `json:"timestamp"`
Type string `json:"type"`
Data map[string]interface{} `json:"data"`
}
// Helpers
func is_bda_workload(workload_path string) bool {
return strings.HasSuffix(workload_path, "bda.json")
}
func is_hpc_workload(workload_path string) bool {
return strings.HasSuffix(workload_path, "hpc.json")
}
func getWorkloadID(id string) string |
func recvBatsimMessage(socket *zmq.Socket) ([]byte, BatMessage) {
msg, err = socket.RecvBytes(0)
if err != nil {
panic("Error while receiving Batsim message: " + err.Error())
}
// reset message structure
jmsg = BatMessage{}
if err := json.Unmarshal(msg, &jmsg); err != nil {
panic(err)
}
return msg, jmsg
}
func removeEvents(to_remove_indexes []int, events *[]Event) {
// Do a reverse range to avoid index error
last := len(to_remove_indexes)-1
for i := range to_remove_indexes {
reverse_i := to_remove_indexes[last - i]
(*events) = append(
(*events)[:reverse_i],
(*events)[reverse_i+1:]...
)
}
}
func main() {
bat_host := "127.0.0.1"
bat_port := "28000"
bat_sock := NewReplySocket(bat_host, bat_port)
defer bat_sock.Close()
hpc_host := "127.0.0.1"
hpc_port := "28001"
hpc_sock := NewRequestSocket(hpc_host, hpc_port)
defer hpc_sock.Close()
bda_host := "127.0.0.1"
bda_port := "28002"
bda_sock := NewRequestSocket(bda_host, bda_port)
defer bda_sock.Close()
hpc_workload := "Not found"
bda_workload := "Not found"
var bda_reply BatMessage
var hpc_reply BatMessage
var bda_events []Event
var hpc_events []Event
var common_events []Event
// var epilog_blocked_hpc_events []Event
var to_remove_indexes []int
var now float64
var err error
this_is_the_end := false
resumited_bda_workload := "resubmit"
prolog_blocked_hpc_events := map[string]Event{}
// main loop
for !this_is_the_end {
// clean structures
hpc_events = []Event{}
bda_events = []Event{}
common_events = []Event{}
jmsg = BatMessage{}
bda_reply = BatMessage{}
hpc_reply = BatMessage{}
msg, err = bat_sock.RecvBytes(0)
if err != nil {
panic("Error while receiving Batsim message: " + err.Error())
}
if err := json.Unmarshal(msg, &jmsg); err != nil {
panic(err)
}
fmt.Println("Batsim -> Broker:\n", string(msg))
// BATSIM --> BROKER
// Inspect Batsim request
now = jmsg.Now
for _, event := range jmsg.Events {
switch event.Type {
case "SIMULATION_BEGINS":
{
fmt.Println("Hello Batsim!")
// get workload/scheduler mapping
for id, path := range event.Data["workloads"].(map[string]interface{}) {
if is_hpc_workload(path.(string)) {
hpc_workload = id
} else if is_bda_workload(path.(string)) {
bda_workload = id
}
}
fmt.Println("HPC Workload id is: ", hpc_workload)
fmt.Println("BDA Workload id is: ", bda_workload)
common_events = append(common_events, event)
}
case "JOB_SUBMITTED":
{
// Split message events using workload id
switch getWorkloadID(event.Data["job_id"].(string)) {
case hpc_workload:
{
hpc_events = append(hpc_events, event)
}
// WARN Dynamically submitted jobs are always given to BDA
case bda_workload, resumited_bda_workload:
{
bda_events = append(bda_events, event)
}
default:
panic("This event should go somewhere!")
}
}
case "JOB_KILLED":
{
// Split message events using first job workload id
// FIXME check if all jobs are from the same workload
switch getWorkloadID(event.Data["job_ids"].([]interface{})[0].(string)) {
case hpc_workload:
{
hpc_events = append(hpc_events, event)
}
case bda_workload, resumited_bda_workload:
{
bda_events = append(bda_events, event)
}
}
}
case "JOB_COMPLETED":
{
// Split message events using workload id
switch getWorkloadID(event.Data["job_id"].(string)) {
case hpc_workload:
{
// manage HPC jobs epilog here
fmt.Println("Trigger HPC job epilog for resources: ", event.Data["alloc"])
// Give back the allocated resources to BDA
new_event := Event{
Timestamp: now,
Type: "ADD_RESOURCES",
Data: map[string]interface{}{"resources": event.Data["alloc"]},
}
bda_events = append(bda_events, new_event)
// wait for the resources to be added to the BDA resource
// pool before notifiing the HPC scheduler that the job is
// complete
//epilog_blocked_hpc_events = append(epilog_blocked_hpc_events, event)
hpc_events = append(hpc_events, event)
}
case bda_workload, resumited_bda_workload:
{
bda_events = append(bda_events, event)
}
}
}
case "SIMULATION_ENDS":
{
fmt.Println("Bye Bye!")
common_events = append(common_events, event)
this_is_the_end = true
}
}
}
// Forward the message to one scheduler or both depending on the workload id
// And receive response from both (send empty event if nothing to send
// to sync time)
//
// /--- HPC
// BROKER --
// \--- BDA
// merge HPC specific events and common events
hpc_events = append(hpc_events, common_events...)
// create the message
msg, err = json.Marshal(BatMessage{Now: now, Events: hpc_events})
// send
hpc_sock.SendBytes(msg, 0)
fmt.Println("Broker -> HPC:\n", string(msg))
// get reply
hpc_reply_json, hpc_reply = recvBatsimMessage(hpc_sock)
fmt.Println("Broker <= HPC:\n", string(hpc_reply_json))
// Inspect HPC response
to_remove_indexes = []int{}
for index, event := range hpc_reply.Events {
switch event.Type {
case "EXECUTE_JOB":
{
// Trigger HPC job prolog here
// Run Bebida HPC job prolog
fmt.Println("Trigger HPC job prolog for resources: ", event.Data["alloc"])
// Ask BDA to remove allocated resources
new_event := Event{
Timestamp: hpc_reply.Now,
Type: "REMOVE_RESOURCES",
Data: map[string]interface{}{"resources": event.Data["alloc"]},
}
bda_events = append(bda_events, new_event)
prolog_blocked_hpc_events[event.Data["alloc"].(string)] = event
to_remove_indexes = append(to_remove_indexes, index)
}
}
}
// Hold events by removing them from events to forward
removeEvents(to_remove_indexes, &hpc_reply.Events)
// merge BDA specific events and common events
bda_events = append(bda_events, common_events...)
// create the message
msg, err = json.Marshal(BatMessage{Now: now, Events: bda_events})
// send
bda_sock.SendBytes(msg, 0)
fmt.Println("Broker -> BDA:\n", string(msg))
// get reply
bda_reply_json, bda_reply = recvBatsimMessage(bda_sock)
fmt.Println("Broker <= BDA:\n", string(bda_reply_json))
// Inspect BDA reply
to_remove_indexes = []int{}
for index, event := range bda_reply.Events {
switch event.Type {
case "RESOURCES_REMOVED":
{
// End of prolog: Resource removed event from BDA so release the message
// get blocked event from allocation
to_add_event := prolog_blocked_hpc_events[event.Data["resources"].(string)]
if to_add_event.Data["alloc"] == nil {
panic(fmt.Sprintf("Error in prolog: The resource removed ack (%s) seems to be already acknowledged", event.Data["resources"]))
}
// check that the removed resources are the same that are
// allocated by the HPC job
if to_add_event.Data["alloc"] != event.Data["resources"] {
panic(fmt.Sprintf("Error in prolog: The resource removed ack (%s) do not match blocked event resources %s", event.Data["resources"], to_add_event.Data["alloc"]))
}
// Add it to the reply
hpc_reply.Events = append(hpc_reply.Events, to_add_event)
// remove this event from BDA events
to_remove_indexes = append(to_remove_indexes, index)
// remove the blocked event from the map
delete(prolog_blocked_hpc_events, event.Data["resources"].(string))
}
//case "RESOURCES_ADDED":
// {
// // End of epilog: Resource Added event from BDA so release the message
// // pop blocked event
// var to_add_event Event
// to_add_event, epilog_blocked_hpc_events = epilog_blocked_hpc_events[0], epilog_blocked_hpc_events[1:]
// // check that the removed resources are the same that are
// // allocated by the HPC job
// if to_add_event.Data["alloc"] != event.Data["resources"] {
// panic("Error in epilog ordering!!!")
// }
// // Add it to the reply
// hpc_reply.Events = append(hpc_reply.Events, to_add_event)
// // remove this event from BDA events
// to_remove_indexes = append(to_remove_indexes, index)
// }
}
}
// Prevent Batsim to receive RESOURCES_REMOVED message
removeEvents(to_remove_indexes, &bda_reply.Events)
// Merge and forward message to batsim
//
// BATSIM <--- BROKER
// reset message structure
jmsg = BatMessage{}
// merge messages with ordered events
jmsg.Events = append(hpc_reply.Events, bda_reply.Events...)
// get higher timestamp
jmsg.Now = math.Max(bda_reply.Now, hpc_reply.Now)
msg, err = json.Marshal(jmsg)
if err != nil {
panic("Error in message merging: " + err.Error())
}
bat_sock.SendBytes(msg, 0)
fmt.Println("Batsim <= Broker(HPC+BDA):\n", string(msg))
fmt.Println("------------------------------------------")
fmt.Println("Blocked HPC events:");
blocked_events, _ := json.Marshal(prolog_blocked_hpc_events)
fmt.Println(string(blocked_events));
fmt.Println("------------------------------------------")
}
}
| {
return strings.Split(id, "!")[0]
} | identifier_body |
main.go | package main
// this broker enables connection from batsim to on HPC and one Big Data
// Analytics (BDA) schedulers like presented here:
//
// /--- HPC
// BATSIM --- BROKER --
// \--- BDA
//
// Workload is split using the ``_hpc.json`` or ``_bda.json``suffix on
// the workload filename given to batsim.
//
// It also implements bebida prolog/epilog
import (
zmq "github.com/pebbe/zmq4"
//"time"
"encoding/json"
"fmt"
//"os"
"math"
"strings"
)
// Declare this as global to use only two message buffer for the whole code
var jmsg BatMessage
var msg []byte
var hpc_reply_json []byte
var bda_reply_json []byte
var err error
// Message from the Batsim protocole
type BatMessage struct {
Now float64 `json:"now"`
Events []Event `json:"events"`
}
type Event struct {
Timestamp float64 `json:"timestamp"`
Type string `json:"type"`
Data map[string]interface{} `json:"data"`
}
// Helpers
func is_bda_workload(workload_path string) bool {
return strings.HasSuffix(workload_path, "bda.json")
}
func is_hpc_workload(workload_path string) bool {
return strings.HasSuffix(workload_path, "hpc.json")
}
func getWorkloadID(id string) string {
return strings.Split(id, "!")[0]
}
func recvBatsimMessage(socket *zmq.Socket) ([]byte, BatMessage) {
msg, err = socket.RecvBytes(0)
if err != nil {
panic("Error while receiving Batsim message: " + err.Error())
}
// reset message structure
jmsg = BatMessage{}
if err := json.Unmarshal(msg, &jmsg); err != nil {
panic(err)
}
return msg, jmsg
}
func removeEvents(to_remove_indexes []int, events *[]Event) {
// Do a reverse range to avoid index error
last := len(to_remove_indexes)-1
for i := range to_remove_indexes {
reverse_i := to_remove_indexes[last - i]
(*events) = append(
(*events)[:reverse_i],
(*events)[reverse_i+1:]...
)
}
}
func main() {
bat_host := "127.0.0.1"
bat_port := "28000"
bat_sock := NewReplySocket(bat_host, bat_port)
defer bat_sock.Close()
hpc_host := "127.0.0.1"
hpc_port := "28001"
hpc_sock := NewRequestSocket(hpc_host, hpc_port)
defer hpc_sock.Close()
bda_host := "127.0.0.1"
bda_port := "28002"
bda_sock := NewRequestSocket(bda_host, bda_port)
defer bda_sock.Close()
hpc_workload := "Not found"
bda_workload := "Not found"
var bda_reply BatMessage
var hpc_reply BatMessage
var bda_events []Event
var hpc_events []Event
var common_events []Event
// var epilog_blocked_hpc_events []Event
var to_remove_indexes []int
var now float64
var err error
this_is_the_end := false
resumited_bda_workload := "resubmit"
prolog_blocked_hpc_events := map[string]Event{}
// main loop
for !this_is_the_end |
}
| {
// clean structures
hpc_events = []Event{}
bda_events = []Event{}
common_events = []Event{}
jmsg = BatMessage{}
bda_reply = BatMessage{}
hpc_reply = BatMessage{}
msg, err = bat_sock.RecvBytes(0)
if err != nil {
panic("Error while receiving Batsim message: " + err.Error())
}
if err := json.Unmarshal(msg, &jmsg); err != nil {
panic(err)
}
fmt.Println("Batsim -> Broker:\n", string(msg))
// BATSIM --> BROKER
// Inspect Batsim request
now = jmsg.Now
for _, event := range jmsg.Events {
switch event.Type {
case "SIMULATION_BEGINS":
{
fmt.Println("Hello Batsim!")
// get workload/scheduler mapping
for id, path := range event.Data["workloads"].(map[string]interface{}) {
if is_hpc_workload(path.(string)) {
hpc_workload = id
} else if is_bda_workload(path.(string)) {
bda_workload = id
}
}
fmt.Println("HPC Workload id is: ", hpc_workload)
fmt.Println("BDA Workload id is: ", bda_workload)
common_events = append(common_events, event)
}
case "JOB_SUBMITTED":
{
// Split message events using workload id
switch getWorkloadID(event.Data["job_id"].(string)) {
case hpc_workload:
{
hpc_events = append(hpc_events, event)
}
// WARN Dynamically submitted jobs are always given to BDA
case bda_workload, resumited_bda_workload:
{
bda_events = append(bda_events, event)
}
default:
panic("This event should go somewhere!")
}
}
case "JOB_KILLED":
{
// Split message events using first job workload id
// FIXME check if all jobs are from the same workload
switch getWorkloadID(event.Data["job_ids"].([]interface{})[0].(string)) {
case hpc_workload:
{
hpc_events = append(hpc_events, event)
}
case bda_workload, resumited_bda_workload:
{
bda_events = append(bda_events, event)
}
}
}
case "JOB_COMPLETED":
{
// Split message events using workload id
switch getWorkloadID(event.Data["job_id"].(string)) {
case hpc_workload:
{
// manage HPC jobs epilog here
fmt.Println("Trigger HPC job epilog for resources: ", event.Data["alloc"])
// Give back the allocated resources to BDA
new_event := Event{
Timestamp: now,
Type: "ADD_RESOURCES",
Data: map[string]interface{}{"resources": event.Data["alloc"]},
}
bda_events = append(bda_events, new_event)
// wait for the resources to be added to the BDA resource
// pool before notifiing the HPC scheduler that the job is
// complete
//epilog_blocked_hpc_events = append(epilog_blocked_hpc_events, event)
hpc_events = append(hpc_events, event)
}
case bda_workload, resumited_bda_workload:
{
bda_events = append(bda_events, event)
}
}
}
case "SIMULATION_ENDS":
{
fmt.Println("Bye Bye!")
common_events = append(common_events, event)
this_is_the_end = true
}
}
}
// Forward the message to one scheduler or both depending on the workload id
// And receive response from both (send empty event if nothing to send
// to sync time)
//
// /--- HPC
// BROKER --
// \--- BDA
// merge HPC specific events and common events
hpc_events = append(hpc_events, common_events...)
// create the message
msg, err = json.Marshal(BatMessage{Now: now, Events: hpc_events})
// send
hpc_sock.SendBytes(msg, 0)
fmt.Println("Broker -> HPC:\n", string(msg))
// get reply
hpc_reply_json, hpc_reply = recvBatsimMessage(hpc_sock)
fmt.Println("Broker <= HPC:\n", string(hpc_reply_json))
// Inspect HPC response
to_remove_indexes = []int{}
for index, event := range hpc_reply.Events {
switch event.Type {
case "EXECUTE_JOB":
{
// Trigger HPC job prolog here
// Run Bebida HPC job prolog
fmt.Println("Trigger HPC job prolog for resources: ", event.Data["alloc"])
// Ask BDA to remove allocated resources
new_event := Event{
Timestamp: hpc_reply.Now,
Type: "REMOVE_RESOURCES",
Data: map[string]interface{}{"resources": event.Data["alloc"]},
}
bda_events = append(bda_events, new_event)
prolog_blocked_hpc_events[event.Data["alloc"].(string)] = event
to_remove_indexes = append(to_remove_indexes, index)
}
}
}
// Hold events by removing them from events to forward
removeEvents(to_remove_indexes, &hpc_reply.Events)
// merge BDA specific events and common events
bda_events = append(bda_events, common_events...)
// create the message
msg, err = json.Marshal(BatMessage{Now: now, Events: bda_events})
// send
bda_sock.SendBytes(msg, 0)
fmt.Println("Broker -> BDA:\n", string(msg))
// get reply
bda_reply_json, bda_reply = recvBatsimMessage(bda_sock)
fmt.Println("Broker <= BDA:\n", string(bda_reply_json))
// Inspect BDA reply
to_remove_indexes = []int{}
for index, event := range bda_reply.Events {
switch event.Type {
case "RESOURCES_REMOVED":
{
// End of prolog: Resource removed event from BDA so release the message
// get blocked event from allocation
to_add_event := prolog_blocked_hpc_events[event.Data["resources"].(string)]
if to_add_event.Data["alloc"] == nil {
panic(fmt.Sprintf("Error in prolog: The resource removed ack (%s) seems to be already acknowledged", event.Data["resources"]))
}
// check that the removed resources are the same that are
// allocated by the HPC job
if to_add_event.Data["alloc"] != event.Data["resources"] {
panic(fmt.Sprintf("Error in prolog: The resource removed ack (%s) do not match blocked event resources %s", event.Data["resources"], to_add_event.Data["alloc"]))
}
// Add it to the reply
hpc_reply.Events = append(hpc_reply.Events, to_add_event)
// remove this event from BDA events
to_remove_indexes = append(to_remove_indexes, index)
// remove the blocked event from the map
delete(prolog_blocked_hpc_events, event.Data["resources"].(string))
}
//case "RESOURCES_ADDED":
// {
// // End of epilog: Resource Added event from BDA so release the message
// // pop blocked event
// var to_add_event Event
// to_add_event, epilog_blocked_hpc_events = epilog_blocked_hpc_events[0], epilog_blocked_hpc_events[1:]
// // check that the removed resources are the same that are
// // allocated by the HPC job
// if to_add_event.Data["alloc"] != event.Data["resources"] {
// panic("Error in epilog ordering!!!")
// }
// // Add it to the reply
// hpc_reply.Events = append(hpc_reply.Events, to_add_event)
// // remove this event from BDA events
// to_remove_indexes = append(to_remove_indexes, index)
// }
}
}
// Prevent Batsim to receive RESOURCES_REMOVED message
removeEvents(to_remove_indexes, &bda_reply.Events)
// Merge and forward message to batsim
//
// BATSIM <--- BROKER
// reset message structure
jmsg = BatMessage{}
// merge messages with ordered events
jmsg.Events = append(hpc_reply.Events, bda_reply.Events...)
// get higher timestamp
jmsg.Now = math.Max(bda_reply.Now, hpc_reply.Now)
msg, err = json.Marshal(jmsg)
if err != nil {
panic("Error in message merging: " + err.Error())
}
bat_sock.SendBytes(msg, 0)
fmt.Println("Batsim <= Broker(HPC+BDA):\n", string(msg))
fmt.Println("------------------------------------------")
fmt.Println("Blocked HPC events:");
blocked_events, _ := json.Marshal(prolog_blocked_hpc_events)
fmt.Println(string(blocked_events));
fmt.Println("------------------------------------------")
} | conditional_block |
main.go | package main
// this broker enables connection from batsim to on HPC and one Big Data
// Analytics (BDA) schedulers like presented here:
//
// /--- HPC
// BATSIM --- BROKER --
// \--- BDA
//
// Workload is split using the ``_hpc.json`` or ``_bda.json``suffix on
// the workload filename given to batsim.
//
// It also implements bebida prolog/epilog
import (
zmq "github.com/pebbe/zmq4"
//"time"
"encoding/json"
"fmt"
//"os"
"math"
"strings"
)
// Declare this as global to use only two message buffer for the whole code
var jmsg BatMessage
var msg []byte
var hpc_reply_json []byte
var bda_reply_json []byte
var err error
// Message from the Batsim protocole
type BatMessage struct {
Now float64 `json:"now"`
Events []Event `json:"events"`
}
type Event struct {
Timestamp float64 `json:"timestamp"`
Type string `json:"type"`
Data map[string]interface{} `json:"data"`
}
// Helpers
func is_bda_workload(workload_path string) bool {
return strings.HasSuffix(workload_path, "bda.json")
}
func is_hpc_workload(workload_path string) bool {
return strings.HasSuffix(workload_path, "hpc.json")
}
func getWorkloadID(id string) string {
return strings.Split(id, "!")[0]
}
func | (socket *zmq.Socket) ([]byte, BatMessage) {
msg, err = socket.RecvBytes(0)
if err != nil {
panic("Error while receiving Batsim message: " + err.Error())
}
// reset message structure
jmsg = BatMessage{}
if err := json.Unmarshal(msg, &jmsg); err != nil {
panic(err)
}
return msg, jmsg
}
func removeEvents(to_remove_indexes []int, events *[]Event) {
// Do a reverse range to avoid index error
last := len(to_remove_indexes)-1
for i := range to_remove_indexes {
reverse_i := to_remove_indexes[last - i]
(*events) = append(
(*events)[:reverse_i],
(*events)[reverse_i+1:]...
)
}
}
func main() {
bat_host := "127.0.0.1"
bat_port := "28000"
bat_sock := NewReplySocket(bat_host, bat_port)
defer bat_sock.Close()
hpc_host := "127.0.0.1"
hpc_port := "28001"
hpc_sock := NewRequestSocket(hpc_host, hpc_port)
defer hpc_sock.Close()
bda_host := "127.0.0.1"
bda_port := "28002"
bda_sock := NewRequestSocket(bda_host, bda_port)
defer bda_sock.Close()
hpc_workload := "Not found"
bda_workload := "Not found"
var bda_reply BatMessage
var hpc_reply BatMessage
var bda_events []Event
var hpc_events []Event
var common_events []Event
// var epilog_blocked_hpc_events []Event
var to_remove_indexes []int
var now float64
var err error
this_is_the_end := false
resumited_bda_workload := "resubmit"
prolog_blocked_hpc_events := map[string]Event{}
// main loop
for !this_is_the_end {
// clean structures
hpc_events = []Event{}
bda_events = []Event{}
common_events = []Event{}
jmsg = BatMessage{}
bda_reply = BatMessage{}
hpc_reply = BatMessage{}
msg, err = bat_sock.RecvBytes(0)
if err != nil {
panic("Error while receiving Batsim message: " + err.Error())
}
if err := json.Unmarshal(msg, &jmsg); err != nil {
panic(err)
}
fmt.Println("Batsim -> Broker:\n", string(msg))
// BATSIM --> BROKER
// Inspect Batsim request
now = jmsg.Now
for _, event := range jmsg.Events {
switch event.Type {
case "SIMULATION_BEGINS":
{
fmt.Println("Hello Batsim!")
// get workload/scheduler mapping
for id, path := range event.Data["workloads"].(map[string]interface{}) {
if is_hpc_workload(path.(string)) {
hpc_workload = id
} else if is_bda_workload(path.(string)) {
bda_workload = id
}
}
fmt.Println("HPC Workload id is: ", hpc_workload)
fmt.Println("BDA Workload id is: ", bda_workload)
common_events = append(common_events, event)
}
case "JOB_SUBMITTED":
{
// Split message events using workload id
switch getWorkloadID(event.Data["job_id"].(string)) {
case hpc_workload:
{
hpc_events = append(hpc_events, event)
}
// WARN Dynamically submitted jobs are always given to BDA
case bda_workload, resumited_bda_workload:
{
bda_events = append(bda_events, event)
}
default:
panic("This event should go somewhere!")
}
}
case "JOB_KILLED":
{
// Split message events using first job workload id
// FIXME check if all jobs are from the same workload
switch getWorkloadID(event.Data["job_ids"].([]interface{})[0].(string)) {
case hpc_workload:
{
hpc_events = append(hpc_events, event)
}
case bda_workload, resumited_bda_workload:
{
bda_events = append(bda_events, event)
}
}
}
case "JOB_COMPLETED":
{
// Split message events using workload id
switch getWorkloadID(event.Data["job_id"].(string)) {
case hpc_workload:
{
// manage HPC jobs epilog here
fmt.Println("Trigger HPC job epilog for resources: ", event.Data["alloc"])
// Give back the allocated resources to BDA
new_event := Event{
Timestamp: now,
Type: "ADD_RESOURCES",
Data: map[string]interface{}{"resources": event.Data["alloc"]},
}
bda_events = append(bda_events, new_event)
// wait for the resources to be added to the BDA resource
// pool before notifiing the HPC scheduler that the job is
// complete
//epilog_blocked_hpc_events = append(epilog_blocked_hpc_events, event)
hpc_events = append(hpc_events, event)
}
case bda_workload, resumited_bda_workload:
{
bda_events = append(bda_events, event)
}
}
}
case "SIMULATION_ENDS":
{
fmt.Println("Bye Bye!")
common_events = append(common_events, event)
this_is_the_end = true
}
}
}
// Forward the message to one scheduler or both depending on the workload id
// And receive response from both (send empty event if nothing to send
// to sync time)
//
// /--- HPC
// BROKER --
// \--- BDA
// merge HPC specific events and common events
hpc_events = append(hpc_events, common_events...)
// create the message
msg, err = json.Marshal(BatMessage{Now: now, Events: hpc_events})
// send
hpc_sock.SendBytes(msg, 0)
fmt.Println("Broker -> HPC:\n", string(msg))
// get reply
hpc_reply_json, hpc_reply = recvBatsimMessage(hpc_sock)
fmt.Println("Broker <= HPC:\n", string(hpc_reply_json))
// Inspect HPC response
to_remove_indexes = []int{}
for index, event := range hpc_reply.Events {
switch event.Type {
case "EXECUTE_JOB":
{
// Trigger HPC job prolog here
// Run Bebida HPC job prolog
fmt.Println("Trigger HPC job prolog for resources: ", event.Data["alloc"])
// Ask BDA to remove allocated resources
new_event := Event{
Timestamp: hpc_reply.Now,
Type: "REMOVE_RESOURCES",
Data: map[string]interface{}{"resources": event.Data["alloc"]},
}
bda_events = append(bda_events, new_event)
prolog_blocked_hpc_events[event.Data["alloc"].(string)] = event
to_remove_indexes = append(to_remove_indexes, index)
}
}
}
// Hold events by removing them from events to forward
removeEvents(to_remove_indexes, &hpc_reply.Events)
// merge BDA specific events and common events
bda_events = append(bda_events, common_events...)
// create the message
msg, err = json.Marshal(BatMessage{Now: now, Events: bda_events})
// send
bda_sock.SendBytes(msg, 0)
fmt.Println("Broker -> BDA:\n", string(msg))
// get reply
bda_reply_json, bda_reply = recvBatsimMessage(bda_sock)
fmt.Println("Broker <= BDA:\n", string(bda_reply_json))
// Inspect BDA reply
to_remove_indexes = []int{}
for index, event := range bda_reply.Events {
switch event.Type {
case "RESOURCES_REMOVED":
{
// End of prolog: Resource removed event from BDA so release the message
// get blocked event from allocation
to_add_event := prolog_blocked_hpc_events[event.Data["resources"].(string)]
if to_add_event.Data["alloc"] == nil {
panic(fmt.Sprintf("Error in prolog: The resource removed ack (%s) seems to be already acknowledged", event.Data["resources"]))
}
// check that the removed resources are the same that are
// allocated by the HPC job
if to_add_event.Data["alloc"] != event.Data["resources"] {
panic(fmt.Sprintf("Error in prolog: The resource removed ack (%s) do not match blocked event resources %s", event.Data["resources"], to_add_event.Data["alloc"]))
}
// Add it to the reply
hpc_reply.Events = append(hpc_reply.Events, to_add_event)
// remove this event from BDA events
to_remove_indexes = append(to_remove_indexes, index)
// remove the blocked event from the map
delete(prolog_blocked_hpc_events, event.Data["resources"].(string))
}
//case "RESOURCES_ADDED":
// {
// // End of epilog: Resource Added event from BDA so release the message
// // pop blocked event
// var to_add_event Event
// to_add_event, epilog_blocked_hpc_events = epilog_blocked_hpc_events[0], epilog_blocked_hpc_events[1:]
// // check that the removed resources are the same that are
// // allocated by the HPC job
// if to_add_event.Data["alloc"] != event.Data["resources"] {
// panic("Error in epilog ordering!!!")
// }
// // Add it to the reply
// hpc_reply.Events = append(hpc_reply.Events, to_add_event)
// // remove this event from BDA events
// to_remove_indexes = append(to_remove_indexes, index)
// }
}
}
// Prevent Batsim to receive RESOURCES_REMOVED message
removeEvents(to_remove_indexes, &bda_reply.Events)
// Merge and forward message to batsim
//
// BATSIM <--- BROKER
// reset message structure
jmsg = BatMessage{}
// merge messages with ordered events
jmsg.Events = append(hpc_reply.Events, bda_reply.Events...)
// get higher timestamp
jmsg.Now = math.Max(bda_reply.Now, hpc_reply.Now)
msg, err = json.Marshal(jmsg)
if err != nil {
panic("Error in message merging: " + err.Error())
}
bat_sock.SendBytes(msg, 0)
fmt.Println("Batsim <= Broker(HPC+BDA):\n", string(msg))
fmt.Println("------------------------------------------")
fmt.Println("Blocked HPC events:");
blocked_events, _ := json.Marshal(prolog_blocked_hpc_events)
fmt.Println(string(blocked_events));
fmt.Println("------------------------------------------")
}
}
| recvBatsimMessage | identifier_name |
main.go | package main
// this broker enables connection from batsim to on HPC and one Big Data
// Analytics (BDA) schedulers like presented here:
//
// /--- HPC
// BATSIM --- BROKER --
// \--- BDA
//
// Workload is split using the ``_hpc.json`` or ``_bda.json``suffix on
// the workload filename given to batsim.
//
// It also implements bebida prolog/epilog
import (
zmq "github.com/pebbe/zmq4"
//"time"
"encoding/json"
"fmt"
//"os"
"math"
"strings"
)
// Declare this as global to use only two message buffer for the whole code
var jmsg BatMessage
var msg []byte
var hpc_reply_json []byte
var bda_reply_json []byte
var err error
// Message from the Batsim protocole
type BatMessage struct {
Now float64 `json:"now"`
Events []Event `json:"events"`
}
type Event struct {
Timestamp float64 `json:"timestamp"`
Type string `json:"type"`
Data map[string]interface{} `json:"data"`
}
// Helpers
func is_bda_workload(workload_path string) bool {
return strings.HasSuffix(workload_path, "bda.json")
}
func is_hpc_workload(workload_path string) bool {
return strings.HasSuffix(workload_path, "hpc.json")
}
func getWorkloadID(id string) string {
return strings.Split(id, "!")[0]
}
func recvBatsimMessage(socket *zmq.Socket) ([]byte, BatMessage) {
msg, err = socket.RecvBytes(0)
if err != nil {
panic("Error while receiving Batsim message: " + err.Error())
}
// reset message structure
jmsg = BatMessage{}
if err := json.Unmarshal(msg, &jmsg); err != nil {
panic(err)
}
return msg, jmsg
}
func removeEvents(to_remove_indexes []int, events *[]Event) {
// Do a reverse range to avoid index error
last := len(to_remove_indexes)-1
for i := range to_remove_indexes {
reverse_i := to_remove_indexes[last - i]
(*events) = append(
(*events)[:reverse_i],
(*events)[reverse_i+1:]...
)
}
}
func main() {
bat_host := "127.0.0.1"
bat_port := "28000"
bat_sock := NewReplySocket(bat_host, bat_port)
defer bat_sock.Close()
hpc_host := "127.0.0.1"
hpc_port := "28001"
hpc_sock := NewRequestSocket(hpc_host, hpc_port)
defer hpc_sock.Close()
bda_host := "127.0.0.1"
bda_port := "28002"
bda_sock := NewRequestSocket(bda_host, bda_port)
defer bda_sock.Close()
hpc_workload := "Not found"
bda_workload := "Not found"
var bda_reply BatMessage
var hpc_reply BatMessage
var bda_events []Event
var hpc_events []Event
var common_events []Event
// var epilog_blocked_hpc_events []Event
var to_remove_indexes []int
var now float64
var err error
this_is_the_end := false
resumited_bda_workload := "resubmit"
prolog_blocked_hpc_events := map[string]Event{}
// main loop
for !this_is_the_end {
// clean structures
hpc_events = []Event{}
bda_events = []Event{}
common_events = []Event{}
jmsg = BatMessage{}
bda_reply = BatMessage{}
hpc_reply = BatMessage{}
msg, err = bat_sock.RecvBytes(0)
if err != nil {
panic("Error while receiving Batsim message: " + err.Error())
}
if err := json.Unmarshal(msg, &jmsg); err != nil {
panic(err)
}
fmt.Println("Batsim -> Broker:\n", string(msg))
// BATSIM --> BROKER
// Inspect Batsim request
now = jmsg.Now
for _, event := range jmsg.Events {
switch event.Type {
case "SIMULATION_BEGINS":
{
fmt.Println("Hello Batsim!")
// get workload/scheduler mapping
for id, path := range event.Data["workloads"].(map[string]interface{}) {
if is_hpc_workload(path.(string)) {
hpc_workload = id | bda_workload = id
}
}
fmt.Println("HPC Workload id is: ", hpc_workload)
fmt.Println("BDA Workload id is: ", bda_workload)
common_events = append(common_events, event)
}
case "JOB_SUBMITTED":
{
// Split message events using workload id
switch getWorkloadID(event.Data["job_id"].(string)) {
case hpc_workload:
{
hpc_events = append(hpc_events, event)
}
// WARN Dynamically submitted jobs are always given to BDA
case bda_workload, resumited_bda_workload:
{
bda_events = append(bda_events, event)
}
default:
panic("This event should go somewhere!")
}
}
case "JOB_KILLED":
{
// Split message events using first job workload id
// FIXME check if all jobs are from the same workload
switch getWorkloadID(event.Data["job_ids"].([]interface{})[0].(string)) {
case hpc_workload:
{
hpc_events = append(hpc_events, event)
}
case bda_workload, resumited_bda_workload:
{
bda_events = append(bda_events, event)
}
}
}
case "JOB_COMPLETED":
{
// Split message events using workload id
switch getWorkloadID(event.Data["job_id"].(string)) {
case hpc_workload:
{
// manage HPC jobs epilog here
fmt.Println("Trigger HPC job epilog for resources: ", event.Data["alloc"])
// Give back the allocated resources to BDA
new_event := Event{
Timestamp: now,
Type: "ADD_RESOURCES",
Data: map[string]interface{}{"resources": event.Data["alloc"]},
}
bda_events = append(bda_events, new_event)
// wait for the resources to be added to the BDA resource
// pool before notifiing the HPC scheduler that the job is
// complete
//epilog_blocked_hpc_events = append(epilog_blocked_hpc_events, event)
hpc_events = append(hpc_events, event)
}
case bda_workload, resumited_bda_workload:
{
bda_events = append(bda_events, event)
}
}
}
case "SIMULATION_ENDS":
{
fmt.Println("Bye Bye!")
common_events = append(common_events, event)
this_is_the_end = true
}
}
}
// Forward the message to one scheduler or both depending on the workload id
// And receive response from both (send empty event if nothing to send
// to sync time)
//
// /--- HPC
// BROKER --
// \--- BDA
// merge HPC specific events and common events
hpc_events = append(hpc_events, common_events...)
// create the message
msg, err = json.Marshal(BatMessage{Now: now, Events: hpc_events})
// send
hpc_sock.SendBytes(msg, 0)
fmt.Println("Broker -> HPC:\n", string(msg))
// get reply
hpc_reply_json, hpc_reply = recvBatsimMessage(hpc_sock)
fmt.Println("Broker <= HPC:\n", string(hpc_reply_json))
// Inspect HPC response
to_remove_indexes = []int{}
for index, event := range hpc_reply.Events {
switch event.Type {
case "EXECUTE_JOB":
{
// Trigger HPC job prolog here
// Run Bebida HPC job prolog
fmt.Println("Trigger HPC job prolog for resources: ", event.Data["alloc"])
// Ask BDA to remove allocated resources
new_event := Event{
Timestamp: hpc_reply.Now,
Type: "REMOVE_RESOURCES",
Data: map[string]interface{}{"resources": event.Data["alloc"]},
}
bda_events = append(bda_events, new_event)
prolog_blocked_hpc_events[event.Data["alloc"].(string)] = event
to_remove_indexes = append(to_remove_indexes, index)
}
}
}
// Hold events by removing them from events to forward
removeEvents(to_remove_indexes, &hpc_reply.Events)
// merge BDA specific events and common events
bda_events = append(bda_events, common_events...)
// create the message
msg, err = json.Marshal(BatMessage{Now: now, Events: bda_events})
// send
bda_sock.SendBytes(msg, 0)
fmt.Println("Broker -> BDA:\n", string(msg))
// get reply
bda_reply_json, bda_reply = recvBatsimMessage(bda_sock)
fmt.Println("Broker <= BDA:\n", string(bda_reply_json))
// Inspect BDA reply
to_remove_indexes = []int{}
for index, event := range bda_reply.Events {
switch event.Type {
case "RESOURCES_REMOVED":
{
// End of prolog: Resource removed event from BDA so release the message
// get blocked event from allocation
to_add_event := prolog_blocked_hpc_events[event.Data["resources"].(string)]
if to_add_event.Data["alloc"] == nil {
panic(fmt.Sprintf("Error in prolog: The resource removed ack (%s) seems to be already acknowledged", event.Data["resources"]))
}
// check that the removed resources are the same that are
// allocated by the HPC job
if to_add_event.Data["alloc"] != event.Data["resources"] {
panic(fmt.Sprintf("Error in prolog: The resource removed ack (%s) do not match blocked event resources %s", event.Data["resources"], to_add_event.Data["alloc"]))
}
// Add it to the reply
hpc_reply.Events = append(hpc_reply.Events, to_add_event)
// remove this event from BDA events
to_remove_indexes = append(to_remove_indexes, index)
// remove the blocked event from the map
delete(prolog_blocked_hpc_events, event.Data["resources"].(string))
}
//case "RESOURCES_ADDED":
// {
// // End of epilog: Resource Added event from BDA so release the message
// // pop blocked event
// var to_add_event Event
// to_add_event, epilog_blocked_hpc_events = epilog_blocked_hpc_events[0], epilog_blocked_hpc_events[1:]
// // check that the removed resources are the same that are
// // allocated by the HPC job
// if to_add_event.Data["alloc"] != event.Data["resources"] {
// panic("Error in epilog ordering!!!")
// }
// // Add it to the reply
// hpc_reply.Events = append(hpc_reply.Events, to_add_event)
// // remove this event from BDA events
// to_remove_indexes = append(to_remove_indexes, index)
// }
}
}
// Prevent Batsim to receive RESOURCES_REMOVED message
removeEvents(to_remove_indexes, &bda_reply.Events)
// Merge and forward message to batsim
//
// BATSIM <--- BROKER
// reset message structure
jmsg = BatMessage{}
// merge messages with ordered events
jmsg.Events = append(hpc_reply.Events, bda_reply.Events...)
// get higher timestamp
jmsg.Now = math.Max(bda_reply.Now, hpc_reply.Now)
msg, err = json.Marshal(jmsg)
if err != nil {
panic("Error in message merging: " + err.Error())
}
bat_sock.SendBytes(msg, 0)
fmt.Println("Batsim <= Broker(HPC+BDA):\n", string(msg))
fmt.Println("------------------------------------------")
fmt.Println("Blocked HPC events:");
blocked_events, _ := json.Marshal(prolog_blocked_hpc_events)
fmt.Println(string(blocked_events));
fmt.Println("------------------------------------------")
}
} |
} else if is_bda_workload(path.(string)) { | random_line_split |
qutex.rs | //! A queue-backed exclusive data lock.
//!
//
// * It is unclear how many of the unsafe methods within need actually remain
// unsafe.
use crossbeam::queue::SegQueue;
use futures::sync::oneshot::{self, Canceled, Receiver, Sender};
use futures::{Future, Poll};
use std::cell::UnsafeCell;
use std::ops::{Deref, DerefMut};
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering::SeqCst;
use std::sync::Arc;
/// Allows access to the data contained within a lock just like a mutex guard.
#[derive(Debug)]
pub struct Guard<T> {
qutex: Qutex<T>,
}
impl<T> Guard<T> {
/// Releases the lock held by a `Guard` and returns the original `Qutex`.
pub fn unlock(guard: Guard<T>) -> Qutex<T> {
let qutex = unsafe { ::std::ptr::read(&guard.qutex) };
::std::mem::forget(guard);
unsafe { qutex.direct_unlock() }
qutex
}
}
impl<T> Deref for Guard<T> {
type Target = T;
fn deref(&self) -> &T {
unsafe { &*self.qutex.inner.cell.get() }
}
}
impl<T> DerefMut for Guard<T> {
fn deref_mut(&mut self) -> &mut T {
unsafe { &mut *self.qutex.inner.cell.get() }
}
}
impl<T> Drop for Guard<T> {
fn drop(&mut self) {
// unsafe { self.qutex.direct_unlock().expect("Error dropping Guard") };
unsafe { self.qutex.direct_unlock() }
}
}
/// A future which resolves to a `Guard`.
#[must_use = "futures do nothing unless polled"]
#[derive(Debug)]
pub struct FutureGuard<T> {
qutex: Option<Qutex<T>>,
rx: Receiver<()>,
}
impl<T> FutureGuard<T> {
/// Returns a new `FutureGuard`.
fn new(qutex: Qutex<T>, rx: Receiver<()>) -> FutureGuard<T> {
FutureGuard {
qutex: Some(qutex),
rx: rx,
}
}
/// Blocks the current thread until this future resolves.
#[inline]
pub fn wait(self) -> Result<Guard<T>, Canceled> {
<Self as Future>::wait(self)
}
}
impl<T> Future for FutureGuard<T> {
type Item = Guard<T>;
type Error = Canceled;
#[inline]
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
if self.qutex.is_some() {
unsafe { self.qutex.as_ref().unwrap().process_queue() }
match self.rx.poll() {
Ok(status) => Ok(status.map(|_| Guard {
qutex: self.qutex.take().unwrap(),
})),
Err(e) => Err(e.into()),
}
} else {
panic!("FutureGuard::poll: Task already completed.");
}
}
}
impl<T> Drop for FutureGuard<T> {
/// Gracefully unlock if this guard has a lock acquired but has not yet
/// been polled to completion.
fn drop(&mut self) {
if let Some(qutex) = self.qutex.take() {
self.rx.close();
match self.rx.try_recv() {
Ok(status) => {
if status.is_some() {
unsafe {
qutex.direct_unlock();
}
}
}
Err(_) => (),
}
}
}
}
/// A request to lock the qutex for exclusive access.
#[derive(Debug)]
pub struct Request {
tx: Sender<()>,
}
impl Request {
/// Returns a new `Request`.
pub fn new(tx: Sender<()>) -> Request {
Request { tx: tx }
}
}
#[derive(Debug)]
struct Inner<T> {
// TODO: Convert to `AtomicBool` if no additional states are needed:
state: AtomicUsize,
cell: UnsafeCell<T>,
queue: SegQueue<Request>,
}
impl<T> From<T> for Inner<T> {
#[inline]
fn from(val: T) -> Inner<T> {
Inner {
state: AtomicUsize::new(0),
cell: UnsafeCell::new(val),
queue: SegQueue::new(),
}
}
}
unsafe impl<T: Send> Send for Inner<T> {}
unsafe impl<T: Send> Sync for Inner<T> {}
/// A lock-free-queue-backed exclusive data lock.
#[derive(Debug)]
pub struct Qutex<T> {
inner: Arc<Inner<T>>,
}
impl<T> Qutex<T> {
/// Creates and returns a new `Qutex`.
#[inline]
pub fn new(val: T) -> Qutex<T> {
Qutex {
inner: Arc::new(Inner::from(val)),
}
}
/// Returns a new `FutureGuard` which can be used as a future and will
/// resolve into a `Guard`.
pub fn lock(self) -> FutureGuard<T> {
let (tx, rx) = oneshot::channel();
unsafe {
self.push_request(Request::new(tx));
}
FutureGuard::new(self, rx)
}
/// Pushes a lock request onto the queue.
///
//
// TODO: Evaluate unsafe-ness.
//
#[inline]
pub unsafe fn push_request(&self, req: Request) {
self.inner.queue.push(req);
}
/// Returns a mutable reference to the inner `Vec` if there are currently
/// no other copies of this `Qutex`.
///
/// Since this call borrows the inner lock mutably, no actual locking needs to
/// take place---the mutable borrow statically guarantees no locks exist.
///
#[inline]
pub fn get_mut(&mut self) -> Option<&mut T> {
Arc::get_mut(&mut self.inner).map(|inn| unsafe { &mut *inn.cell.get() })
}
/// Returns a reference to the inner value.
///
#[inline]
pub fn as_ptr(&self) -> *const T {
self.inner.cell.get()
}
/// Returns a mutable reference to the inner value.
///
#[inline]
pub fn as_mut_ptr(&self) -> *mut T {
self.inner.cell.get()
}
/// Pops the next lock request in the queue if this (the caller's) lock is
/// unlocked.
//
// TODO:
// * This is currently public due to 'derivers' (aka. sub-types). Evaluate.
// * Consider removing unsafe qualifier.
// * Return proper error type.
// * [performance] Determine whether or not `compare_exchange_weak` should be used instead.
// * [performance] Consider failure ordering.
//
pub unsafe fn process_queue(&self) {
match self.inner.state.compare_exchange(0, 1, SeqCst, SeqCst) {
// Unlocked:
Ok(0) => {
loop {
if let Some(req) = self.inner.queue.pop() {
// If there is a send error, a requester has dropped
// its receiver so just go to the next.
if req.tx.send(()).is_err() {
continue;
} else {
break;
}
} else {
self.inner.state.store(0, SeqCst);
break;
}
}
}
// Already locked, leave it alone:
Err(1) => (),
// Already locked, leave it alone:
//
// TODO: Remove this option. Should be unreachable.
//
Ok(1) => unreachable!(),
// Something else:
Ok(n) => panic!("Qutex::process_queue: inner.state: {}.", n),
Err(n) => panic!("Qutex::process_queue: error: {}.", n),
}
}
/// Unlocks this (the caller's) lock and wakes up the next task in the
/// queue.
//
// TODO:
// * Evaluate unsafe-ness.
// * Return proper error type
// pub unsafe fn direct_unlock(&self) -> Result<(), ()> {
pub unsafe fn | (&self) {
// TODO: Consider using `Ordering::Release`.
self.inner.state.store(0, SeqCst);
self.process_queue()
}
}
impl<T> From<T> for Qutex<T> {
#[inline]
fn from(val: T) -> Qutex<T> {
Qutex::new(val)
}
}
// Avoids needing `T: Clone`.
impl<T> Clone for Qutex<T> {
#[inline]
fn clone(&self) -> Qutex<T> {
Qutex {
inner: self.inner.clone(),
}
}
}
#[cfg(test)]
// Woefully incomplete:
mod tests {
use super::*;
use futures::Future;
#[test]
fn simple() {
let val = Qutex::from(999i32);
println!("Reading val...");
{
let future_guard = val.clone().lock();
let guard = future_guard.wait().unwrap();
println!("val: {}", *guard);
}
println!("Storing new val...");
{
let future_guard = val.clone().lock();
let mut guard = future_guard.wait().unwrap();
*guard = 5;
}
println!("Reading val...");
{
let future_guard = val.clone().lock();
let guard = future_guard.wait().unwrap();
println!("val: {}", *guard);
}
}
#[test]
fn concurrent() {
use std::thread;
let thread_count = 20;
let mut threads = Vec::with_capacity(thread_count);
let start_val = 0i32;
let qutex = Qutex::new(start_val);
for i in 0..thread_count {
let future_guard = qutex.clone().lock();
let future_write = future_guard.and_then(|mut guard| {
*guard += 1;
Ok(())
});
threads.push(
thread::Builder::new()
.name(format!("test_thread_{}", i))
.spawn(|| future_write.wait().unwrap())
.unwrap(),
);
}
for i in 0..thread_count {
let future_guard = qutex.clone().lock();
threads.push(
thread::Builder::new()
.name(format!("test_thread_{}", i + thread_count))
.spawn(|| {
let mut guard = future_guard.wait().unwrap();
*guard -= 1;
})
.unwrap(),
)
}
for thread in threads {
thread.join().unwrap();
}
let guard = qutex.clone().lock().wait().unwrap();
assert_eq!(*guard, start_val);
}
#[test]
fn future_guard_drop() {
let lock = Qutex::from(true);
let _future_guard_0 = lock.clone().lock();
let _future_guard_1 = lock.clone().lock();
let _future_guard_2 = lock.clone().lock();
// TODO: FINISH ME
}
#[test]
fn explicit_unlock() {
let lock = Qutex::from(true);
let mut guard_0 = lock.clone().lock().wait().unwrap();
*guard_0 = false;
let _ = Guard::unlock(guard_0);
// Will deadlock if this doesn't work:
let guard_1 = lock.clone().lock().wait().unwrap();
assert!(*guard_1 == false);
}
}
| direct_unlock | identifier_name |
qutex.rs | //! A queue-backed exclusive data lock.
//!
//
// * It is unclear how many of the unsafe methods within need actually remain
// unsafe.
use crossbeam::queue::SegQueue;
use futures::sync::oneshot::{self, Canceled, Receiver, Sender};
use futures::{Future, Poll};
use std::cell::UnsafeCell;
use std::ops::{Deref, DerefMut};
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering::SeqCst;
use std::sync::Arc;
/// Allows access to the data contained within a lock just like a mutex guard.
#[derive(Debug)]
pub struct Guard<T> {
qutex: Qutex<T>,
}
impl<T> Guard<T> {
/// Releases the lock held by a `Guard` and returns the original `Qutex`.
pub fn unlock(guard: Guard<T>) -> Qutex<T> {
let qutex = unsafe { ::std::ptr::read(&guard.qutex) };
::std::mem::forget(guard);
unsafe { qutex.direct_unlock() }
qutex
}
}
impl<T> Deref for Guard<T> {
type Target = T;
fn deref(&self) -> &T {
unsafe { &*self.qutex.inner.cell.get() }
}
}
impl<T> DerefMut for Guard<T> {
fn deref_mut(&mut self) -> &mut T {
unsafe { &mut *self.qutex.inner.cell.get() }
}
}
impl<T> Drop for Guard<T> {
fn drop(&mut self) {
// unsafe { self.qutex.direct_unlock().expect("Error dropping Guard") };
unsafe { self.qutex.direct_unlock() }
}
}
/// A future which resolves to a `Guard`.
#[must_use = "futures do nothing unless polled"]
#[derive(Debug)]
pub struct FutureGuard<T> {
qutex: Option<Qutex<T>>,
rx: Receiver<()>,
}
impl<T> FutureGuard<T> {
/// Returns a new `FutureGuard`.
fn new(qutex: Qutex<T>, rx: Receiver<()>) -> FutureGuard<T> {
FutureGuard {
qutex: Some(qutex),
rx: rx,
}
}
/// Blocks the current thread until this future resolves.
#[inline]
pub fn wait(self) -> Result<Guard<T>, Canceled> {
<Self as Future>::wait(self)
}
}
impl<T> Future for FutureGuard<T> {
type Item = Guard<T>;
type Error = Canceled;
#[inline]
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
if self.qutex.is_some() | else {
panic!("FutureGuard::poll: Task already completed.");
}
}
}
impl<T> Drop for FutureGuard<T> {
/// Gracefully unlock if this guard has a lock acquired but has not yet
/// been polled to completion.
fn drop(&mut self) {
if let Some(qutex) = self.qutex.take() {
self.rx.close();
match self.rx.try_recv() {
Ok(status) => {
if status.is_some() {
unsafe {
qutex.direct_unlock();
}
}
}
Err(_) => (),
}
}
}
}
/// A request to lock the qutex for exclusive access.
#[derive(Debug)]
pub struct Request {
tx: Sender<()>,
}
impl Request {
/// Returns a new `Request`.
pub fn new(tx: Sender<()>) -> Request {
Request { tx: tx }
}
}
#[derive(Debug)]
struct Inner<T> {
// TODO: Convert to `AtomicBool` if no additional states are needed:
state: AtomicUsize,
cell: UnsafeCell<T>,
queue: SegQueue<Request>,
}
impl<T> From<T> for Inner<T> {
#[inline]
fn from(val: T) -> Inner<T> {
Inner {
state: AtomicUsize::new(0),
cell: UnsafeCell::new(val),
queue: SegQueue::new(),
}
}
}
unsafe impl<T: Send> Send for Inner<T> {}
unsafe impl<T: Send> Sync for Inner<T> {}
/// A lock-free-queue-backed exclusive data lock.
#[derive(Debug)]
pub struct Qutex<T> {
inner: Arc<Inner<T>>,
}
impl<T> Qutex<T> {
/// Creates and returns a new `Qutex`.
#[inline]
pub fn new(val: T) -> Qutex<T> {
Qutex {
inner: Arc::new(Inner::from(val)),
}
}
/// Returns a new `FutureGuard` which can be used as a future and will
/// resolve into a `Guard`.
pub fn lock(self) -> FutureGuard<T> {
let (tx, rx) = oneshot::channel();
unsafe {
self.push_request(Request::new(tx));
}
FutureGuard::new(self, rx)
}
/// Pushes a lock request onto the queue.
///
//
// TODO: Evaluate unsafe-ness.
//
#[inline]
pub unsafe fn push_request(&self, req: Request) {
self.inner.queue.push(req);
}
/// Returns a mutable reference to the inner `Vec` if there are currently
/// no other copies of this `Qutex`.
///
/// Since this call borrows the inner lock mutably, no actual locking needs to
/// take place---the mutable borrow statically guarantees no locks exist.
///
#[inline]
pub fn get_mut(&mut self) -> Option<&mut T> {
Arc::get_mut(&mut self.inner).map(|inn| unsafe { &mut *inn.cell.get() })
}
/// Returns a reference to the inner value.
///
#[inline]
pub fn as_ptr(&self) -> *const T {
self.inner.cell.get()
}
/// Returns a mutable reference to the inner value.
///
#[inline]
pub fn as_mut_ptr(&self) -> *mut T {
self.inner.cell.get()
}
/// Pops the next lock request in the queue if this (the caller's) lock is
/// unlocked.
//
// TODO:
// * This is currently public due to 'derivers' (aka. sub-types). Evaluate.
// * Consider removing unsafe qualifier.
// * Return proper error type.
// * [performance] Determine whether or not `compare_exchange_weak` should be used instead.
// * [performance] Consider failure ordering.
//
pub unsafe fn process_queue(&self) {
match self.inner.state.compare_exchange(0, 1, SeqCst, SeqCst) {
// Unlocked:
Ok(0) => {
loop {
if let Some(req) = self.inner.queue.pop() {
// If there is a send error, a requester has dropped
// its receiver so just go to the next.
if req.tx.send(()).is_err() {
continue;
} else {
break;
}
} else {
self.inner.state.store(0, SeqCst);
break;
}
}
}
// Already locked, leave it alone:
Err(1) => (),
// Already locked, leave it alone:
//
// TODO: Remove this option. Should be unreachable.
//
Ok(1) => unreachable!(),
// Something else:
Ok(n) => panic!("Qutex::process_queue: inner.state: {}.", n),
Err(n) => panic!("Qutex::process_queue: error: {}.", n),
}
}
/// Unlocks this (the caller's) lock and wakes up the next task in the
/// queue.
//
// TODO:
// * Evaluate unsafe-ness.
// * Return proper error type
// pub unsafe fn direct_unlock(&self) -> Result<(), ()> {
pub unsafe fn direct_unlock(&self) {
// TODO: Consider using `Ordering::Release`.
self.inner.state.store(0, SeqCst);
self.process_queue()
}
}
impl<T> From<T> for Qutex<T> {
#[inline]
fn from(val: T) -> Qutex<T> {
Qutex::new(val)
}
}
// Avoids needing `T: Clone`.
impl<T> Clone for Qutex<T> {
#[inline]
fn clone(&self) -> Qutex<T> {
Qutex {
inner: self.inner.clone(),
}
}
}
#[cfg(test)]
// Woefully incomplete:
mod tests {
use super::*;
use futures::Future;
#[test]
fn simple() {
let val = Qutex::from(999i32);
println!("Reading val...");
{
let future_guard = val.clone().lock();
let guard = future_guard.wait().unwrap();
println!("val: {}", *guard);
}
println!("Storing new val...");
{
let future_guard = val.clone().lock();
let mut guard = future_guard.wait().unwrap();
*guard = 5;
}
println!("Reading val...");
{
let future_guard = val.clone().lock();
let guard = future_guard.wait().unwrap();
println!("val: {}", *guard);
}
}
#[test]
fn concurrent() {
use std::thread;
let thread_count = 20;
let mut threads = Vec::with_capacity(thread_count);
let start_val = 0i32;
let qutex = Qutex::new(start_val);
for i in 0..thread_count {
let future_guard = qutex.clone().lock();
let future_write = future_guard.and_then(|mut guard| {
*guard += 1;
Ok(())
});
threads.push(
thread::Builder::new()
.name(format!("test_thread_{}", i))
.spawn(|| future_write.wait().unwrap())
.unwrap(),
);
}
for i in 0..thread_count {
let future_guard = qutex.clone().lock();
threads.push(
thread::Builder::new()
.name(format!("test_thread_{}", i + thread_count))
.spawn(|| {
let mut guard = future_guard.wait().unwrap();
*guard -= 1;
})
.unwrap(),
)
}
for thread in threads {
thread.join().unwrap();
}
let guard = qutex.clone().lock().wait().unwrap();
assert_eq!(*guard, start_val);
}
#[test]
fn future_guard_drop() {
let lock = Qutex::from(true);
let _future_guard_0 = lock.clone().lock();
let _future_guard_1 = lock.clone().lock();
let _future_guard_2 = lock.clone().lock();
// TODO: FINISH ME
}
#[test]
fn explicit_unlock() {
let lock = Qutex::from(true);
let mut guard_0 = lock.clone().lock().wait().unwrap();
*guard_0 = false;
let _ = Guard::unlock(guard_0);
// Will deadlock if this doesn't work:
let guard_1 = lock.clone().lock().wait().unwrap();
assert!(*guard_1 == false);
}
}
| {
unsafe { self.qutex.as_ref().unwrap().process_queue() }
match self.rx.poll() {
Ok(status) => Ok(status.map(|_| Guard {
qutex: self.qutex.take().unwrap(),
})),
Err(e) => Err(e.into()),
}
} | conditional_block |
qutex.rs | //! A queue-backed exclusive data lock.
//!
//
// * It is unclear how many of the unsafe methods within need actually remain
// unsafe.
use crossbeam::queue::SegQueue;
use futures::sync::oneshot::{self, Canceled, Receiver, Sender};
use futures::{Future, Poll};
use std::cell::UnsafeCell;
use std::ops::{Deref, DerefMut};
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering::SeqCst;
use std::sync::Arc;
/// Allows access to the data contained within a lock just like a mutex guard.
#[derive(Debug)]
pub struct Guard<T> {
qutex: Qutex<T>,
}
impl<T> Guard<T> {
/// Releases the lock held by a `Guard` and returns the original `Qutex`.
pub fn unlock(guard: Guard<T>) -> Qutex<T> {
let qutex = unsafe { ::std::ptr::read(&guard.qutex) };
::std::mem::forget(guard);
unsafe { qutex.direct_unlock() }
qutex
}
}
impl<T> Deref for Guard<T> {
type Target = T;
fn deref(&self) -> &T {
unsafe { &*self.qutex.inner.cell.get() }
}
}
impl<T> DerefMut for Guard<T> {
fn deref_mut(&mut self) -> &mut T {
unsafe { &mut *self.qutex.inner.cell.get() }
}
}
impl<T> Drop for Guard<T> {
fn drop(&mut self) {
// unsafe { self.qutex.direct_unlock().expect("Error dropping Guard") };
unsafe { self.qutex.direct_unlock() }
}
}
/// A future which resolves to a `Guard`.
#[must_use = "futures do nothing unless polled"]
#[derive(Debug)]
pub struct FutureGuard<T> {
qutex: Option<Qutex<T>>,
rx: Receiver<()>,
}
impl<T> FutureGuard<T> {
/// Returns a new `FutureGuard`.
fn new(qutex: Qutex<T>, rx: Receiver<()>) -> FutureGuard<T> {
FutureGuard {
qutex: Some(qutex),
rx: rx,
}
}
/// Blocks the current thread until this future resolves.
#[inline]
pub fn wait(self) -> Result<Guard<T>, Canceled> {
<Self as Future>::wait(self)
}
}
impl<T> Future for FutureGuard<T> {
type Item = Guard<T>;
type Error = Canceled;
#[inline]
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
if self.qutex.is_some() {
unsafe { self.qutex.as_ref().unwrap().process_queue() }
match self.rx.poll() {
Ok(status) => Ok(status.map(|_| Guard {
qutex: self.qutex.take().unwrap(),
})),
Err(e) => Err(e.into()),
}
} else {
panic!("FutureGuard::poll: Task already completed.");
}
}
}
impl<T> Drop for FutureGuard<T> {
/// Gracefully unlock if this guard has a lock acquired but has not yet
/// been polled to completion.
fn drop(&mut self) {
if let Some(qutex) = self.qutex.take() {
self.rx.close();
match self.rx.try_recv() {
Ok(status) => {
if status.is_some() {
unsafe {
qutex.direct_unlock();
}
}
}
Err(_) => (),
}
}
}
}
/// A request to lock the qutex for exclusive access.
#[derive(Debug)]
pub struct Request {
tx: Sender<()>,
}
impl Request {
/// Returns a new `Request`.
pub fn new(tx: Sender<()>) -> Request {
Request { tx: tx }
}
}
#[derive(Debug)]
struct Inner<T> {
// TODO: Convert to `AtomicBool` if no additional states are needed:
state: AtomicUsize,
cell: UnsafeCell<T>,
queue: SegQueue<Request>,
}
impl<T> From<T> for Inner<T> {
#[inline]
fn from(val: T) -> Inner<T> {
Inner {
state: AtomicUsize::new(0),
cell: UnsafeCell::new(val),
queue: SegQueue::new(),
}
}
}
unsafe impl<T: Send> Send for Inner<T> {}
unsafe impl<T: Send> Sync for Inner<T> {}
/// A lock-free-queue-backed exclusive data lock.
#[derive(Debug)]
pub struct Qutex<T> {
inner: Arc<Inner<T>>,
}
impl<T> Qutex<T> {
/// Creates and returns a new `Qutex`.
#[inline]
pub fn new(val: T) -> Qutex<T> {
Qutex {
inner: Arc::new(Inner::from(val)),
}
} | let (tx, rx) = oneshot::channel();
unsafe {
self.push_request(Request::new(tx));
}
FutureGuard::new(self, rx)
}
/// Pushes a lock request onto the queue.
///
//
// TODO: Evaluate unsafe-ness.
//
#[inline]
pub unsafe fn push_request(&self, req: Request) {
self.inner.queue.push(req);
}
/// Returns a mutable reference to the inner `Vec` if there are currently
/// no other copies of this `Qutex`.
///
/// Since this call borrows the inner lock mutably, no actual locking needs to
/// take place---the mutable borrow statically guarantees no locks exist.
///
#[inline]
pub fn get_mut(&mut self) -> Option<&mut T> {
Arc::get_mut(&mut self.inner).map(|inn| unsafe { &mut *inn.cell.get() })
}
/// Returns a reference to the inner value.
///
#[inline]
pub fn as_ptr(&self) -> *const T {
self.inner.cell.get()
}
/// Returns a mutable reference to the inner value.
///
#[inline]
pub fn as_mut_ptr(&self) -> *mut T {
self.inner.cell.get()
}
/// Pops the next lock request in the queue if this (the caller's) lock is
/// unlocked.
//
// TODO:
// * This is currently public due to 'derivers' (aka. sub-types). Evaluate.
// * Consider removing unsafe qualifier.
// * Return proper error type.
// * [performance] Determine whether or not `compare_exchange_weak` should be used instead.
// * [performance] Consider failure ordering.
//
pub unsafe fn process_queue(&self) {
match self.inner.state.compare_exchange(0, 1, SeqCst, SeqCst) {
// Unlocked:
Ok(0) => {
loop {
if let Some(req) = self.inner.queue.pop() {
// If there is a send error, a requester has dropped
// its receiver so just go to the next.
if req.tx.send(()).is_err() {
continue;
} else {
break;
}
} else {
self.inner.state.store(0, SeqCst);
break;
}
}
}
// Already locked, leave it alone:
Err(1) => (),
// Already locked, leave it alone:
//
// TODO: Remove this option. Should be unreachable.
//
Ok(1) => unreachable!(),
// Something else:
Ok(n) => panic!("Qutex::process_queue: inner.state: {}.", n),
Err(n) => panic!("Qutex::process_queue: error: {}.", n),
}
}
/// Unlocks this (the caller's) lock and wakes up the next task in the
/// queue.
//
// TODO:
// * Evaluate unsafe-ness.
// * Return proper error type
// pub unsafe fn direct_unlock(&self) -> Result<(), ()> {
pub unsafe fn direct_unlock(&self) {
// TODO: Consider using `Ordering::Release`.
self.inner.state.store(0, SeqCst);
self.process_queue()
}
}
impl<T> From<T> for Qutex<T> {
#[inline]
fn from(val: T) -> Qutex<T> {
Qutex::new(val)
}
}
// Avoids needing `T: Clone`.
impl<T> Clone for Qutex<T> {
#[inline]
fn clone(&self) -> Qutex<T> {
Qutex {
inner: self.inner.clone(),
}
}
}
#[cfg(test)]
// Woefully incomplete:
mod tests {
use super::*;
use futures::Future;
#[test]
fn simple() {
let val = Qutex::from(999i32);
println!("Reading val...");
{
let future_guard = val.clone().lock();
let guard = future_guard.wait().unwrap();
println!("val: {}", *guard);
}
println!("Storing new val...");
{
let future_guard = val.clone().lock();
let mut guard = future_guard.wait().unwrap();
*guard = 5;
}
println!("Reading val...");
{
let future_guard = val.clone().lock();
let guard = future_guard.wait().unwrap();
println!("val: {}", *guard);
}
}
#[test]
fn concurrent() {
use std::thread;
let thread_count = 20;
let mut threads = Vec::with_capacity(thread_count);
let start_val = 0i32;
let qutex = Qutex::new(start_val);
for i in 0..thread_count {
let future_guard = qutex.clone().lock();
let future_write = future_guard.and_then(|mut guard| {
*guard += 1;
Ok(())
});
threads.push(
thread::Builder::new()
.name(format!("test_thread_{}", i))
.spawn(|| future_write.wait().unwrap())
.unwrap(),
);
}
for i in 0..thread_count {
let future_guard = qutex.clone().lock();
threads.push(
thread::Builder::new()
.name(format!("test_thread_{}", i + thread_count))
.spawn(|| {
let mut guard = future_guard.wait().unwrap();
*guard -= 1;
})
.unwrap(),
)
}
for thread in threads {
thread.join().unwrap();
}
let guard = qutex.clone().lock().wait().unwrap();
assert_eq!(*guard, start_val);
}
#[test]
fn future_guard_drop() {
let lock = Qutex::from(true);
let _future_guard_0 = lock.clone().lock();
let _future_guard_1 = lock.clone().lock();
let _future_guard_2 = lock.clone().lock();
// TODO: FINISH ME
}
#[test]
fn explicit_unlock() {
let lock = Qutex::from(true);
let mut guard_0 = lock.clone().lock().wait().unwrap();
*guard_0 = false;
let _ = Guard::unlock(guard_0);
// Will deadlock if this doesn't work:
let guard_1 = lock.clone().lock().wait().unwrap();
assert!(*guard_1 == false);
}
} |
/// Returns a new `FutureGuard` which can be used as a future and will
/// resolve into a `Guard`.
pub fn lock(self) -> FutureGuard<T> { | random_line_split |
qutex.rs | //! A queue-backed exclusive data lock.
//!
//
// * It is unclear how many of the unsafe methods within need actually remain
// unsafe.
use crossbeam::queue::SegQueue;
use futures::sync::oneshot::{self, Canceled, Receiver, Sender};
use futures::{Future, Poll};
use std::cell::UnsafeCell;
use std::ops::{Deref, DerefMut};
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering::SeqCst;
use std::sync::Arc;
/// Allows access to the data contained within a lock just like a mutex guard.
#[derive(Debug)]
pub struct Guard<T> {
qutex: Qutex<T>,
}
impl<T> Guard<T> {
/// Releases the lock held by a `Guard` and returns the original `Qutex`.
pub fn unlock(guard: Guard<T>) -> Qutex<T> {
let qutex = unsafe { ::std::ptr::read(&guard.qutex) };
::std::mem::forget(guard);
unsafe { qutex.direct_unlock() }
qutex
}
}
impl<T> Deref for Guard<T> {
type Target = T;
fn deref(&self) -> &T {
unsafe { &*self.qutex.inner.cell.get() }
}
}
impl<T> DerefMut for Guard<T> {
fn deref_mut(&mut self) -> &mut T {
unsafe { &mut *self.qutex.inner.cell.get() }
}
}
impl<T> Drop for Guard<T> {
fn drop(&mut self) {
// unsafe { self.qutex.direct_unlock().expect("Error dropping Guard") };
unsafe { self.qutex.direct_unlock() }
}
}
/// A future which resolves to a `Guard`.
#[must_use = "futures do nothing unless polled"]
#[derive(Debug)]
pub struct FutureGuard<T> {
qutex: Option<Qutex<T>>,
rx: Receiver<()>,
}
impl<T> FutureGuard<T> {
/// Returns a new `FutureGuard`.
fn new(qutex: Qutex<T>, rx: Receiver<()>) -> FutureGuard<T> {
FutureGuard {
qutex: Some(qutex),
rx: rx,
}
}
/// Blocks the current thread until this future resolves.
#[inline]
pub fn wait(self) -> Result<Guard<T>, Canceled> {
<Self as Future>::wait(self)
}
}
impl<T> Future for FutureGuard<T> {
type Item = Guard<T>;
type Error = Canceled;
#[inline]
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
if self.qutex.is_some() {
unsafe { self.qutex.as_ref().unwrap().process_queue() }
match self.rx.poll() {
Ok(status) => Ok(status.map(|_| Guard {
qutex: self.qutex.take().unwrap(),
})),
Err(e) => Err(e.into()),
}
} else {
panic!("FutureGuard::poll: Task already completed.");
}
}
}
impl<T> Drop for FutureGuard<T> {
/// Gracefully unlock if this guard has a lock acquired but has not yet
/// been polled to completion.
fn drop(&mut self) {
if let Some(qutex) = self.qutex.take() {
self.rx.close();
match self.rx.try_recv() {
Ok(status) => {
if status.is_some() {
unsafe {
qutex.direct_unlock();
}
}
}
Err(_) => (),
}
}
}
}
/// A request to lock the qutex for exclusive access.
#[derive(Debug)]
pub struct Request {
tx: Sender<()>,
}
impl Request {
/// Returns a new `Request`.
pub fn new(tx: Sender<()>) -> Request {
Request { tx: tx }
}
}
#[derive(Debug)]
struct Inner<T> {
// TODO: Convert to `AtomicBool` if no additional states are needed:
state: AtomicUsize,
cell: UnsafeCell<T>,
queue: SegQueue<Request>,
}
impl<T> From<T> for Inner<T> {
#[inline]
fn from(val: T) -> Inner<T> {
Inner {
state: AtomicUsize::new(0),
cell: UnsafeCell::new(val),
queue: SegQueue::new(),
}
}
}
unsafe impl<T: Send> Send for Inner<T> {}
unsafe impl<T: Send> Sync for Inner<T> {}
/// A lock-free-queue-backed exclusive data lock.
#[derive(Debug)]
pub struct Qutex<T> {
inner: Arc<Inner<T>>,
}
impl<T> Qutex<T> {
/// Creates and returns a new `Qutex`.
#[inline]
pub fn new(val: T) -> Qutex<T> {
Qutex {
inner: Arc::new(Inner::from(val)),
}
}
/// Returns a new `FutureGuard` which can be used as a future and will
/// resolve into a `Guard`.
pub fn lock(self) -> FutureGuard<T> {
let (tx, rx) = oneshot::channel();
unsafe {
self.push_request(Request::new(tx));
}
FutureGuard::new(self, rx)
}
/// Pushes a lock request onto the queue.
///
//
// TODO: Evaluate unsafe-ness.
//
#[inline]
pub unsafe fn push_request(&self, req: Request) {
self.inner.queue.push(req);
}
/// Returns a mutable reference to the inner `Vec` if there are currently
/// no other copies of this `Qutex`.
///
/// Since this call borrows the inner lock mutably, no actual locking needs to
/// take place---the mutable borrow statically guarantees no locks exist.
///
#[inline]
pub fn get_mut(&mut self) -> Option<&mut T> {
Arc::get_mut(&mut self.inner).map(|inn| unsafe { &mut *inn.cell.get() })
}
/// Returns a reference to the inner value.
///
#[inline]
pub fn as_ptr(&self) -> *const T {
self.inner.cell.get()
}
/// Returns a mutable reference to the inner value.
///
#[inline]
pub fn as_mut_ptr(&self) -> *mut T {
self.inner.cell.get()
}
/// Pops the next lock request in the queue if this (the caller's) lock is
/// unlocked.
//
// TODO:
// * This is currently public due to 'derivers' (aka. sub-types). Evaluate.
// * Consider removing unsafe qualifier.
// * Return proper error type.
// * [performance] Determine whether or not `compare_exchange_weak` should be used instead.
// * [performance] Consider failure ordering.
//
pub unsafe fn process_queue(&self) {
match self.inner.state.compare_exchange(0, 1, SeqCst, SeqCst) {
// Unlocked:
Ok(0) => {
loop {
if let Some(req) = self.inner.queue.pop() {
// If there is a send error, a requester has dropped
// its receiver so just go to the next.
if req.tx.send(()).is_err() {
continue;
} else {
break;
}
} else {
self.inner.state.store(0, SeqCst);
break;
}
}
}
// Already locked, leave it alone:
Err(1) => (),
// Already locked, leave it alone:
//
// TODO: Remove this option. Should be unreachable.
//
Ok(1) => unreachable!(),
// Something else:
Ok(n) => panic!("Qutex::process_queue: inner.state: {}.", n),
Err(n) => panic!("Qutex::process_queue: error: {}.", n),
}
}
/// Unlocks this (the caller's) lock and wakes up the next task in the
/// queue.
//
// TODO:
// * Evaluate unsafe-ness.
// * Return proper error type
// pub unsafe fn direct_unlock(&self) -> Result<(), ()> {
pub unsafe fn direct_unlock(&self) {
// TODO: Consider using `Ordering::Release`.
self.inner.state.store(0, SeqCst);
self.process_queue()
}
}
impl<T> From<T> for Qutex<T> {
#[inline]
fn from(val: T) -> Qutex<T> {
Qutex::new(val)
}
}
// Avoids needing `T: Clone`.
impl<T> Clone for Qutex<T> {
#[inline]
fn clone(&self) -> Qutex<T> |
}
#[cfg(test)]
// Woefully incomplete:
mod tests {
use super::*;
use futures::Future;
#[test]
fn simple() {
let val = Qutex::from(999i32);
println!("Reading val...");
{
let future_guard = val.clone().lock();
let guard = future_guard.wait().unwrap();
println!("val: {}", *guard);
}
println!("Storing new val...");
{
let future_guard = val.clone().lock();
let mut guard = future_guard.wait().unwrap();
*guard = 5;
}
println!("Reading val...");
{
let future_guard = val.clone().lock();
let guard = future_guard.wait().unwrap();
println!("val: {}", *guard);
}
}
#[test]
fn concurrent() {
use std::thread;
let thread_count = 20;
let mut threads = Vec::with_capacity(thread_count);
let start_val = 0i32;
let qutex = Qutex::new(start_val);
for i in 0..thread_count {
let future_guard = qutex.clone().lock();
let future_write = future_guard.and_then(|mut guard| {
*guard += 1;
Ok(())
});
threads.push(
thread::Builder::new()
.name(format!("test_thread_{}", i))
.spawn(|| future_write.wait().unwrap())
.unwrap(),
);
}
for i in 0..thread_count {
let future_guard = qutex.clone().lock();
threads.push(
thread::Builder::new()
.name(format!("test_thread_{}", i + thread_count))
.spawn(|| {
let mut guard = future_guard.wait().unwrap();
*guard -= 1;
})
.unwrap(),
)
}
for thread in threads {
thread.join().unwrap();
}
let guard = qutex.clone().lock().wait().unwrap();
assert_eq!(*guard, start_val);
}
#[test]
fn future_guard_drop() {
let lock = Qutex::from(true);
let _future_guard_0 = lock.clone().lock();
let _future_guard_1 = lock.clone().lock();
let _future_guard_2 = lock.clone().lock();
// TODO: FINISH ME
}
#[test]
fn explicit_unlock() {
let lock = Qutex::from(true);
let mut guard_0 = lock.clone().lock().wait().unwrap();
*guard_0 = false;
let _ = Guard::unlock(guard_0);
// Will deadlock if this doesn't work:
let guard_1 = lock.clone().lock().wait().unwrap();
assert!(*guard_1 == false);
}
}
| {
Qutex {
inner: self.inner.clone(),
}
} | identifier_body |
projects.py | import datetime
import os
from typing import Dict, List, Optional
from urllib.parse import urlencode
import shortuuid
from django.conf import settings
from django.core.files.base import ContentFile
from django.db import models
from django.db.models import Q
from django.db.models.signals import post_save
from django.http import HttpRequest
from django.shortcuts import reverse
from django.utils import timezone
from meta.views import Meta
from accounts.models import Account, AccountTeam
from jobs.models import Job, JobMethod
from manager.helpers import EnumChoice
from manager.storage import (
StorageUsageMixin,
media_storage,
snapshots_storage,
working_storage,
)
from users.models import User
class ProjectLiveness(EnumChoice):
"""
Where the project content is served from.
"""
LIVE = "live"
LATEST = "latest"
PINNED = "pinned"
@staticmethod
def as_choices():
"""Return as a list of field choices."""
return (
# Live is currently disabled as a choice
# pending implementation
("live", "Use working directory"),
("latest", "Use latest snapshot"),
("pinned", "Pinned to snapshot"),
)
def generate_project_key():
"""
Generate a unique, and very difficult to guess, project key.
"""
return shortuuid.ShortUUID().random(length=32)
class Project(StorageUsageMixin, models.Model):
"""
A project.
Projects are always owned by an account.
"""
account = models.ForeignKey(
Account,
on_delete=models.CASCADE,
related_name="projects",
null=False,
blank=False,
help_text="Account that the project belongs to.",
)
creator = models.ForeignKey(
User,
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name="projects_created",
help_text="The user who created the project.",
)
created = models.DateTimeField(
auto_now_add=True, help_text="The time the project was created."
)
name = models.SlugField(
null=False,
blank=False,
help_text="Name of the project. Lowercase only and unique for the account. "
"Will be used in URLS e.g. https://hub.stenci.la/awesome-org/great-project.",
)
title = models.CharField(
null=True,
blank=True,
max_length=256,
help_text="Title of the project to display in its profile.",
)
temporary = models.BooleanField(
default=False, help_text="Is the project temporary?"
)
public = models.BooleanField(
default=True, help_text="Is the project publicly visible?"
)
featured = models.BooleanField(
default=False, help_text="Is the project to be featured in listings?"
)
key = models.CharField(
default=generate_project_key,
max_length=64,
help_text="A unique, and very difficult to guess, key to access this project if it is not public.",
)
description = models.TextField(
null=True, blank=True, help_text="Brief description of the project."
)
image_file = models.ImageField(
null=True,
blank=True,
storage=media_storage(),
upload_to="projects/images",
help_text="The image used for this project in project listings and HTML meta data.",
)
image_path = models.CharField(
null=True,
blank=True,
max_length=1024,
help_text="Path of file in the project's working directory to use as this project's image. "
"Allows the project's image to update as it is re-executed.",
)
image_updated = models.DateTimeField(
null=True,
blank=True,
help_text="When the image file was last updated (e.g. from image_path).",
)
theme = models.TextField(
null=True,
blank=True,
help_text="The name of the theme to use as the default when generating content for this project."
# See note for the `Account.theme` field for why this is a TextField.
)
extra_head = models.TextField(
null=True,
blank=True,
help_text="Content to inject into the <head> element of HTML served for this project.",
)
extra_top = models.TextField(
null=True,
blank=True,
help_text="Content to inject at the top of the <body> element of HTML served for this project.",
)
extra_bottom = models.TextField(
null=True,
blank=True,
help_text="Content to inject at the bottom of the <body> element of HTML served for this project.",
)
container_image = models.TextField(
null=True,
blank=True,
help_text="The container image to use as the execution environment for this project.",
)
session_timeout = models.PositiveIntegerField(
null=True,
blank=True,
help_text="The amount of time of inactivity after which a session will end (s).",
)
session_timelimit = models.PositiveIntegerField(
null=True, blank=True, help_text="The maximum duration of a session (s)."
)
session_memory = models.PositiveIntegerField(
null=True,
blank=True,
help_text="The amount of memory allocated (request and limit) for a session (MiB).",
)
main = models.TextField(
null=True, blank=True, help_text="Path of the main file of the project",
)
liveness = models.CharField(
max_length=16,
choices=ProjectLiveness.as_choices(),
default=ProjectLiveness.LATEST.value,
help_text="Where to serve the content for this project from.",
)
pinned = models.ForeignKey(
"Snapshot",
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name="project_pinned",
help_text="If pinned, the snapshot to pin to, when serving content.",
)
class Meta:
constraints = [
models.UniqueConstraint(
fields=["account", "name"], name="%(class)s_unique_account_name"
)
]
# Time between creation and scheduled deletion
TEMPORARY_PROJECT_LIFESPANS = {
# Time for the "temp" account
"temp": datetime.timedelta(days=1),
# Time for all other accounts
"default": datetime.timedelta(days=7),
}
# Time before schedule deletion for warning email
TEMPORARY_PROJECT_WARNING = datetime.timedelta(days=2)
STORAGE = working_storage()
def __str__(self):
return self.name
def get_meta(self) -> Meta:
"""
Get the metadata to include in the head of the project's pages.
"""
return Meta(
object_type="article",
title=self.title or self.name,
description=self.description,
image=self.image_file.url if self.image_file else None,
)
def set_image_from_file(self, file):
"""
Update the image file for the project from the path of a file within it.
"""
if isinstance(file, str):
try:
file = self.files.filter(current=True, path=file)[0]
except IndexError:
return
content = file.get_content()
format = file.get_format()
ext = format.default_extension if format else ""
# The file name needs to be unique to bust any caches.
file = ContentFile(content)
file.name = f"{self.id}-{shortuuid.uuid()}{ext}"
self.image_file = file
self.image_updated = timezone.now()
self.save()
def update_image(self):
"""
Update the image for the project.
"""
modified_since = (
dict(modified__gt=self.image_updated) if self.image_updated else {}
)
if self.image_path and self.image_path != "__uploaded__":
# Does the file need updating?
images = self.files.filter(
current=True, path=self.image_path, **modified_since
).order_by("-modified")
if len(images) > 0:
self.set_image_from_file(images[0])
else:
# Try to find an image for the project and use the most
# recently modified since the image was last updated
images = self.files.filter(
current=True, mimetype__startswith="image/", **modified_since,
).order_by("-modified")
if len(images) > 0:
self.set_image_from_file(images[0])
def update_image_all_projects(self):
"""
Update the image of all projects.
"""
projects = Project.objects.all(temporary=False)
for project in projects:
project.update_image()
@property
def scheduled_deletion_time(self) -> Optional[datetime.datetime]:
"""
Get the scheduled deletion time of a temporary project.
"""
if not self.temporary:
return None
delta = Project.TEMPORARY_PROJECT_LIFESPANS.get(
self.account.name, Project.TEMPORARY_PROJECT_LIFESPANS.get("default")
)
return self.created + delta
@property
def | (self) -> Optional[datetime.datetime]:
"""
Get the scheduled time for a warning of deletion email to be send to project owner.
"""
time = self.scheduled_deletion_time
return time - Project.TEMPORARY_PROJECT_WARNING if time else None
def get_main(self):
"""
Get the main file for the project.
The main file can be designated by the user
(using the `main` field as the path). If no file
matches that path (e.g. because it was removed),
or if `main` was never set, then this defaults to the
most recently modified file with path `main.*` or `README.*`
if those are present.
"""
if self.main:
try:
# Using `filter()` and indexing to get the first item is more robust that
# using `get()`. There should only be one item with path that is current
# but this avoids a `MultipleObjectsReturned` in cases when there is not.
return self.files.filter(path=self.main, current=True).order_by(
"-created"
)[0]
except IndexError:
pass
candidates = self.files.filter(
Q(path__startswith="main.") | Q(path__startswith="README."), current=True
).order_by("-modified")
if len(candidates):
return candidates[0]
return None
def get_theme(self) -> str:
"""Get the theme for the project."""
return self.theme or self.account.theme
def content_url(self, snapshot=None, path=None, live=False) -> str:
"""
Get the URL that the content for this project is served on.
This is the URL, on the account subdomain,
that content for the project is served from.
"""
params: Dict = {}
if settings.CONFIGURATION.endswith("Dev"):
# In development, it's very useful to be able to preview
# content, so we return a local URL
url = (
reverse("ui-accounts-content", kwargs=dict(project_name=self.name))
+ "/"
)
params.update(account=self.account.name)
else:
# In production, return an account subdomain URL
url = "https://{account}.{domain}/{project}/".format(
account=self.account.name,
domain=settings.ACCOUNTS_DOMAIN,
project=self.name,
)
# Defaults to generating a URL for the latest snapshot
# unless specific snapshot, or live is True
if live:
url += "live/"
elif snapshot:
url += "v{0}/".format(snapshot.number)
if not self.public:
url += "~{0}/".format(self.key)
if path:
url += path
if params:
url += "?" + urlencode(params)
return url
def file_location(self, file: str) -> str:
"""
Get the location of one of the project's files relative to the root of the storage volume.
"""
return os.path.join(str(self.id), file)
def event(self, data: dict, source=None):
"""
Handle an event notification.
Records the event and evaluates each project trigger.
"""
ProjectEvent.objects.create(project=self, data=data, source=source)
# TODO: Evaluate each project trigger
# #for trigger in self.triggers.all():
# trigger.evaluate(event=event, context=dict(event=event, source=source))
def cleanup(self, user: User) -> Job:
"""
Clean the project's working directory.
Removes all files from the working directory.
In the future, this may be smarter and only remove
those files that are orphaned (i.e. not registered as part of the pipeline).
This is not called `clean()` because that clashes with
`Model.clean()` which gets called, for example, after the submission
of a form in the admin interface.
"""
return Job.objects.create(
project=self,
creator=user,
method=JobMethod.clean.name,
description=f"Clean project '{self.name}'",
**Job.create_callback(self, "cleanup_callback"),
)
def cleanup_callback(self, job: Job):
"""
Set all project files as non-current.
This will remove derived files (e.g. converted from another format) and
files from a source.
"""
from projects.models.files import File
File.objects.filter(project=self, current=True).update(current=False)
def pull(self, user: User) -> Job:
"""
Pull all the project's sources into its working directory.
Groups sources by `order` (with `null` order first i.e. can be overridden).
If there are more than one source in each group creates a `parallel` job
having children jobs that `pull`s each source. Groups are then placed in a
series job (if there is more than one).
"""
# Do not create individual pull jobs here because series job children
# are run in order of their ids; so we need to sort into groups first.
groups: Dict[int, List] = {}
for source in self.sources.all():
order = source.order or 0
if order in groups:
groups[order].append(source)
else:
groups[order] = [source]
steps: List[Job] = []
for order in sorted(groups.keys()):
sources = groups[order]
if len(sources) == 1:
steps.append(sources[0].pull(user))
else:
parallel = Job.objects.create(
project=self,
creator=user,
method=JobMethod.parallel.name,
description="Pull sources in parallel",
)
parallel.children.set([source.pull(user) for source in sources])
steps.append(parallel)
if len(steps) == 1:
return steps[0]
else:
series = Job.objects.create(
project=self,
creator=user,
method=JobMethod.series.name,
description="Pull sources in series",
)
series.children.set(steps)
return series
def reflow(self, user: User) -> Optional[Job]:
"""
Reflow the dependencies between the project's files by rerunning jobs.
For all `current` files that have `upstreams` creates a new job that
re-executes the original job. Because jobs can have `secrets` and callbacks
to the original project, rather than creating a copy of the original job
we go through the `File` method e.g. `File.convert`. This more safely enables
project forking etc.
In the future should do a topological sort so that the
jobs get executed in parallel if possible, and in series if necessary.
"""
subjobs = []
for file in self.files.filter(
current=True,
upstreams__isnull=False,
# Currently limited to convert jobs but in future there
# may be other jobs that create a derived file
# e.g. running a script that create files.
job__method=JobMethod.convert.name,
).exclude(
# Currently exclude index.html files because dealt with
# in an explicit step in snapshot
Q(path="index.html")
# Exclude .bib and image files which are created
# as children of a parent file's generation
# See https://github.com/stencila/hub/issues/1024#issuecomment-799128207
| Q(path__endswith=".bib")
| Q(path__endswith=".png")
| Q(path__endswith=".jpg"),
):
# Convert jobs only have one upstream
upstream = file.upstreams.first()
subjob = upstream.convert(user, file.path)
subjobs.append(subjob)
if len(subjobs) > 0:
parallel = Job.objects.create(
project=self,
creator=user,
method=JobMethod.parallel.name,
description="Update derived files",
)
parallel.children.set(subjobs)
return parallel
else:
return None
def pin(self, user: User, **callback) -> Job:
"""
Pin the project's container image.
Does not change the project's `container_image` field, but
rather, returns a pinned version of it. The callback should
use that value.
"""
return Job.objects.create(
project=self,
creator=user,
method=JobMethod.pin.name,
params=dict(container_image=self.container_image,),
description=f"Pin container image for project '{self.name}'",
**callback,
)
def archive(self, user: User, snapshot: str, path: str, **callback) -> Job:
"""
Archive the project's working directory.
Creates a copy of the project's working directory
on the `snapshots` storage.
"""
# Get the upload policy
policy = snapshots_storage().generate_post_policy(path)
url = policy.get("url") if policy else None
secrets = policy.get("fields") if policy else None
return Job.objects.create(
project=self,
creator=user,
method=JobMethod.archive.name,
params=dict(project=self.id, snapshot=snapshot, path=path, url=url,),
secrets=secrets,
description=f"Archive project '{self.name}'",
**callback,
)
def session(self, request: HttpRequest) -> Job:
"""
Create a session job for the project.
"""
job = Job.objects.create(
project=self,
creator=request.user if request.user.is_authenticated else None,
method=JobMethod.session.name,
params=dict(container_image=self.container_image),
description=f"Session for project '{self.name}'",
)
job.add_user(request)
return job
def make_project_creator_an_owner(
sender, instance: Project, created: bool, *args, **kwargs
):
"""
Make the project create an owner.
Makes sure each project has at least one owner.
"""
if sender is Project and created and instance.creator:
ProjectAgent.objects.create(
project=instance, user=instance.creator, role=ProjectRole.OWNER.name
)
post_save.connect(make_project_creator_an_owner, sender=Project)
class ProjectRole(EnumChoice):
"""
A user or team role within an account.
See `get_description` for what each role can do.
Some of roles can also be applied to the public.
For example, a project might be made public with
the `REVIEWER` role allowing anyone to comment.
"""
READER = "Reader"
REVIEWER = "Reviewer"
EDITOR = "Editor"
AUTHOR = "Author"
MANAGER = "Manager"
OWNER = "Owner"
@classmethod
def get_description(cls, role: "ProjectRole"):
"""Get the description of a project role."""
return {
cls.READER.name: "Can view project, but not make edits or share with others.",
cls.REVIEWER.name: "Can view project files and leave comments, but not edit project or share with others.",
cls.EDITOR.name: "Can edit project files and leave comments, but not share with others.",
cls.AUTHOR.name: "Can edit project files and leave comments, but not share with others.",
cls.MANAGER.name: "Can edit project files, settings, and share with others.",
cls.OWNER.name: "Can edit project files, settings, share with others, as well as delete a project",
}[role.name]
@classmethod
def from_string(cls, role: str) -> "ProjectRole":
"""Get the role from a string."""
role = role.lower()
for r in cls:
if role == r.name.lower():
return r
raise ValueError('No project role matching "{}"'.format(role))
@classmethod
def and_above(cls, role: "ProjectRole") -> List["ProjectRole"]:
"""Get a list including the role and all the roles above it."""
roles: List["ProjectRole"] = []
for r in cls:
if r == role or len(roles) > 0:
roles.append(r)
return roles
class ProjectAgent(models.Model):
"""
A user or team.
Users or teams can be added, with a role, to a project.
"""
project = models.ForeignKey(
Project,
on_delete=models.CASCADE,
related_name="agents",
help_text="Project to which the user or team is being given access to.",
)
user = models.ForeignKey(
User,
null=True,
blank=True,
on_delete=models.CASCADE,
related_name="projects",
help_text="A user given access to the project.",
)
team = models.ForeignKey(
AccountTeam,
null=True,
blank=True,
on_delete=models.CASCADE,
related_name="projects",
help_text="A team given access to the project.",
)
role = models.CharField(
null=False,
blank=False,
max_length=32,
choices=ProjectRole.as_choices(),
help_text="Role the user or team has within the project.",
)
class Meta:
constraints = [
# Each user should only have one role for a project
models.UniqueConstraint(
fields=["project", "user"], name="%(class)s_unique_project_user"
),
# Each team should only have one role for a project
models.UniqueConstraint(
fields=["project", "team"], name="%(class)s_unique_project_team"
),
]
class ProjectEvent(models.Model):
"""
A project event.
Project events are recorded primarily to provide traceability.
There are no fixed event types and arbitrary JSON data can be stored
in the `data` field. Events may be associated with a `source` or a `user`.
"""
id = models.BigAutoField(primary_key=True, help_text="Id of the event.",)
project = models.ForeignKey(
Project,
on_delete=models.CASCADE,
related_name="events",
help_text="Project to which the event applies.",
)
time = models.DateTimeField(auto_now_add=True, help_text="Time of the event.")
data = models.JSONField(help_text="Data associated with the event.")
source = models.ForeignKey(
"Source",
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name="events",
help_text="Source associated with the event.",
)
user = models.ForeignKey(
User,
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name="events",
help_text="User associated with the event.",
)
| scheduled_deletion_warning | identifier_name |
projects.py | import datetime
import os
from typing import Dict, List, Optional
from urllib.parse import urlencode
import shortuuid
from django.conf import settings
from django.core.files.base import ContentFile
from django.db import models
from django.db.models import Q
from django.db.models.signals import post_save
from django.http import HttpRequest
from django.shortcuts import reverse
from django.utils import timezone
from meta.views import Meta
from accounts.models import Account, AccountTeam
from jobs.models import Job, JobMethod
from manager.helpers import EnumChoice
from manager.storage import (
StorageUsageMixin,
media_storage,
snapshots_storage,
working_storage,
)
from users.models import User
class ProjectLiveness(EnumChoice):
"""
Where the project content is served from.
"""
LIVE = "live"
LATEST = "latest"
PINNED = "pinned"
@staticmethod
def as_choices():
"""Return as a list of field choices."""
return (
# Live is currently disabled as a choice
# pending implementation
("live", "Use working directory"),
("latest", "Use latest snapshot"),
("pinned", "Pinned to snapshot"),
)
def generate_project_key():
"""
Generate a unique, and very difficult to guess, project key.
"""
return shortuuid.ShortUUID().random(length=32)
class Project(StorageUsageMixin, models.Model):
"""
A project.
Projects are always owned by an account.
"""
account = models.ForeignKey(
Account,
on_delete=models.CASCADE,
related_name="projects",
null=False,
blank=False,
help_text="Account that the project belongs to.",
)
creator = models.ForeignKey(
User,
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name="projects_created",
help_text="The user who created the project.",
)
created = models.DateTimeField(
auto_now_add=True, help_text="The time the project was created."
)
name = models.SlugField(
null=False,
blank=False,
help_text="Name of the project. Lowercase only and unique for the account. "
"Will be used in URLS e.g. https://hub.stenci.la/awesome-org/great-project.",
)
title = models.CharField(
null=True,
blank=True,
max_length=256,
help_text="Title of the project to display in its profile.",
)
temporary = models.BooleanField(
default=False, help_text="Is the project temporary?"
)
public = models.BooleanField(
default=True, help_text="Is the project publicly visible?"
)
featured = models.BooleanField(
default=False, help_text="Is the project to be featured in listings?"
)
key = models.CharField(
default=generate_project_key,
max_length=64,
help_text="A unique, and very difficult to guess, key to access this project if it is not public.",
)
description = models.TextField(
null=True, blank=True, help_text="Brief description of the project."
)
image_file = models.ImageField(
null=True,
blank=True,
storage=media_storage(),
upload_to="projects/images",
help_text="The image used for this project in project listings and HTML meta data.",
)
image_path = models.CharField(
null=True,
blank=True,
max_length=1024,
help_text="Path of file in the project's working directory to use as this project's image. "
"Allows the project's image to update as it is re-executed.",
)
image_updated = models.DateTimeField(
null=True,
blank=True,
help_text="When the image file was last updated (e.g. from image_path).",
)
theme = models.TextField(
null=True,
blank=True,
help_text="The name of the theme to use as the default when generating content for this project."
# See note for the `Account.theme` field for why this is a TextField.
)
extra_head = models.TextField(
null=True,
blank=True,
help_text="Content to inject into the <head> element of HTML served for this project.",
)
extra_top = models.TextField(
null=True,
blank=True,
help_text="Content to inject at the top of the <body> element of HTML served for this project.",
)
extra_bottom = models.TextField(
null=True,
blank=True,
help_text="Content to inject at the bottom of the <body> element of HTML served for this project.",
)
container_image = models.TextField(
null=True,
blank=True,
help_text="The container image to use as the execution environment for this project.",
)
session_timeout = models.PositiveIntegerField(
null=True,
blank=True,
help_text="The amount of time of inactivity after which a session will end (s).",
)
session_timelimit = models.PositiveIntegerField(
null=True, blank=True, help_text="The maximum duration of a session (s)."
)
session_memory = models.PositiveIntegerField(
null=True,
blank=True,
help_text="The amount of memory allocated (request and limit) for a session (MiB).",
)
main = models.TextField(
null=True, blank=True, help_text="Path of the main file of the project",
)
liveness = models.CharField(
max_length=16,
choices=ProjectLiveness.as_choices(),
default=ProjectLiveness.LATEST.value,
help_text="Where to serve the content for this project from.",
)
pinned = models.ForeignKey(
"Snapshot",
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name="project_pinned",
help_text="If pinned, the snapshot to pin to, when serving content.",
)
class Meta:
constraints = [
models.UniqueConstraint(
fields=["account", "name"], name="%(class)s_unique_account_name"
)
]
# Time between creation and scheduled deletion
TEMPORARY_PROJECT_LIFESPANS = {
# Time for the "temp" account
"temp": datetime.timedelta(days=1),
# Time for all other accounts
"default": datetime.timedelta(days=7),
}
# Time before schedule deletion for warning email
TEMPORARY_PROJECT_WARNING = datetime.timedelta(days=2)
STORAGE = working_storage()
def __str__(self):
return self.name
def get_meta(self) -> Meta:
"""
Get the metadata to include in the head of the project's pages.
"""
return Meta(
object_type="article",
title=self.title or self.name,
description=self.description,
image=self.image_file.url if self.image_file else None,
)
def set_image_from_file(self, file):
"""
Update the image file for the project from the path of a file within it.
"""
if isinstance(file, str):
try:
file = self.files.filter(current=True, path=file)[0]
except IndexError:
return
content = file.get_content()
format = file.get_format()
ext = format.default_extension if format else ""
# The file name needs to be unique to bust any caches.
file = ContentFile(content)
file.name = f"{self.id}-{shortuuid.uuid()}{ext}"
self.image_file = file
self.image_updated = timezone.now()
self.save()
def update_image(self):
"""
Update the image for the project.
"""
modified_since = (
dict(modified__gt=self.image_updated) if self.image_updated else {}
)
if self.image_path and self.image_path != "__uploaded__":
# Does the file need updating?
images = self.files.filter(
current=True, path=self.image_path, **modified_since
).order_by("-modified")
if len(images) > 0:
self.set_image_from_file(images[0])
else:
# Try to find an image for the project and use the most
# recently modified since the image was last updated
images = self.files.filter(
current=True, mimetype__startswith="image/", **modified_since,
).order_by("-modified")
if len(images) > 0:
self.set_image_from_file(images[0])
def update_image_all_projects(self):
"""
Update the image of all projects.
"""
projects = Project.objects.all(temporary=False)
for project in projects:
project.update_image()
@property
def scheduled_deletion_time(self) -> Optional[datetime.datetime]:
"""
Get the scheduled deletion time of a temporary project.
"""
if not self.temporary:
return None
delta = Project.TEMPORARY_PROJECT_LIFESPANS.get(
self.account.name, Project.TEMPORARY_PROJECT_LIFESPANS.get("default")
)
return self.created + delta
@property
def scheduled_deletion_warning(self) -> Optional[datetime.datetime]:
"""
Get the scheduled time for a warning of deletion email to be send to project owner.
"""
time = self.scheduled_deletion_time
return time - Project.TEMPORARY_PROJECT_WARNING if time else None
def get_main(self):
"""
Get the main file for the project.
The main file can be designated by the user
(using the `main` field as the path). If no file
matches that path (e.g. because it was removed),
or if `main` was never set, then this defaults to the
most recently modified file with path `main.*` or `README.*`
if those are present.
"""
if self.main:
try:
# Using `filter()` and indexing to get the first item is more robust that
# using `get()`. There should only be one item with path that is current
# but this avoids a `MultipleObjectsReturned` in cases when there is not.
return self.files.filter(path=self.main, current=True).order_by(
"-created"
)[0]
except IndexError:
pass
candidates = self.files.filter(
Q(path__startswith="main.") | Q(path__startswith="README."), current=True
).order_by("-modified")
if len(candidates):
return candidates[0]
return None
def get_theme(self) -> str:
"""Get the theme for the project."""
return self.theme or self.account.theme
def content_url(self, snapshot=None, path=None, live=False) -> str:
"""
Get the URL that the content for this project is served on.
This is the URL, on the account subdomain,
that content for the project is served from.
"""
params: Dict = {}
if settings.CONFIGURATION.endswith("Dev"):
# In development, it's very useful to be able to preview
# content, so we return a local URL
url = (
reverse("ui-accounts-content", kwargs=dict(project_name=self.name))
+ "/"
)
params.update(account=self.account.name)
else:
# In production, return an account subdomain URL
url = "https://{account}.{domain}/{project}/".format(
account=self.account.name,
domain=settings.ACCOUNTS_DOMAIN,
project=self.name,
)
# Defaults to generating a URL for the latest snapshot
# unless specific snapshot, or live is True
if live:
url += "live/"
elif snapshot:
url += "v{0}/".format(snapshot.number)
if not self.public:
url += "~{0}/".format(self.key)
if path:
url += path
if params:
url += "?" + urlencode(params)
return url
def file_location(self, file: str) -> str:
"""
Get the location of one of the project's files relative to the root of the storage volume.
"""
return os.path.join(str(self.id), file)
def event(self, data: dict, source=None):
"""
Handle an event notification.
Records the event and evaluates each project trigger.
"""
ProjectEvent.objects.create(project=self, data=data, source=source)
# TODO: Evaluate each project trigger
# #for trigger in self.triggers.all():
# trigger.evaluate(event=event, context=dict(event=event, source=source))
def cleanup(self, user: User) -> Job:
"""
Clean the project's working directory.
Removes all files from the working directory.
In the future, this may be smarter and only remove
those files that are orphaned (i.e. not registered as part of the pipeline).
This is not called `clean()` because that clashes with
`Model.clean()` which gets called, for example, after the submission
of a form in the admin interface.
"""
return Job.objects.create(
project=self,
creator=user,
method=JobMethod.clean.name,
description=f"Clean project '{self.name}'",
**Job.create_callback(self, "cleanup_callback"),
)
def cleanup_callback(self, job: Job):
"""
Set all project files as non-current.
This will remove derived files (e.g. converted from another format) and
files from a source.
"""
from projects.models.files import File
File.objects.filter(project=self, current=True).update(current=False)
def pull(self, user: User) -> Job:
"""
Pull all the project's sources into its working directory.
Groups sources by `order` (with `null` order first i.e. can be overridden).
If there are more than one source in each group creates a `parallel` job
having children jobs that `pull`s each source. Groups are then placed in a
series job (if there is more than one).
"""
# Do not create individual pull jobs here because series job children
# are run in order of their ids; so we need to sort into groups first.
groups: Dict[int, List] = {}
for source in self.sources.all():
order = source.order or 0
if order in groups:
groups[order].append(source)
else:
groups[order] = [source]
steps: List[Job] = []
for order in sorted(groups.keys()):
sources = groups[order]
if len(sources) == 1:
steps.append(sources[0].pull(user))
else:
parallel = Job.objects.create(
project=self,
creator=user,
method=JobMethod.parallel.name,
description="Pull sources in parallel",
)
parallel.children.set([source.pull(user) for source in sources])
steps.append(parallel)
if len(steps) == 1:
return steps[0]
else:
series = Job.objects.create(
project=self,
creator=user,
method=JobMethod.series.name,
description="Pull sources in series",
)
series.children.set(steps)
return series
def reflow(self, user: User) -> Optional[Job]:
"""
Reflow the dependencies between the project's files by rerunning jobs.
For all `current` files that have `upstreams` creates a new job that
re-executes the original job. Because jobs can have `secrets` and callbacks
to the original project, rather than creating a copy of the original job
we go through the `File` method e.g. `File.convert`. This more safely enables
project forking etc.
In the future should do a topological sort so that the
jobs get executed in parallel if possible, and in series if necessary.
"""
subjobs = []
for file in self.files.filter(
current=True,
upstreams__isnull=False,
# Currently limited to convert jobs but in future there
# may be other jobs that create a derived file
# e.g. running a script that create files.
job__method=JobMethod.convert.name,
).exclude(
# Currently exclude index.html files because dealt with
# in an explicit step in snapshot
Q(path="index.html")
# Exclude .bib and image files which are created
# as children of a parent file's generation
# See https://github.com/stencila/hub/issues/1024#issuecomment-799128207
| Q(path__endswith=".bib")
| Q(path__endswith=".png")
| Q(path__endswith=".jpg"),
):
# Convert jobs only have one upstream
upstream = file.upstreams.first()
subjob = upstream.convert(user, file.path)
subjobs.append(subjob)
if len(subjobs) > 0:
parallel = Job.objects.create(
project=self,
creator=user,
method=JobMethod.parallel.name,
description="Update derived files",
)
parallel.children.set(subjobs)
return parallel
else:
return None
def pin(self, user: User, **callback) -> Job:
"""
Pin the project's container image.
Does not change the project's `container_image` field, but
rather, returns a pinned version of it. The callback should
use that value.
"""
return Job.objects.create(
project=self,
creator=user,
method=JobMethod.pin.name,
params=dict(container_image=self.container_image,),
description=f"Pin container image for project '{self.name}'",
**callback,
)
def archive(self, user: User, snapshot: str, path: str, **callback) -> Job:
"""
Archive the project's working directory.
Creates a copy of the project's working directory
on the `snapshots` storage.
"""
# Get the upload policy
policy = snapshots_storage().generate_post_policy(path)
url = policy.get("url") if policy else None
secrets = policy.get("fields") if policy else None
return Job.objects.create(
project=self,
creator=user,
method=JobMethod.archive.name,
params=dict(project=self.id, snapshot=snapshot, path=path, url=url,),
secrets=secrets,
description=f"Archive project '{self.name}'",
**callback,
)
def session(self, request: HttpRequest) -> Job:
"""
Create a session job for the project.
"""
job = Job.objects.create(
project=self,
creator=request.user if request.user.is_authenticated else None,
method=JobMethod.session.name,
params=dict(container_image=self.container_image),
description=f"Session for project '{self.name}'",
)
job.add_user(request)
return job
def make_project_creator_an_owner(
sender, instance: Project, created: bool, *args, **kwargs
):
"""
Make the project create an owner.
Makes sure each project has at least one owner.
"""
if sender is Project and created and instance.creator:
ProjectAgent.objects.create(
project=instance, user=instance.creator, role=ProjectRole.OWNER.name
)
post_save.connect(make_project_creator_an_owner, sender=Project)
class ProjectRole(EnumChoice):
|
class ProjectAgent(models.Model):
"""
A user or team.
Users or teams can be added, with a role, to a project.
"""
project = models.ForeignKey(
Project,
on_delete=models.CASCADE,
related_name="agents",
help_text="Project to which the user or team is being given access to.",
)
user = models.ForeignKey(
User,
null=True,
blank=True,
on_delete=models.CASCADE,
related_name="projects",
help_text="A user given access to the project.",
)
team = models.ForeignKey(
AccountTeam,
null=True,
blank=True,
on_delete=models.CASCADE,
related_name="projects",
help_text="A team given access to the project.",
)
role = models.CharField(
null=False,
blank=False,
max_length=32,
choices=ProjectRole.as_choices(),
help_text="Role the user or team has within the project.",
)
class Meta:
constraints = [
# Each user should only have one role for a project
models.UniqueConstraint(
fields=["project", "user"], name="%(class)s_unique_project_user"
),
# Each team should only have one role for a project
models.UniqueConstraint(
fields=["project", "team"], name="%(class)s_unique_project_team"
),
]
class ProjectEvent(models.Model):
"""
A project event.
Project events are recorded primarily to provide traceability.
There are no fixed event types and arbitrary JSON data can be stored
in the `data` field. Events may be associated with a `source` or a `user`.
"""
id = models.BigAutoField(primary_key=True, help_text="Id of the event.",)
project = models.ForeignKey(
Project,
on_delete=models.CASCADE,
related_name="events",
help_text="Project to which the event applies.",
)
time = models.DateTimeField(auto_now_add=True, help_text="Time of the event.")
data = models.JSONField(help_text="Data associated with the event.")
source = models.ForeignKey(
"Source",
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name="events",
help_text="Source associated with the event.",
)
user = models.ForeignKey(
User,
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name="events",
help_text="User associated with the event.",
)
| """
A user or team role within an account.
See `get_description` for what each role can do.
Some of roles can also be applied to the public.
For example, a project might be made public with
the `REVIEWER` role allowing anyone to comment.
"""
READER = "Reader"
REVIEWER = "Reviewer"
EDITOR = "Editor"
AUTHOR = "Author"
MANAGER = "Manager"
OWNER = "Owner"
@classmethod
def get_description(cls, role: "ProjectRole"):
"""Get the description of a project role."""
return {
cls.READER.name: "Can view project, but not make edits or share with others.",
cls.REVIEWER.name: "Can view project files and leave comments, but not edit project or share with others.",
cls.EDITOR.name: "Can edit project files and leave comments, but not share with others.",
cls.AUTHOR.name: "Can edit project files and leave comments, but not share with others.",
cls.MANAGER.name: "Can edit project files, settings, and share with others.",
cls.OWNER.name: "Can edit project files, settings, share with others, as well as delete a project",
}[role.name]
@classmethod
def from_string(cls, role: str) -> "ProjectRole":
"""Get the role from a string."""
role = role.lower()
for r in cls:
if role == r.name.lower():
return r
raise ValueError('No project role matching "{}"'.format(role))
@classmethod
def and_above(cls, role: "ProjectRole") -> List["ProjectRole"]:
"""Get a list including the role and all the roles above it."""
roles: List["ProjectRole"] = []
for r in cls:
if r == role or len(roles) > 0:
roles.append(r)
return roles | identifier_body |
projects.py | import datetime
import os
from typing import Dict, List, Optional
from urllib.parse import urlencode
import shortuuid
from django.conf import settings
from django.core.files.base import ContentFile
from django.db import models
from django.db.models import Q
from django.db.models.signals import post_save
from django.http import HttpRequest
from django.shortcuts import reverse
from django.utils import timezone
from meta.views import Meta
from accounts.models import Account, AccountTeam
from jobs.models import Job, JobMethod
from manager.helpers import EnumChoice
from manager.storage import (
StorageUsageMixin,
media_storage,
snapshots_storage,
working_storage,
)
from users.models import User
class ProjectLiveness(EnumChoice):
"""
Where the project content is served from.
"""
LIVE = "live"
LATEST = "latest"
PINNED = "pinned"
@staticmethod
def as_choices():
"""Return as a list of field choices."""
return (
# Live is currently disabled as a choice
# pending implementation
("live", "Use working directory"),
("latest", "Use latest snapshot"),
("pinned", "Pinned to snapshot"),
)
def generate_project_key():
"""
Generate a unique, and very difficult to guess, project key.
"""
return shortuuid.ShortUUID().random(length=32)
class Project(StorageUsageMixin, models.Model):
"""
A project.
Projects are always owned by an account.
"""
account = models.ForeignKey(
Account,
on_delete=models.CASCADE,
related_name="projects",
null=False,
blank=False,
help_text="Account that the project belongs to.",
)
creator = models.ForeignKey(
User,
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name="projects_created",
help_text="The user who created the project.",
)
created = models.DateTimeField(
auto_now_add=True, help_text="The time the project was created."
)
name = models.SlugField(
null=False,
blank=False,
help_text="Name of the project. Lowercase only and unique for the account. "
"Will be used in URLS e.g. https://hub.stenci.la/awesome-org/great-project.",
)
title = models.CharField(
null=True,
blank=True,
max_length=256,
help_text="Title of the project to display in its profile.",
)
temporary = models.BooleanField(
default=False, help_text="Is the project temporary?"
)
public = models.BooleanField(
default=True, help_text="Is the project publicly visible?"
)
featured = models.BooleanField(
default=False, help_text="Is the project to be featured in listings?"
)
key = models.CharField(
default=generate_project_key,
max_length=64,
help_text="A unique, and very difficult to guess, key to access this project if it is not public.",
)
description = models.TextField(
null=True, blank=True, help_text="Brief description of the project."
)
image_file = models.ImageField(
null=True,
blank=True,
storage=media_storage(),
upload_to="projects/images",
help_text="The image used for this project in project listings and HTML meta data.",
)
image_path = models.CharField(
null=True,
blank=True,
max_length=1024,
help_text="Path of file in the project's working directory to use as this project's image. "
"Allows the project's image to update as it is re-executed.",
)
image_updated = models.DateTimeField(
null=True, | null=True,
blank=True,
help_text="The name of the theme to use as the default when generating content for this project."
# See note for the `Account.theme` field for why this is a TextField.
)
extra_head = models.TextField(
null=True,
blank=True,
help_text="Content to inject into the <head> element of HTML served for this project.",
)
extra_top = models.TextField(
null=True,
blank=True,
help_text="Content to inject at the top of the <body> element of HTML served for this project.",
)
extra_bottom = models.TextField(
null=True,
blank=True,
help_text="Content to inject at the bottom of the <body> element of HTML served for this project.",
)
container_image = models.TextField(
null=True,
blank=True,
help_text="The container image to use as the execution environment for this project.",
)
session_timeout = models.PositiveIntegerField(
null=True,
blank=True,
help_text="The amount of time of inactivity after which a session will end (s).",
)
session_timelimit = models.PositiveIntegerField(
null=True, blank=True, help_text="The maximum duration of a session (s)."
)
session_memory = models.PositiveIntegerField(
null=True,
blank=True,
help_text="The amount of memory allocated (request and limit) for a session (MiB).",
)
main = models.TextField(
null=True, blank=True, help_text="Path of the main file of the project",
)
liveness = models.CharField(
max_length=16,
choices=ProjectLiveness.as_choices(),
default=ProjectLiveness.LATEST.value,
help_text="Where to serve the content for this project from.",
)
pinned = models.ForeignKey(
"Snapshot",
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name="project_pinned",
help_text="If pinned, the snapshot to pin to, when serving content.",
)
class Meta:
constraints = [
models.UniqueConstraint(
fields=["account", "name"], name="%(class)s_unique_account_name"
)
]
# Time between creation and scheduled deletion
TEMPORARY_PROJECT_LIFESPANS = {
# Time for the "temp" account
"temp": datetime.timedelta(days=1),
# Time for all other accounts
"default": datetime.timedelta(days=7),
}
# Time before schedule deletion for warning email
TEMPORARY_PROJECT_WARNING = datetime.timedelta(days=2)
STORAGE = working_storage()
def __str__(self):
return self.name
def get_meta(self) -> Meta:
"""
Get the metadata to include in the head of the project's pages.
"""
return Meta(
object_type="article",
title=self.title or self.name,
description=self.description,
image=self.image_file.url if self.image_file else None,
)
def set_image_from_file(self, file):
"""
Update the image file for the project from the path of a file within it.
"""
if isinstance(file, str):
try:
file = self.files.filter(current=True, path=file)[0]
except IndexError:
return
content = file.get_content()
format = file.get_format()
ext = format.default_extension if format else ""
# The file name needs to be unique to bust any caches.
file = ContentFile(content)
file.name = f"{self.id}-{shortuuid.uuid()}{ext}"
self.image_file = file
self.image_updated = timezone.now()
self.save()
def update_image(self):
"""
Update the image for the project.
"""
modified_since = (
dict(modified__gt=self.image_updated) if self.image_updated else {}
)
if self.image_path and self.image_path != "__uploaded__":
# Does the file need updating?
images = self.files.filter(
current=True, path=self.image_path, **modified_since
).order_by("-modified")
if len(images) > 0:
self.set_image_from_file(images[0])
else:
# Try to find an image for the project and use the most
# recently modified since the image was last updated
images = self.files.filter(
current=True, mimetype__startswith="image/", **modified_since,
).order_by("-modified")
if len(images) > 0:
self.set_image_from_file(images[0])
def update_image_all_projects(self):
"""
Update the image of all projects.
"""
projects = Project.objects.all(temporary=False)
for project in projects:
project.update_image()
@property
def scheduled_deletion_time(self) -> Optional[datetime.datetime]:
"""
Get the scheduled deletion time of a temporary project.
"""
if not self.temporary:
return None
delta = Project.TEMPORARY_PROJECT_LIFESPANS.get(
self.account.name, Project.TEMPORARY_PROJECT_LIFESPANS.get("default")
)
return self.created + delta
@property
def scheduled_deletion_warning(self) -> Optional[datetime.datetime]:
"""
Get the scheduled time for a warning of deletion email to be send to project owner.
"""
time = self.scheduled_deletion_time
return time - Project.TEMPORARY_PROJECT_WARNING if time else None
def get_main(self):
"""
Get the main file for the project.
The main file can be designated by the user
(using the `main` field as the path). If no file
matches that path (e.g. because it was removed),
or if `main` was never set, then this defaults to the
most recently modified file with path `main.*` or `README.*`
if those are present.
"""
if self.main:
try:
# Using `filter()` and indexing to get the first item is more robust that
# using `get()`. There should only be one item with path that is current
# but this avoids a `MultipleObjectsReturned` in cases when there is not.
return self.files.filter(path=self.main, current=True).order_by(
"-created"
)[0]
except IndexError:
pass
candidates = self.files.filter(
Q(path__startswith="main.") | Q(path__startswith="README."), current=True
).order_by("-modified")
if len(candidates):
return candidates[0]
return None
def get_theme(self) -> str:
"""Get the theme for the project."""
return self.theme or self.account.theme
def content_url(self, snapshot=None, path=None, live=False) -> str:
"""
Get the URL that the content for this project is served on.
This is the URL, on the account subdomain,
that content for the project is served from.
"""
params: Dict = {}
if settings.CONFIGURATION.endswith("Dev"):
# In development, it's very useful to be able to preview
# content, so we return a local URL
url = (
reverse("ui-accounts-content", kwargs=dict(project_name=self.name))
+ "/"
)
params.update(account=self.account.name)
else:
# In production, return an account subdomain URL
url = "https://{account}.{domain}/{project}/".format(
account=self.account.name,
domain=settings.ACCOUNTS_DOMAIN,
project=self.name,
)
# Defaults to generating a URL for the latest snapshot
# unless specific snapshot, or live is True
if live:
url += "live/"
elif snapshot:
url += "v{0}/".format(snapshot.number)
if not self.public:
url += "~{0}/".format(self.key)
if path:
url += path
if params:
url += "?" + urlencode(params)
return url
def file_location(self, file: str) -> str:
"""
Get the location of one of the project's files relative to the root of the storage volume.
"""
return os.path.join(str(self.id), file)
def event(self, data: dict, source=None):
"""
Handle an event notification.
Records the event and evaluates each project trigger.
"""
ProjectEvent.objects.create(project=self, data=data, source=source)
# TODO: Evaluate each project trigger
# #for trigger in self.triggers.all():
# trigger.evaluate(event=event, context=dict(event=event, source=source))
def cleanup(self, user: User) -> Job:
"""
Clean the project's working directory.
Removes all files from the working directory.
In the future, this may be smarter and only remove
those files that are orphaned (i.e. not registered as part of the pipeline).
This is not called `clean()` because that clashes with
`Model.clean()` which gets called, for example, after the submission
of a form in the admin interface.
"""
return Job.objects.create(
project=self,
creator=user,
method=JobMethod.clean.name,
description=f"Clean project '{self.name}'",
**Job.create_callback(self, "cleanup_callback"),
)
def cleanup_callback(self, job: Job):
"""
Set all project files as non-current.
This will remove derived files (e.g. converted from another format) and
files from a source.
"""
from projects.models.files import File
File.objects.filter(project=self, current=True).update(current=False)
def pull(self, user: User) -> Job:
"""
Pull all the project's sources into its working directory.
Groups sources by `order` (with `null` order first i.e. can be overridden).
If there are more than one source in each group creates a `parallel` job
having children jobs that `pull`s each source. Groups are then placed in a
series job (if there is more than one).
"""
# Do not create individual pull jobs here because series job children
# are run in order of their ids; so we need to sort into groups first.
groups: Dict[int, List] = {}
for source in self.sources.all():
order = source.order or 0
if order in groups:
groups[order].append(source)
else:
groups[order] = [source]
steps: List[Job] = []
for order in sorted(groups.keys()):
sources = groups[order]
if len(sources) == 1:
steps.append(sources[0].pull(user))
else:
parallel = Job.objects.create(
project=self,
creator=user,
method=JobMethod.parallel.name,
description="Pull sources in parallel",
)
parallel.children.set([source.pull(user) for source in sources])
steps.append(parallel)
if len(steps) == 1:
return steps[0]
else:
series = Job.objects.create(
project=self,
creator=user,
method=JobMethod.series.name,
description="Pull sources in series",
)
series.children.set(steps)
return series
def reflow(self, user: User) -> Optional[Job]:
"""
Reflow the dependencies between the project's files by rerunning jobs.
For all `current` files that have `upstreams` creates a new job that
re-executes the original job. Because jobs can have `secrets` and callbacks
to the original project, rather than creating a copy of the original job
we go through the `File` method e.g. `File.convert`. This more safely enables
project forking etc.
In the future should do a topological sort so that the
jobs get executed in parallel if possible, and in series if necessary.
"""
subjobs = []
for file in self.files.filter(
current=True,
upstreams__isnull=False,
# Currently limited to convert jobs but in future there
# may be other jobs that create a derived file
# e.g. running a script that create files.
job__method=JobMethod.convert.name,
).exclude(
# Currently exclude index.html files because dealt with
# in an explicit step in snapshot
Q(path="index.html")
# Exclude .bib and image files which are created
# as children of a parent file's generation
# See https://github.com/stencila/hub/issues/1024#issuecomment-799128207
| Q(path__endswith=".bib")
| Q(path__endswith=".png")
| Q(path__endswith=".jpg"),
):
# Convert jobs only have one upstream
upstream = file.upstreams.first()
subjob = upstream.convert(user, file.path)
subjobs.append(subjob)
if len(subjobs) > 0:
parallel = Job.objects.create(
project=self,
creator=user,
method=JobMethod.parallel.name,
description="Update derived files",
)
parallel.children.set(subjobs)
return parallel
else:
return None
def pin(self, user: User, **callback) -> Job:
"""
Pin the project's container image.
Does not change the project's `container_image` field, but
rather, returns a pinned version of it. The callback should
use that value.
"""
return Job.objects.create(
project=self,
creator=user,
method=JobMethod.pin.name,
params=dict(container_image=self.container_image,),
description=f"Pin container image for project '{self.name}'",
**callback,
)
def archive(self, user: User, snapshot: str, path: str, **callback) -> Job:
"""
Archive the project's working directory.
Creates a copy of the project's working directory
on the `snapshots` storage.
"""
# Get the upload policy
policy = snapshots_storage().generate_post_policy(path)
url = policy.get("url") if policy else None
secrets = policy.get("fields") if policy else None
return Job.objects.create(
project=self,
creator=user,
method=JobMethod.archive.name,
params=dict(project=self.id, snapshot=snapshot, path=path, url=url,),
secrets=secrets,
description=f"Archive project '{self.name}'",
**callback,
)
def session(self, request: HttpRequest) -> Job:
"""
Create a session job for the project.
"""
job = Job.objects.create(
project=self,
creator=request.user if request.user.is_authenticated else None,
method=JobMethod.session.name,
params=dict(container_image=self.container_image),
description=f"Session for project '{self.name}'",
)
job.add_user(request)
return job
def make_project_creator_an_owner(
sender, instance: Project, created: bool, *args, **kwargs
):
"""
Make the project create an owner.
Makes sure each project has at least one owner.
"""
if sender is Project and created and instance.creator:
ProjectAgent.objects.create(
project=instance, user=instance.creator, role=ProjectRole.OWNER.name
)
post_save.connect(make_project_creator_an_owner, sender=Project)
class ProjectRole(EnumChoice):
"""
A user or team role within an account.
See `get_description` for what each role can do.
Some of roles can also be applied to the public.
For example, a project might be made public with
the `REVIEWER` role allowing anyone to comment.
"""
READER = "Reader"
REVIEWER = "Reviewer"
EDITOR = "Editor"
AUTHOR = "Author"
MANAGER = "Manager"
OWNER = "Owner"
@classmethod
def get_description(cls, role: "ProjectRole"):
"""Get the description of a project role."""
return {
cls.READER.name: "Can view project, but not make edits or share with others.",
cls.REVIEWER.name: "Can view project files and leave comments, but not edit project or share with others.",
cls.EDITOR.name: "Can edit project files and leave comments, but not share with others.",
cls.AUTHOR.name: "Can edit project files and leave comments, but not share with others.",
cls.MANAGER.name: "Can edit project files, settings, and share with others.",
cls.OWNER.name: "Can edit project files, settings, share with others, as well as delete a project",
}[role.name]
@classmethod
def from_string(cls, role: str) -> "ProjectRole":
"""Get the role from a string."""
role = role.lower()
for r in cls:
if role == r.name.lower():
return r
raise ValueError('No project role matching "{}"'.format(role))
@classmethod
def and_above(cls, role: "ProjectRole") -> List["ProjectRole"]:
"""Get a list including the role and all the roles above it."""
roles: List["ProjectRole"] = []
for r in cls:
if r == role or len(roles) > 0:
roles.append(r)
return roles
class ProjectAgent(models.Model):
"""
A user or team.
Users or teams can be added, with a role, to a project.
"""
project = models.ForeignKey(
Project,
on_delete=models.CASCADE,
related_name="agents",
help_text="Project to which the user or team is being given access to.",
)
user = models.ForeignKey(
User,
null=True,
blank=True,
on_delete=models.CASCADE,
related_name="projects",
help_text="A user given access to the project.",
)
team = models.ForeignKey(
AccountTeam,
null=True,
blank=True,
on_delete=models.CASCADE,
related_name="projects",
help_text="A team given access to the project.",
)
role = models.CharField(
null=False,
blank=False,
max_length=32,
choices=ProjectRole.as_choices(),
help_text="Role the user or team has within the project.",
)
class Meta:
constraints = [
# Each user should only have one role for a project
models.UniqueConstraint(
fields=["project", "user"], name="%(class)s_unique_project_user"
),
# Each team should only have one role for a project
models.UniqueConstraint(
fields=["project", "team"], name="%(class)s_unique_project_team"
),
]
class ProjectEvent(models.Model):
"""
A project event.
Project events are recorded primarily to provide traceability.
There are no fixed event types and arbitrary JSON data can be stored
in the `data` field. Events may be associated with a `source` or a `user`.
"""
id = models.BigAutoField(primary_key=True, help_text="Id of the event.",)
project = models.ForeignKey(
Project,
on_delete=models.CASCADE,
related_name="events",
help_text="Project to which the event applies.",
)
time = models.DateTimeField(auto_now_add=True, help_text="Time of the event.")
data = models.JSONField(help_text="Data associated with the event.")
source = models.ForeignKey(
"Source",
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name="events",
help_text="Source associated with the event.",
)
user = models.ForeignKey(
User,
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name="events",
help_text="User associated with the event.",
) | blank=True,
help_text="When the image file was last updated (e.g. from image_path).",
)
theme = models.TextField( | random_line_split |
projects.py | import datetime
import os
from typing import Dict, List, Optional
from urllib.parse import urlencode
import shortuuid
from django.conf import settings
from django.core.files.base import ContentFile
from django.db import models
from django.db.models import Q
from django.db.models.signals import post_save
from django.http import HttpRequest
from django.shortcuts import reverse
from django.utils import timezone
from meta.views import Meta
from accounts.models import Account, AccountTeam
from jobs.models import Job, JobMethod
from manager.helpers import EnumChoice
from manager.storage import (
StorageUsageMixin,
media_storage,
snapshots_storage,
working_storage,
)
from users.models import User
class ProjectLiveness(EnumChoice):
"""
Where the project content is served from.
"""
LIVE = "live"
LATEST = "latest"
PINNED = "pinned"
@staticmethod
def as_choices():
"""Return as a list of field choices."""
return (
# Live is currently disabled as a choice
# pending implementation
("live", "Use working directory"),
("latest", "Use latest snapshot"),
("pinned", "Pinned to snapshot"),
)
def generate_project_key():
"""
Generate a unique, and very difficult to guess, project key.
"""
return shortuuid.ShortUUID().random(length=32)
class Project(StorageUsageMixin, models.Model):
"""
A project.
Projects are always owned by an account.
"""
account = models.ForeignKey(
Account,
on_delete=models.CASCADE,
related_name="projects",
null=False,
blank=False,
help_text="Account that the project belongs to.",
)
creator = models.ForeignKey(
User,
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name="projects_created",
help_text="The user who created the project.",
)
created = models.DateTimeField(
auto_now_add=True, help_text="The time the project was created."
)
name = models.SlugField(
null=False,
blank=False,
help_text="Name of the project. Lowercase only and unique for the account. "
"Will be used in URLS e.g. https://hub.stenci.la/awesome-org/great-project.",
)
title = models.CharField(
null=True,
blank=True,
max_length=256,
help_text="Title of the project to display in its profile.",
)
temporary = models.BooleanField(
default=False, help_text="Is the project temporary?"
)
public = models.BooleanField(
default=True, help_text="Is the project publicly visible?"
)
featured = models.BooleanField(
default=False, help_text="Is the project to be featured in listings?"
)
key = models.CharField(
default=generate_project_key,
max_length=64,
help_text="A unique, and very difficult to guess, key to access this project if it is not public.",
)
description = models.TextField(
null=True, blank=True, help_text="Brief description of the project."
)
image_file = models.ImageField(
null=True,
blank=True,
storage=media_storage(),
upload_to="projects/images",
help_text="The image used for this project in project listings and HTML meta data.",
)
image_path = models.CharField(
null=True,
blank=True,
max_length=1024,
help_text="Path of file in the project's working directory to use as this project's image. "
"Allows the project's image to update as it is re-executed.",
)
image_updated = models.DateTimeField(
null=True,
blank=True,
help_text="When the image file was last updated (e.g. from image_path).",
)
theme = models.TextField(
null=True,
blank=True,
help_text="The name of the theme to use as the default when generating content for this project."
# See note for the `Account.theme` field for why this is a TextField.
)
extra_head = models.TextField(
null=True,
blank=True,
help_text="Content to inject into the <head> element of HTML served for this project.",
)
extra_top = models.TextField(
null=True,
blank=True,
help_text="Content to inject at the top of the <body> element of HTML served for this project.",
)
extra_bottom = models.TextField(
null=True,
blank=True,
help_text="Content to inject at the bottom of the <body> element of HTML served for this project.",
)
container_image = models.TextField(
null=True,
blank=True,
help_text="The container image to use as the execution environment for this project.",
)
session_timeout = models.PositiveIntegerField(
null=True,
blank=True,
help_text="The amount of time of inactivity after which a session will end (s).",
)
session_timelimit = models.PositiveIntegerField(
null=True, blank=True, help_text="The maximum duration of a session (s)."
)
session_memory = models.PositiveIntegerField(
null=True,
blank=True,
help_text="The amount of memory allocated (request and limit) for a session (MiB).",
)
main = models.TextField(
null=True, blank=True, help_text="Path of the main file of the project",
)
liveness = models.CharField(
max_length=16,
choices=ProjectLiveness.as_choices(),
default=ProjectLiveness.LATEST.value,
help_text="Where to serve the content for this project from.",
)
pinned = models.ForeignKey(
"Snapshot",
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name="project_pinned",
help_text="If pinned, the snapshot to pin to, when serving content.",
)
class Meta:
constraints = [
models.UniqueConstraint(
fields=["account", "name"], name="%(class)s_unique_account_name"
)
]
# Time between creation and scheduled deletion
TEMPORARY_PROJECT_LIFESPANS = {
# Time for the "temp" account
"temp": datetime.timedelta(days=1),
# Time for all other accounts
"default": datetime.timedelta(days=7),
}
# Time before schedule deletion for warning email
TEMPORARY_PROJECT_WARNING = datetime.timedelta(days=2)
STORAGE = working_storage()
def __str__(self):
return self.name
def get_meta(self) -> Meta:
"""
Get the metadata to include in the head of the project's pages.
"""
return Meta(
object_type="article",
title=self.title or self.name,
description=self.description,
image=self.image_file.url if self.image_file else None,
)
def set_image_from_file(self, file):
"""
Update the image file for the project from the path of a file within it.
"""
if isinstance(file, str):
try:
file = self.files.filter(current=True, path=file)[0]
except IndexError:
return
content = file.get_content()
format = file.get_format()
ext = format.default_extension if format else ""
# The file name needs to be unique to bust any caches.
file = ContentFile(content)
file.name = f"{self.id}-{shortuuid.uuid()}{ext}"
self.image_file = file
self.image_updated = timezone.now()
self.save()
def update_image(self):
"""
Update the image for the project.
"""
modified_since = (
dict(modified__gt=self.image_updated) if self.image_updated else {}
)
if self.image_path and self.image_path != "__uploaded__":
# Does the file need updating?
|
else:
# Try to find an image for the project and use the most
# recently modified since the image was last updated
images = self.files.filter(
current=True, mimetype__startswith="image/", **modified_since,
).order_by("-modified")
if len(images) > 0:
self.set_image_from_file(images[0])
def update_image_all_projects(self):
"""
Update the image of all projects.
"""
projects = Project.objects.all(temporary=False)
for project in projects:
project.update_image()
@property
def scheduled_deletion_time(self) -> Optional[datetime.datetime]:
"""
Get the scheduled deletion time of a temporary project.
"""
if not self.temporary:
return None
delta = Project.TEMPORARY_PROJECT_LIFESPANS.get(
self.account.name, Project.TEMPORARY_PROJECT_LIFESPANS.get("default")
)
return self.created + delta
@property
def scheduled_deletion_warning(self) -> Optional[datetime.datetime]:
"""
Get the scheduled time for a warning of deletion email to be send to project owner.
"""
time = self.scheduled_deletion_time
return time - Project.TEMPORARY_PROJECT_WARNING if time else None
def get_main(self):
"""
Get the main file for the project.
The main file can be designated by the user
(using the `main` field as the path). If no file
matches that path (e.g. because it was removed),
or if `main` was never set, then this defaults to the
most recently modified file with path `main.*` or `README.*`
if those are present.
"""
if self.main:
try:
# Using `filter()` and indexing to get the first item is more robust that
# using `get()`. There should only be one item with path that is current
# but this avoids a `MultipleObjectsReturned` in cases when there is not.
return self.files.filter(path=self.main, current=True).order_by(
"-created"
)[0]
except IndexError:
pass
candidates = self.files.filter(
Q(path__startswith="main.") | Q(path__startswith="README."), current=True
).order_by("-modified")
if len(candidates):
return candidates[0]
return None
def get_theme(self) -> str:
"""Get the theme for the project."""
return self.theme or self.account.theme
def content_url(self, snapshot=None, path=None, live=False) -> str:
"""
Get the URL that the content for this project is served on.
This is the URL, on the account subdomain,
that content for the project is served from.
"""
params: Dict = {}
if settings.CONFIGURATION.endswith("Dev"):
# In development, it's very useful to be able to preview
# content, so we return a local URL
url = (
reverse("ui-accounts-content", kwargs=dict(project_name=self.name))
+ "/"
)
params.update(account=self.account.name)
else:
# In production, return an account subdomain URL
url = "https://{account}.{domain}/{project}/".format(
account=self.account.name,
domain=settings.ACCOUNTS_DOMAIN,
project=self.name,
)
# Defaults to generating a URL for the latest snapshot
# unless specific snapshot, or live is True
if live:
url += "live/"
elif snapshot:
url += "v{0}/".format(snapshot.number)
if not self.public:
url += "~{0}/".format(self.key)
if path:
url += path
if params:
url += "?" + urlencode(params)
return url
def file_location(self, file: str) -> str:
"""
Get the location of one of the project's files relative to the root of the storage volume.
"""
return os.path.join(str(self.id), file)
def event(self, data: dict, source=None):
"""
Handle an event notification.
Records the event and evaluates each project trigger.
"""
ProjectEvent.objects.create(project=self, data=data, source=source)
# TODO: Evaluate each project trigger
# #for trigger in self.triggers.all():
# trigger.evaluate(event=event, context=dict(event=event, source=source))
def cleanup(self, user: User) -> Job:
"""
Clean the project's working directory.
Removes all files from the working directory.
In the future, this may be smarter and only remove
those files that are orphaned (i.e. not registered as part of the pipeline).
This is not called `clean()` because that clashes with
`Model.clean()` which gets called, for example, after the submission
of a form in the admin interface.
"""
return Job.objects.create(
project=self,
creator=user,
method=JobMethod.clean.name,
description=f"Clean project '{self.name}'",
**Job.create_callback(self, "cleanup_callback"),
)
def cleanup_callback(self, job: Job):
"""
Set all project files as non-current.
This will remove derived files (e.g. converted from another format) and
files from a source.
"""
from projects.models.files import File
File.objects.filter(project=self, current=True).update(current=False)
def pull(self, user: User) -> Job:
"""
Pull all the project's sources into its working directory.
Groups sources by `order` (with `null` order first i.e. can be overridden).
If there are more than one source in each group creates a `parallel` job
having children jobs that `pull`s each source. Groups are then placed in a
series job (if there is more than one).
"""
# Do not create individual pull jobs here because series job children
# are run in order of their ids; so we need to sort into groups first.
groups: Dict[int, List] = {}
for source in self.sources.all():
order = source.order or 0
if order in groups:
groups[order].append(source)
else:
groups[order] = [source]
steps: List[Job] = []
for order in sorted(groups.keys()):
sources = groups[order]
if len(sources) == 1:
steps.append(sources[0].pull(user))
else:
parallel = Job.objects.create(
project=self,
creator=user,
method=JobMethod.parallel.name,
description="Pull sources in parallel",
)
parallel.children.set([source.pull(user) for source in sources])
steps.append(parallel)
if len(steps) == 1:
return steps[0]
else:
series = Job.objects.create(
project=self,
creator=user,
method=JobMethod.series.name,
description="Pull sources in series",
)
series.children.set(steps)
return series
def reflow(self, user: User) -> Optional[Job]:
"""
Reflow the dependencies between the project's files by rerunning jobs.
For all `current` files that have `upstreams` creates a new job that
re-executes the original job. Because jobs can have `secrets` and callbacks
to the original project, rather than creating a copy of the original job
we go through the `File` method e.g. `File.convert`. This more safely enables
project forking etc.
In the future should do a topological sort so that the
jobs get executed in parallel if possible, and in series if necessary.
"""
subjobs = []
for file in self.files.filter(
current=True,
upstreams__isnull=False,
# Currently limited to convert jobs but in future there
# may be other jobs that create a derived file
# e.g. running a script that create files.
job__method=JobMethod.convert.name,
).exclude(
# Currently exclude index.html files because dealt with
# in an explicit step in snapshot
Q(path="index.html")
# Exclude .bib and image files which are created
# as children of a parent file's generation
# See https://github.com/stencila/hub/issues/1024#issuecomment-799128207
| Q(path__endswith=".bib")
| Q(path__endswith=".png")
| Q(path__endswith=".jpg"),
):
# Convert jobs only have one upstream
upstream = file.upstreams.first()
subjob = upstream.convert(user, file.path)
subjobs.append(subjob)
if len(subjobs) > 0:
parallel = Job.objects.create(
project=self,
creator=user,
method=JobMethod.parallel.name,
description="Update derived files",
)
parallel.children.set(subjobs)
return parallel
else:
return None
def pin(self, user: User, **callback) -> Job:
"""
Pin the project's container image.
Does not change the project's `container_image` field, but
rather, returns a pinned version of it. The callback should
use that value.
"""
return Job.objects.create(
project=self,
creator=user,
method=JobMethod.pin.name,
params=dict(container_image=self.container_image,),
description=f"Pin container image for project '{self.name}'",
**callback,
)
def archive(self, user: User, snapshot: str, path: str, **callback) -> Job:
"""
Archive the project's working directory.
Creates a copy of the project's working directory
on the `snapshots` storage.
"""
# Get the upload policy
policy = snapshots_storage().generate_post_policy(path)
url = policy.get("url") if policy else None
secrets = policy.get("fields") if policy else None
return Job.objects.create(
project=self,
creator=user,
method=JobMethod.archive.name,
params=dict(project=self.id, snapshot=snapshot, path=path, url=url,),
secrets=secrets,
description=f"Archive project '{self.name}'",
**callback,
)
def session(self, request: HttpRequest) -> Job:
"""
Create a session job for the project.
"""
job = Job.objects.create(
project=self,
creator=request.user if request.user.is_authenticated else None,
method=JobMethod.session.name,
params=dict(container_image=self.container_image),
description=f"Session for project '{self.name}'",
)
job.add_user(request)
return job
def make_project_creator_an_owner(
sender, instance: Project, created: bool, *args, **kwargs
):
"""
Make the project create an owner.
Makes sure each project has at least one owner.
"""
if sender is Project and created and instance.creator:
ProjectAgent.objects.create(
project=instance, user=instance.creator, role=ProjectRole.OWNER.name
)
post_save.connect(make_project_creator_an_owner, sender=Project)
class ProjectRole(EnumChoice):
"""
A user or team role within an account.
See `get_description` for what each role can do.
Some of roles can also be applied to the public.
For example, a project might be made public with
the `REVIEWER` role allowing anyone to comment.
"""
READER = "Reader"
REVIEWER = "Reviewer"
EDITOR = "Editor"
AUTHOR = "Author"
MANAGER = "Manager"
OWNER = "Owner"
@classmethod
def get_description(cls, role: "ProjectRole"):
"""Get the description of a project role."""
return {
cls.READER.name: "Can view project, but not make edits or share with others.",
cls.REVIEWER.name: "Can view project files and leave comments, but not edit project or share with others.",
cls.EDITOR.name: "Can edit project files and leave comments, but not share with others.",
cls.AUTHOR.name: "Can edit project files and leave comments, but not share with others.",
cls.MANAGER.name: "Can edit project files, settings, and share with others.",
cls.OWNER.name: "Can edit project files, settings, share with others, as well as delete a project",
}[role.name]
@classmethod
def from_string(cls, role: str) -> "ProjectRole":
"""Get the role from a string."""
role = role.lower()
for r in cls:
if role == r.name.lower():
return r
raise ValueError('No project role matching "{}"'.format(role))
@classmethod
def and_above(cls, role: "ProjectRole") -> List["ProjectRole"]:
"""Get a list including the role and all the roles above it."""
roles: List["ProjectRole"] = []
for r in cls:
if r == role or len(roles) > 0:
roles.append(r)
return roles
class ProjectAgent(models.Model):
"""
A user or team.
Users or teams can be added, with a role, to a project.
"""
project = models.ForeignKey(
Project,
on_delete=models.CASCADE,
related_name="agents",
help_text="Project to which the user or team is being given access to.",
)
user = models.ForeignKey(
User,
null=True,
blank=True,
on_delete=models.CASCADE,
related_name="projects",
help_text="A user given access to the project.",
)
team = models.ForeignKey(
AccountTeam,
null=True,
blank=True,
on_delete=models.CASCADE,
related_name="projects",
help_text="A team given access to the project.",
)
role = models.CharField(
null=False,
blank=False,
max_length=32,
choices=ProjectRole.as_choices(),
help_text="Role the user or team has within the project.",
)
class Meta:
constraints = [
# Each user should only have one role for a project
models.UniqueConstraint(
fields=["project", "user"], name="%(class)s_unique_project_user"
),
# Each team should only have one role for a project
models.UniqueConstraint(
fields=["project", "team"], name="%(class)s_unique_project_team"
),
]
class ProjectEvent(models.Model):
"""
A project event.
Project events are recorded primarily to provide traceability.
There are no fixed event types and arbitrary JSON data can be stored
in the `data` field. Events may be associated with a `source` or a `user`.
"""
id = models.BigAutoField(primary_key=True, help_text="Id of the event.",)
project = models.ForeignKey(
Project,
on_delete=models.CASCADE,
related_name="events",
help_text="Project to which the event applies.",
)
time = models.DateTimeField(auto_now_add=True, help_text="Time of the event.")
data = models.JSONField(help_text="Data associated with the event.")
source = models.ForeignKey(
"Source",
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name="events",
help_text="Source associated with the event.",
)
user = models.ForeignKey(
User,
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name="events",
help_text="User associated with the event.",
)
| images = self.files.filter(
current=True, path=self.image_path, **modified_since
).order_by("-modified")
if len(images) > 0:
self.set_image_from_file(images[0]) | conditional_block |
sbfdinitiator_ef4ed37c4520e95225e35be31ea6dde4.py | # MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import sys
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
if sys.version_info >= (3, 5):
from typing import List, Any, Union
class SbfdInitiator(Base):
"""
The SbfdInitiator class encapsulates a required sbfdInitiator resource which will be retrieved from the server every time the property is accessed.
"""
__slots__ = ()
_SDM_NAME = "sbfdInitiator"
_SDM_ATT_MAP = {
"Active": "active",
"Count": "count",
"DescriptiveName": "descriptiveName",
"DestIPAddr": "destIPAddr",
"MplsLabelCount": "mplsLabelCount",
"MyDiscriminator": "myDiscriminator",
"Name": "name",
"PeerDiscriminator": "peerDiscriminator",
"SessionInfo": "sessionInfo",
"TimeoutMultiplier": "timeoutMultiplier",
"TxInterval": "txInterval",
}
_SDM_ENUM_MAP = {}
def __init__(self, parent, list_op=False):
super(SbfdInitiator, self).__init__(parent, list_op)
@property
def MplsLabelList(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.mplslabellist_37213b54082ea2315b262cbc86661827.MplsLabelList): An instance of the MplsLabelList class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.mplslabellist_37213b54082ea2315b262cbc86661827 import (
MplsLabelList,
)
if len(self._object_properties) > 0:
if self._properties.get("MplsLabelList", None) is not None:
return self._properties.get("MplsLabelList")
return MplsLabelList(self)
@property
def Active(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Activate/Deactivate Configuration.
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP["Active"]))
@property
def Count(self):
# type: () -> int
"""
Returns
-------
- number: Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group.
"""
return self._get_attribute(self._SDM_ATT_MAP["Count"])
@property
def DescriptiveName(self):
# type: () -> str
"""
Returns
-------
- str: Longer, more descriptive name for element. It's not guaranteed to be unique like -name-, but may offer more context.
"""
return self._get_attribute(self._SDM_ATT_MAP["DescriptiveName"])
@property
def DestIPAddr(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Destination IP address in SBFD Packet,which is sent to Responder. Should be in 127 subnet as defined in specification.
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP["DestIPAddr"]))
@property
def MplsLabelCount(self):
# type: () -> int
"""
Returns
-------
- number: Number of MPLS Labels.
"""
return self._get_attribute(self._SDM_ATT_MAP["MplsLabelCount"])
@MplsLabelCount.setter
def MplsLabelCount(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP["MplsLabelCount"], value)
@property
def MyDiscriminator(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): The value to be used for My Discriminator in S-BFD packets sent to the Responder by this Initiator. Should be unique in sessions from a single Initiator.
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(
self, self._get_attribute(self._SDM_ATT_MAP["MyDiscriminator"])
)
@property
def Name(self):
# type: () -> str
"""
Returns
-------
- str: Name of NGPF element, guaranteed to be unique in Scenario
"""
return self._get_attribute(self._SDM_ATT_MAP["Name"])
@Name.setter
def Name(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP["Name"], value)
@property
def PeerDiscriminator(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Configured Peer Discriminator which should match the configured Local or My Discriminator on the target Responder.
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(
self, self._get_attribute(self._SDM_ATT_MAP["PeerDiscriminator"])
)
@property
def SessionInfo(self):
# type: () -> List[str]
"""
Returns
-------
- list(str[adminDown | down | up]): Current state of the S-BFD Initiator Session. It is normally Up or Down depending on whether Responder is responding correctly or not.
"""
return self._get_attribute(self._SDM_ATT_MAP["SessionInfo"])
@property
def TimeoutMultiplier(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): If packets are not recieved within the negotiated transmit Interval * this value , session is brought down and Flap Count is increased in statistics.
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(
self, self._get_attribute(self._SDM_ATT_MAP["TimeoutMultiplier"])
)
@property
def TxInterval(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Tx Interval in Milli Seconds. Note: Initial transmission interval is set to maximum of 1s and configured Tx Interval. Once session comes up, the timer will auto-transition to the negotiated value i.e. maximum of local Tx Interval and recieved Rx Interval from Responder.
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP["TxInterval"]))
def update(self, MplsLabelCount=None, Name=None):
# type: (int, str) -> SbfdInitiator
"""Updates sbfdInitiator resource on the server.
This method has some named parameters with a type: obj (Multivalue).
The Multivalue class has documentation that details the possible values for those named parameters.
Args
----
- MplsLabelCount (number): Number of MPLS Labels.
- Name (str): Name of NGPF element, guaranteed to be unique in Scenario
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))
def | (
self,
Count=None,
DescriptiveName=None,
MplsLabelCount=None,
Name=None,
SessionInfo=None,
):
# type: (int, str, int, str, List[str]) -> SbfdInitiator
"""Finds and retrieves sbfdInitiator resources from the server.
All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve sbfdInitiator resources from the server.
To retrieve an exact match ensure the parameter value starts with ^ and ends with $
By default the find method takes no parameters and will retrieve all sbfdInitiator resources from the server.
Args
----
- Count (number): Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group.
- DescriptiveName (str): Longer, more descriptive name for element. It's not guaranteed to be unique like -name-, but may offer more context.
- MplsLabelCount (number): Number of MPLS Labels.
- Name (str): Name of NGPF element, guaranteed to be unique in Scenario
- SessionInfo (list(str[adminDown | down | up])): Current state of the S-BFD Initiator Session. It is normally Up or Down depending on whether Responder is responding correctly or not.
Returns
-------
- self: This instance with matching sbfdInitiator resources retrieved from the server available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))
def read(self, href):
"""Retrieves a single instance of sbfdInitiator data from the server.
Args
----
- href (str): An href to the instance to be retrieved
Returns
-------
- self: This instance with the sbfdInitiator resources from the server available through an iterator or index
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
return self._read(href)
def get_device_ids(
self,
PortNames=None,
Active=None,
DestIPAddr=None,
MyDiscriminator=None,
PeerDiscriminator=None,
TimeoutMultiplier=None,
TxInterval=None,
):
"""Base class infrastructure that gets a list of sbfdInitiator device ids encapsulated by this object.
Use the optional regex parameters in the method to refine the list of device ids encapsulated by this object.
Args
----
- PortNames (str): optional regex of port names
- Active (str): optional regex of active
- DestIPAddr (str): optional regex of destIPAddr
- MyDiscriminator (str): optional regex of myDiscriminator
- PeerDiscriminator (str): optional regex of peerDiscriminator
- TimeoutMultiplier (str): optional regex of timeoutMultiplier
- TxInterval (str): optional regex of txInterval
Returns
-------
- list(int): A list of device ids that meets the regex criteria provided in the method parameters
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._get_ngpf_device_ids(locals())
| find | identifier_name |
sbfdinitiator_ef4ed37c4520e95225e35be31ea6dde4.py | # MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import sys
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
if sys.version_info >= (3, 5):
from typing import List, Any, Union
class SbfdInitiator(Base):
"""
The SbfdInitiator class encapsulates a required sbfdInitiator resource which will be retrieved from the server every time the property is accessed.
"""
__slots__ = ()
_SDM_NAME = "sbfdInitiator"
_SDM_ATT_MAP = {
"Active": "active",
"Count": "count",
"DescriptiveName": "descriptiveName",
"DestIPAddr": "destIPAddr",
"MplsLabelCount": "mplsLabelCount",
"MyDiscriminator": "myDiscriminator",
"Name": "name",
"PeerDiscriminator": "peerDiscriminator",
"SessionInfo": "sessionInfo",
"TimeoutMultiplier": "timeoutMultiplier",
"TxInterval": "txInterval",
}
_SDM_ENUM_MAP = {}
def __init__(self, parent, list_op=False):
super(SbfdInitiator, self).__init__(parent, list_op)
@property
def MplsLabelList(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.mplslabellist_37213b54082ea2315b262cbc86661827.MplsLabelList): An instance of the MplsLabelList class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.mplslabellist_37213b54082ea2315b262cbc86661827 import (
MplsLabelList,
)
if len(self._object_properties) > 0:
if self._properties.get("MplsLabelList", None) is not None:
return self._properties.get("MplsLabelList")
return MplsLabelList(self)
@property
def Active(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Activate/Deactivate Configuration.
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP["Active"]))
@property
def Count(self):
# type: () -> int
"""
Returns
-------
- number: Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group.
"""
return self._get_attribute(self._SDM_ATT_MAP["Count"])
@property
def DescriptiveName(self):
# type: () -> str
"""
Returns
-------
- str: Longer, more descriptive name for element. It's not guaranteed to be unique like -name-, but may offer more context.
"""
return self._get_attribute(self._SDM_ATT_MAP["DescriptiveName"])
@property
def DestIPAddr(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Destination IP address in SBFD Packet,which is sent to Responder. Should be in 127 subnet as defined in specification.
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP["DestIPAddr"]))
@property
def MplsLabelCount(self):
# type: () -> int
"""
Returns
-------
- number: Number of MPLS Labels.
"""
return self._get_attribute(self._SDM_ATT_MAP["MplsLabelCount"])
@MplsLabelCount.setter
def MplsLabelCount(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP["MplsLabelCount"], value)
@property
def MyDiscriminator(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): The value to be used for My Discriminator in S-BFD packets sent to the Responder by this Initiator. Should be unique in sessions from a single Initiator.
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(
self, self._get_attribute(self._SDM_ATT_MAP["MyDiscriminator"])
)
@property
def Name(self):
# type: () -> str
"""
Returns
-------
- str: Name of NGPF element, guaranteed to be unique in Scenario
"""
return self._get_attribute(self._SDM_ATT_MAP["Name"])
@Name.setter
def Name(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP["Name"], value)
@property
def PeerDiscriminator(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Configured Peer Discriminator which should match the configured Local or My Discriminator on the target Responder.
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(
self, self._get_attribute(self._SDM_ATT_MAP["PeerDiscriminator"])
)
@property
def SessionInfo(self):
# type: () -> List[str]
"""
Returns
-------
- list(str[adminDown | down | up]): Current state of the S-BFD Initiator Session. It is normally Up or Down depending on whether Responder is responding correctly or not.
"""
return self._get_attribute(self._SDM_ATT_MAP["SessionInfo"])
@property
def TimeoutMultiplier(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): If packets are not recieved within the negotiated transmit Interval * this value , session is brought down and Flap Count is increased in statistics.
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(
self, self._get_attribute(self._SDM_ATT_MAP["TimeoutMultiplier"])
)
@property
def TxInterval(self): | Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Tx Interval in Milli Seconds. Note: Initial transmission interval is set to maximum of 1s and configured Tx Interval. Once session comes up, the timer will auto-transition to the negotiated value i.e. maximum of local Tx Interval and recieved Rx Interval from Responder.
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP["TxInterval"]))
def update(self, MplsLabelCount=None, Name=None):
# type: (int, str) -> SbfdInitiator
"""Updates sbfdInitiator resource on the server.
This method has some named parameters with a type: obj (Multivalue).
The Multivalue class has documentation that details the possible values for those named parameters.
Args
----
- MplsLabelCount (number): Number of MPLS Labels.
- Name (str): Name of NGPF element, guaranteed to be unique in Scenario
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))
def find(
self,
Count=None,
DescriptiveName=None,
MplsLabelCount=None,
Name=None,
SessionInfo=None,
):
# type: (int, str, int, str, List[str]) -> SbfdInitiator
"""Finds and retrieves sbfdInitiator resources from the server.
All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve sbfdInitiator resources from the server.
To retrieve an exact match ensure the parameter value starts with ^ and ends with $
By default the find method takes no parameters and will retrieve all sbfdInitiator resources from the server.
Args
----
- Count (number): Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group.
- DescriptiveName (str): Longer, more descriptive name for element. It's not guaranteed to be unique like -name-, but may offer more context.
- MplsLabelCount (number): Number of MPLS Labels.
- Name (str): Name of NGPF element, guaranteed to be unique in Scenario
- SessionInfo (list(str[adminDown | down | up])): Current state of the S-BFD Initiator Session. It is normally Up or Down depending on whether Responder is responding correctly or not.
Returns
-------
- self: This instance with matching sbfdInitiator resources retrieved from the server available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))
def read(self, href):
"""Retrieves a single instance of sbfdInitiator data from the server.
Args
----
- href (str): An href to the instance to be retrieved
Returns
-------
- self: This instance with the sbfdInitiator resources from the server available through an iterator or index
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
return self._read(href)
def get_device_ids(
self,
PortNames=None,
Active=None,
DestIPAddr=None,
MyDiscriminator=None,
PeerDiscriminator=None,
TimeoutMultiplier=None,
TxInterval=None,
):
"""Base class infrastructure that gets a list of sbfdInitiator device ids encapsulated by this object.
Use the optional regex parameters in the method to refine the list of device ids encapsulated by this object.
Args
----
- PortNames (str): optional regex of port names
- Active (str): optional regex of active
- DestIPAddr (str): optional regex of destIPAddr
- MyDiscriminator (str): optional regex of myDiscriminator
- PeerDiscriminator (str): optional regex of peerDiscriminator
- TimeoutMultiplier (str): optional regex of timeoutMultiplier
- TxInterval (str): optional regex of txInterval
Returns
-------
- list(int): A list of device ids that meets the regex criteria provided in the method parameters
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._get_ngpf_device_ids(locals()) | # type: () -> 'Multivalue'
""" | random_line_split |
sbfdinitiator_ef4ed37c4520e95225e35be31ea6dde4.py | # MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import sys
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
if sys.version_info >= (3, 5):
from typing import List, Any, Union
class SbfdInitiator(Base):
"""
The SbfdInitiator class encapsulates a required sbfdInitiator resource which will be retrieved from the server every time the property is accessed.
"""
__slots__ = ()
_SDM_NAME = "sbfdInitiator"
_SDM_ATT_MAP = {
"Active": "active",
"Count": "count",
"DescriptiveName": "descriptiveName",
"DestIPAddr": "destIPAddr",
"MplsLabelCount": "mplsLabelCount",
"MyDiscriminator": "myDiscriminator",
"Name": "name",
"PeerDiscriminator": "peerDiscriminator",
"SessionInfo": "sessionInfo",
"TimeoutMultiplier": "timeoutMultiplier",
"TxInterval": "txInterval",
}
_SDM_ENUM_MAP = {}
def __init__(self, parent, list_op=False):
super(SbfdInitiator, self).__init__(parent, list_op)
@property
def MplsLabelList(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.mplslabellist_37213b54082ea2315b262cbc86661827.MplsLabelList): An instance of the MplsLabelList class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.mplslabellist_37213b54082ea2315b262cbc86661827 import (
MplsLabelList,
)
if len(self._object_properties) > 0:
|
return MplsLabelList(self)
@property
def Active(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Activate/Deactivate Configuration.
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP["Active"]))
@property
def Count(self):
# type: () -> int
"""
Returns
-------
- number: Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group.
"""
return self._get_attribute(self._SDM_ATT_MAP["Count"])
@property
def DescriptiveName(self):
# type: () -> str
"""
Returns
-------
- str: Longer, more descriptive name for element. It's not guaranteed to be unique like -name-, but may offer more context.
"""
return self._get_attribute(self._SDM_ATT_MAP["DescriptiveName"])
@property
def DestIPAddr(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Destination IP address in SBFD Packet,which is sent to Responder. Should be in 127 subnet as defined in specification.
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP["DestIPAddr"]))
@property
def MplsLabelCount(self):
# type: () -> int
"""
Returns
-------
- number: Number of MPLS Labels.
"""
return self._get_attribute(self._SDM_ATT_MAP["MplsLabelCount"])
@MplsLabelCount.setter
def MplsLabelCount(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP["MplsLabelCount"], value)
@property
def MyDiscriminator(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): The value to be used for My Discriminator in S-BFD packets sent to the Responder by this Initiator. Should be unique in sessions from a single Initiator.
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(
self, self._get_attribute(self._SDM_ATT_MAP["MyDiscriminator"])
)
@property
def Name(self):
# type: () -> str
"""
Returns
-------
- str: Name of NGPF element, guaranteed to be unique in Scenario
"""
return self._get_attribute(self._SDM_ATT_MAP["Name"])
@Name.setter
def Name(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP["Name"], value)
@property
def PeerDiscriminator(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Configured Peer Discriminator which should match the configured Local or My Discriminator on the target Responder.
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(
self, self._get_attribute(self._SDM_ATT_MAP["PeerDiscriminator"])
)
@property
def SessionInfo(self):
# type: () -> List[str]
"""
Returns
-------
- list(str[adminDown | down | up]): Current state of the S-BFD Initiator Session. It is normally Up or Down depending on whether Responder is responding correctly or not.
"""
return self._get_attribute(self._SDM_ATT_MAP["SessionInfo"])
@property
def TimeoutMultiplier(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): If packets are not recieved within the negotiated transmit Interval * this value , session is brought down and Flap Count is increased in statistics.
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(
self, self._get_attribute(self._SDM_ATT_MAP["TimeoutMultiplier"])
)
@property
def TxInterval(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Tx Interval in Milli Seconds. Note: Initial transmission interval is set to maximum of 1s and configured Tx Interval. Once session comes up, the timer will auto-transition to the negotiated value i.e. maximum of local Tx Interval and recieved Rx Interval from Responder.
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP["TxInterval"]))
def update(self, MplsLabelCount=None, Name=None):
# type: (int, str) -> SbfdInitiator
"""Updates sbfdInitiator resource on the server.
This method has some named parameters with a type: obj (Multivalue).
The Multivalue class has documentation that details the possible values for those named parameters.
Args
----
- MplsLabelCount (number): Number of MPLS Labels.
- Name (str): Name of NGPF element, guaranteed to be unique in Scenario
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))
def find(
self,
Count=None,
DescriptiveName=None,
MplsLabelCount=None,
Name=None,
SessionInfo=None,
):
# type: (int, str, int, str, List[str]) -> SbfdInitiator
"""Finds and retrieves sbfdInitiator resources from the server.
All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve sbfdInitiator resources from the server.
To retrieve an exact match ensure the parameter value starts with ^ and ends with $
By default the find method takes no parameters and will retrieve all sbfdInitiator resources from the server.
Args
----
- Count (number): Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group.
- DescriptiveName (str): Longer, more descriptive name for element. It's not guaranteed to be unique like -name-, but may offer more context.
- MplsLabelCount (number): Number of MPLS Labels.
- Name (str): Name of NGPF element, guaranteed to be unique in Scenario
- SessionInfo (list(str[adminDown | down | up])): Current state of the S-BFD Initiator Session. It is normally Up or Down depending on whether Responder is responding correctly or not.
Returns
-------
- self: This instance with matching sbfdInitiator resources retrieved from the server available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))
def read(self, href):
"""Retrieves a single instance of sbfdInitiator data from the server.
Args
----
- href (str): An href to the instance to be retrieved
Returns
-------
- self: This instance with the sbfdInitiator resources from the server available through an iterator or index
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
return self._read(href)
def get_device_ids(
self,
PortNames=None,
Active=None,
DestIPAddr=None,
MyDiscriminator=None,
PeerDiscriminator=None,
TimeoutMultiplier=None,
TxInterval=None,
):
"""Base class infrastructure that gets a list of sbfdInitiator device ids encapsulated by this object.
Use the optional regex parameters in the method to refine the list of device ids encapsulated by this object.
Args
----
- PortNames (str): optional regex of port names
- Active (str): optional regex of active
- DestIPAddr (str): optional regex of destIPAddr
- MyDiscriminator (str): optional regex of myDiscriminator
- PeerDiscriminator (str): optional regex of peerDiscriminator
- TimeoutMultiplier (str): optional regex of timeoutMultiplier
- TxInterval (str): optional regex of txInterval
Returns
-------
- list(int): A list of device ids that meets the regex criteria provided in the method parameters
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._get_ngpf_device_ids(locals())
| if self._properties.get("MplsLabelList", None) is not None:
return self._properties.get("MplsLabelList") | conditional_block |
sbfdinitiator_ef4ed37c4520e95225e35be31ea6dde4.py | # MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import sys
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
if sys.version_info >= (3, 5):
from typing import List, Any, Union
class SbfdInitiator(Base):
"""
The SbfdInitiator class encapsulates a required sbfdInitiator resource which will be retrieved from the server every time the property is accessed.
"""
__slots__ = ()
_SDM_NAME = "sbfdInitiator"
_SDM_ATT_MAP = {
"Active": "active",
"Count": "count",
"DescriptiveName": "descriptiveName",
"DestIPAddr": "destIPAddr",
"MplsLabelCount": "mplsLabelCount",
"MyDiscriminator": "myDiscriminator",
"Name": "name",
"PeerDiscriminator": "peerDiscriminator",
"SessionInfo": "sessionInfo",
"TimeoutMultiplier": "timeoutMultiplier",
"TxInterval": "txInterval",
}
_SDM_ENUM_MAP = {}
def __init__(self, parent, list_op=False):
super(SbfdInitiator, self).__init__(parent, list_op)
@property
def MplsLabelList(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.mplslabellist_37213b54082ea2315b262cbc86661827.MplsLabelList): An instance of the MplsLabelList class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.mplslabellist_37213b54082ea2315b262cbc86661827 import (
MplsLabelList,
)
if len(self._object_properties) > 0:
if self._properties.get("MplsLabelList", None) is not None:
return self._properties.get("MplsLabelList")
return MplsLabelList(self)
@property
def Active(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Activate/Deactivate Configuration.
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP["Active"]))
@property
def Count(self):
# type: () -> int
"""
Returns
-------
- number: Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group.
"""
return self._get_attribute(self._SDM_ATT_MAP["Count"])
@property
def DescriptiveName(self):
# type: () -> str
"""
Returns
-------
- str: Longer, more descriptive name for element. It's not guaranteed to be unique like -name-, but may offer more context.
"""
return self._get_attribute(self._SDM_ATT_MAP["DescriptiveName"])
@property
def DestIPAddr(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Destination IP address in SBFD Packet,which is sent to Responder. Should be in 127 subnet as defined in specification.
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP["DestIPAddr"]))
@property
def MplsLabelCount(self):
# type: () -> int
"""
Returns
-------
- number: Number of MPLS Labels.
"""
return self._get_attribute(self._SDM_ATT_MAP["MplsLabelCount"])
@MplsLabelCount.setter
def MplsLabelCount(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP["MplsLabelCount"], value)
@property
def MyDiscriminator(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): The value to be used for My Discriminator in S-BFD packets sent to the Responder by this Initiator. Should be unique in sessions from a single Initiator.
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(
self, self._get_attribute(self._SDM_ATT_MAP["MyDiscriminator"])
)
@property
def Name(self):
# type: () -> str
|
@Name.setter
def Name(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP["Name"], value)
@property
def PeerDiscriminator(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Configured Peer Discriminator which should match the configured Local or My Discriminator on the target Responder.
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(
self, self._get_attribute(self._SDM_ATT_MAP["PeerDiscriminator"])
)
@property
def SessionInfo(self):
# type: () -> List[str]
"""
Returns
-------
- list(str[adminDown | down | up]): Current state of the S-BFD Initiator Session. It is normally Up or Down depending on whether Responder is responding correctly or not.
"""
return self._get_attribute(self._SDM_ATT_MAP["SessionInfo"])
@property
def TimeoutMultiplier(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): If packets are not recieved within the negotiated transmit Interval * this value , session is brought down and Flap Count is increased in statistics.
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(
self, self._get_attribute(self._SDM_ATT_MAP["TimeoutMultiplier"])
)
@property
def TxInterval(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Tx Interval in Milli Seconds. Note: Initial transmission interval is set to maximum of 1s and configured Tx Interval. Once session comes up, the timer will auto-transition to the negotiated value i.e. maximum of local Tx Interval and recieved Rx Interval from Responder.
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP["TxInterval"]))
def update(self, MplsLabelCount=None, Name=None):
# type: (int, str) -> SbfdInitiator
"""Updates sbfdInitiator resource on the server.
This method has some named parameters with a type: obj (Multivalue).
The Multivalue class has documentation that details the possible values for those named parameters.
Args
----
- MplsLabelCount (number): Number of MPLS Labels.
- Name (str): Name of NGPF element, guaranteed to be unique in Scenario
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))
def find(
self,
Count=None,
DescriptiveName=None,
MplsLabelCount=None,
Name=None,
SessionInfo=None,
):
# type: (int, str, int, str, List[str]) -> SbfdInitiator
"""Finds and retrieves sbfdInitiator resources from the server.
All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve sbfdInitiator resources from the server.
To retrieve an exact match ensure the parameter value starts with ^ and ends with $
By default the find method takes no parameters and will retrieve all sbfdInitiator resources from the server.
Args
----
- Count (number): Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group.
- DescriptiveName (str): Longer, more descriptive name for element. It's not guaranteed to be unique like -name-, but may offer more context.
- MplsLabelCount (number): Number of MPLS Labels.
- Name (str): Name of NGPF element, guaranteed to be unique in Scenario
- SessionInfo (list(str[adminDown | down | up])): Current state of the S-BFD Initiator Session. It is normally Up or Down depending on whether Responder is responding correctly or not.
Returns
-------
- self: This instance with matching sbfdInitiator resources retrieved from the server available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))
def read(self, href):
"""Retrieves a single instance of sbfdInitiator data from the server.
Args
----
- href (str): An href to the instance to be retrieved
Returns
-------
- self: This instance with the sbfdInitiator resources from the server available through an iterator or index
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
return self._read(href)
def get_device_ids(
self,
PortNames=None,
Active=None,
DestIPAddr=None,
MyDiscriminator=None,
PeerDiscriminator=None,
TimeoutMultiplier=None,
TxInterval=None,
):
"""Base class infrastructure that gets a list of sbfdInitiator device ids encapsulated by this object.
Use the optional regex parameters in the method to refine the list of device ids encapsulated by this object.
Args
----
- PortNames (str): optional regex of port names
- Active (str): optional regex of active
- DestIPAddr (str): optional regex of destIPAddr
- MyDiscriminator (str): optional regex of myDiscriminator
- PeerDiscriminator (str): optional regex of peerDiscriminator
- TimeoutMultiplier (str): optional regex of timeoutMultiplier
- TxInterval (str): optional regex of txInterval
Returns
-------
- list(int): A list of device ids that meets the regex criteria provided in the method parameters
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._get_ngpf_device_ids(locals())
| """
Returns
-------
- str: Name of NGPF element, guaranteed to be unique in Scenario
"""
return self._get_attribute(self._SDM_ATT_MAP["Name"]) | identifier_body |
main.go | // Copyright 2021 VMware, Inc. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package main
import (
"fmt"
"net/url"
"os"
"sort"
"strings"
"time"
"github.com/aunum/log"
"github.com/spf13/cobra"
"golang.org/x/oauth2"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/tools/clientcmd"
cliv1alpha1 "github.com/vmware-tanzu/tanzu-framework/apis/cli/v1alpha1"
configv1alpha1 "github.com/vmware-tanzu/tanzu-framework/apis/config/v1alpha1"
"github.com/vmware-tanzu/tanzu-framework/pkg/v1/auth/csp"
tkgauth "github.com/vmware-tanzu/tanzu-framework/pkg/v1/auth/tkg"
"github.com/vmware-tanzu/tanzu-framework/pkg/v1/cli/command/plugin"
"github.com/vmware-tanzu/tanzu-framework/pkg/v1/cli/component"
"github.com/vmware-tanzu/tanzu-framework/pkg/v1/config"
)
var descriptor = cliv1alpha1.PluginDescriptor{
Name: "login",
Description: "Login to the platform",
Group: cliv1alpha1.SystemCmdGroup,
Aliases: []string{"lo", "logins"},
}
var (
stderrOnly, forceCSP, staging bool
endpoint, name, apiToken, server, kubeConfig, kubecontext string
)
const (
knownGlobalHost = "cloud.vmware.com"
)
func main() {
p, err := plugin.NewPlugin(&descriptor)
if err != nil {
log.Fatal(err)
}
p.Cmd.Flags().StringVar(&endpoint, "endpoint", "", "endpoint to login to")
p.Cmd.Flags().StringVar(&name, "name", "", "name of the server")
p.Cmd.Flags().StringVar(&apiToken, "apiToken", "", "API token for global login")
p.Cmd.Flags().StringVar(&server, "server", "", "login to the given server")
p.Cmd.Flags().StringVar(&kubeConfig, "kubeconfig", "", "path to kubeconfig management cluster. Valid only if user doesn't choose 'endpoint' option.(See [*])")
p.Cmd.Flags().StringVar(&kubecontext, "context", "", "the context in the kubeconfig to use for management cluster. Valid only if user doesn't choose 'endpoint' option.(See [*]) ")
p.Cmd.Flags().BoolVar(&stderrOnly, "stderr-only", false, "send all output to stderr rather than stdout")
p.Cmd.Flags().BoolVar(&forceCSP, "force-csp", false, "force the endpoint to be logged in as a csp server")
p.Cmd.Flags().BoolVar(&staging, "staging", false, "use CSP staging issuer")
p.Cmd.Flags().MarkHidden("stderr-only") //nolint
p.Cmd.Flags().MarkHidden("force-csp") //nolint
p.Cmd.Flags().MarkHidden("staging") //nolint
p.Cmd.RunE = login
p.Cmd.Example = `
# Login to TKG management cluster using endpoint
tanzu login --endpoint "https://login.example.com" --name mgmt-cluster
# Login to TKG management cluster by using kubeconfig path and context for the management cluster
tanzu login --kubeconfig path/to/kubeconfig --context path/to/context --name mgmt-cluster
# Login to TKG management cluster by using default kubeconfig path and context for the management cluster
tanzu login --context path/to/context --name mgmt-cluster
# Login to an existing server
tanzu login --server mgmt-cluster
[*] : User has two options to login to TKG. User can choose the login endpoint option
by providing 'endpoint', or user can choose to use the kubeconfig for the management cluster by
providing 'kubeconfig' and 'context'. If only '--context' is set and '--kubeconfig' is unset
$KUBECONFIG env variable would be used and, if $KUBECONFIG env is also unset default
kubeconfig($HOME/.kube/config) would be used
`
if err := p.Execute(); err != nil {
os.Exit(1)
}
}
func login(cmd *cobra.Command, args []string) (err error) {
cfg, err := config.GetClientConfig()
if _, ok := err.(*config.ClientConfigNotExistError); ok {
cfg, err = config.NewClientConfig()
if err != nil {
return err
}
} else if err != nil {
return err
}
newServerSelector := "+ new server"
var serverTarget *configv1alpha1.Server
if name != "" {
serverTarget, err = createNewServer()
if err != nil {
return err
}
} else if server == "" {
serverTarget, err = getServerTarget(cfg, newServerSelector)
if err != nil {
return err
}
} else {
serverTarget, err = config.GetServer(server)
if err != nil {
return err
}
}
if server == newServerSelector {
serverTarget, err = createNewServer()
if err != nil {
return err
}
}
if serverTarget.Type == configv1alpha1.GlobalServerType {
return globalLogin(serverTarget)
}
return managementClusterLogin(serverTarget)
}
func getServerTarget(cfg *configv1alpha1.ClientConfig, newServerSelector string) (*configv1alpha1.Server, error) {
promptOpts := getPromptOpts()
servers := map[string]*configv1alpha1.Server{}
for _, server := range cfg.KnownServers {
ep, err := config.EndpointFromServer(server)
if err != nil {
return nil, err
}
s := rpad(server.Name, 20)
s = fmt.Sprintf("%s(%s)", s, ep)
servers[s] = server
}
if endpoint == "" {
endpoint, _ = os.LookupEnv(config.EnvEndpointKey)
}
// If there are no existing servers
if len(servers) == 0 {
return createNewServer()
}
serverKeys := getKeys(servers)
serverKeys = append(serverKeys, newServerSelector)
servers[newServerSelector] = &configv1alpha1.Server{}
err := component.Prompt(
&component.PromptConfig{
Message: "Select a server",
Options: serverKeys,
Default: serverKeys[0],
},
&server,
promptOpts...,
)
if err != nil {
return nil, err
}
return servers[server], nil
}
func getKeys(m map[string]*configv1alpha1.Server) []string {
keys := make([]string, 0, len(m))
for key := range m {
keys = append(keys, key)
}
sort.Strings(keys)
return keys
}
func isGlobalServer(endpoint string) bool {
if strings.Contains(endpoint, knownGlobalHost) {
return true
}
if forceCSP {
return true
}
return false
}
func rpad(s string, padding int) string {
template := fmt.Sprintf("%%-%ds", padding)
return fmt.Sprintf(template, s)
}
func getPromptOpts() []component.PromptOpt {
var promptOpts []component.PromptOpt
if stderrOnly {
// This uses stderr because it needs to work inside the kubectl exec plugin flow where stdout is reserved.
promptOpts = append(promptOpts, component.WithStdio(os.Stdin, os.Stderr, os.Stderr))
}
return promptOpts
}
func createNewServer() (server *configv1alpha1.Server, err error) {
// user provided command line options to create a server using kubeconfig[optional] and context
if kubecontext != "" {
return createServerWithKubeconfig()
}
// user provided command line options to create a server using endpoint
if endpoint != "" {
return createServerWithEndpoint()
}
promptOpts := getPromptOpts()
var loginType string
err = component.Prompt(
&component.PromptConfig{
Message: "Select login type",
Options: []string{"Server endpoint", "Local kubeconfig"},
Default: "Server endpoint",
},
&loginType,
promptOpts...,
)
if err != nil {
return server, err
}
if loginType == "Server endpoint" {
return createServerWithEndpoint()
}
return createServerWithKubeconfig()
}
func createServerWithKubeconfig() (server *configv1alpha1.Server, err error) {
promptOpts := getPromptOpts()
if kubeConfig == "" && kubecontext == "" {
err = component.Prompt(
&component.PromptConfig{
Message: "Enter path to kubeconfig (if any)",
},
&kubeConfig,
promptOpts...,
)
if err != nil {
return
}
}
if kubeConfig == "" {
kubeConfig = getDefaultKubeconfigPath()
}
if kubeConfig != "" && kubecontext == "" {
err = component.Prompt(
&component.PromptConfig{
Message: "Enter kube context to use",
},
&kubecontext,
promptOpts...,
)
if err != nil {
return
}
}
if name == "" {
err = component.Prompt(
&component.PromptConfig{
Message: "Give the server a name",
},
&name,
promptOpts...,
)
if err != nil {
return
}
}
nameExists, err := config.ServerExists(name)
if err != nil {
return server, err
}
if nameExists {
err = fmt.Errorf("server %q already exists", name)
return
}
server = &configv1alpha1.Server{
Name: name,
Type: configv1alpha1.ManagementClusterServerType,
ManagementClusterOpts: &configv1alpha1.ManagementClusterServer{
Path: kubeConfig,
Context: kubecontext,
Endpoint: endpoint},
}
return server, err
}
func createServerWithEndpoint() (server *configv1alpha1.Server, err error) {
promptOpts := getPromptOpts()
if endpoint == "" {
err = component.Prompt(
&component.PromptConfig{
Message: "Enter server endpoint",
},
&endpoint,
promptOpts...,
)
if err != nil {
return
}
}
if name == "" {
err = component.Prompt(
&component.PromptConfig{
Message: "Give the server a name",
},
&name,
promptOpts...,
)
if err != nil {
| }
nameExists, err := config.ServerExists(name)
if err != nil {
return server, err
}
if nameExists {
err = fmt.Errorf("server %q already exists", name)
return
}
if isGlobalServer(endpoint) {
server = &configv1alpha1.Server{
Name: name,
Type: configv1alpha1.GlobalServerType,
GlobalOpts: &configv1alpha1.GlobalServer{Endpoint: sanitizeEndpoint(endpoint)},
}
} else {
kubeConfig, kubecontext, err = tkgauth.KubeconfigWithPinnipedAuthLoginPlugin(endpoint, nil)
if err != nil {
log.Fatalf("Error creating kubeconfig with tanzu pinniped-auth login plugin: err-%v", err)
return nil, err
}
server = &configv1alpha1.Server{
Name: name,
Type: configv1alpha1.ManagementClusterServerType,
ManagementClusterOpts: &configv1alpha1.ManagementClusterServer{
Path: kubeConfig,
Context: kubecontext,
Endpoint: endpoint},
}
}
return server, err
}
func globalLogin(s *configv1alpha1.Server) (err error) {
a := configv1alpha1.GlobalServerAuth{}
apiToken, apiTokenExists := os.LookupEnv(config.EnvAPITokenKey)
issuer := csp.ProdIssuer
if staging {
issuer = csp.StgIssuer
}
if apiTokenExists {
log.Debug("API token env var is set")
} else {
apiToken, err = promptAPIToken()
if err != nil {
return err
}
}
token, err := csp.GetAccessTokenFromAPIToken(apiToken, issuer)
if err != nil {
return err
}
claims, err := csp.ParseToken(&oauth2.Token{AccessToken: token.AccessToken})
if err != nil {
return err
}
a.Issuer = issuer
a.UserName = claims.Username
a.Permissions = claims.Permissions
a.AccessToken = token.AccessToken
a.IDToken = token.IDToken
a.RefreshToken = apiToken
a.Type = "api-token"
expiresAt := time.Now().Local().Add(time.Second * time.Duration(token.ExpiresIn))
a.Expiration = metav1.NewTime(expiresAt)
s.GlobalOpts.Auth = a
err = config.PutServer(s, true)
if err != nil {
return err
}
// format
fmt.Println()
log.Success("successfully logged into global control plane")
return nil
}
// Interactive way to login to TMC. User will be prompted for token and context name.
func promptAPIToken() (apiToken string, err error) {
consoleURL := url.URL{
Scheme: "https",
Host: "console.cloud.vmware.com",
Path: "/csp/gateway/portal/",
Fragment: "/user/tokens",
}
// format
fmt.Println()
log.Infof(
"If you don't have an API token, visit the VMware Cloud Services console, select your organization, and create an API token with the TMC service roles:\n %s\n",
consoleURL.String(),
)
promptOpts := getPromptOpts()
// format
fmt.Println()
err = component.Prompt(
&component.PromptConfig{
Message: "API Token",
},
&apiToken,
promptOpts...,
)
return
}
func managementClusterLogin(s *configv1alpha1.Server) error {
if s.ManagementClusterOpts.Path != "" && s.ManagementClusterOpts.Context != "" {
_, err := tkgauth.GetServerKubernetesVersion(s.ManagementClusterOpts.Path, s.ManagementClusterOpts.Context)
if err != nil {
log.Fatalf("failed to login to the management cluster %s, err-%v", s.Name, err)
return err
}
err = config.PutServer(s, true)
if err != nil {
return err
}
log.Successf("successfully logged in to management cluster using the kubeconfig %s", s.Name)
return nil
}
return fmt.Errorf("not yet implemented")
}
func sanitizeEndpoint(endpoint string) string {
if len(strings.Split(endpoint, ":")) == 1 {
return fmt.Sprintf("%s:443", endpoint)
}
return endpoint
}
func getDefaultKubeconfigPath() string {
kubeConfigFilename := os.Getenv(clientcmd.RecommendedConfigPathEnvVar)
// fallback to default kubeconfig file location if no env variable set
if kubeConfigFilename == "" {
kubeConfigFilename = clientcmd.RecommendedHomeFile
}
return kubeConfigFilename
}
| return
}
| conditional_block |
main.go | // Copyright 2021 VMware, Inc. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package main
import (
"fmt"
"net/url"
"os"
"sort"
"strings"
"time"
"github.com/aunum/log"
"github.com/spf13/cobra"
"golang.org/x/oauth2"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/tools/clientcmd"
cliv1alpha1 "github.com/vmware-tanzu/tanzu-framework/apis/cli/v1alpha1"
configv1alpha1 "github.com/vmware-tanzu/tanzu-framework/apis/config/v1alpha1"
"github.com/vmware-tanzu/tanzu-framework/pkg/v1/auth/csp"
tkgauth "github.com/vmware-tanzu/tanzu-framework/pkg/v1/auth/tkg"
"github.com/vmware-tanzu/tanzu-framework/pkg/v1/cli/command/plugin"
"github.com/vmware-tanzu/tanzu-framework/pkg/v1/cli/component"
"github.com/vmware-tanzu/tanzu-framework/pkg/v1/config"
)
var descriptor = cliv1alpha1.PluginDescriptor{
Name: "login",
Description: "Login to the platform",
Group: cliv1alpha1.SystemCmdGroup,
Aliases: []string{"lo", "logins"},
}
var (
stderrOnly, forceCSP, staging bool
endpoint, name, apiToken, server, kubeConfig, kubecontext string
)
const (
knownGlobalHost = "cloud.vmware.com"
)
func main() {
p, err := plugin.NewPlugin(&descriptor)
if err != nil {
log.Fatal(err)
}
p.Cmd.Flags().StringVar(&endpoint, "endpoint", "", "endpoint to login to")
p.Cmd.Flags().StringVar(&name, "name", "", "name of the server")
p.Cmd.Flags().StringVar(&apiToken, "apiToken", "", "API token for global login")
p.Cmd.Flags().StringVar(&server, "server", "", "login to the given server")
p.Cmd.Flags().StringVar(&kubeConfig, "kubeconfig", "", "path to kubeconfig management cluster. Valid only if user doesn't choose 'endpoint' option.(See [*])")
p.Cmd.Flags().StringVar(&kubecontext, "context", "", "the context in the kubeconfig to use for management cluster. Valid only if user doesn't choose 'endpoint' option.(See [*]) ")
p.Cmd.Flags().BoolVar(&stderrOnly, "stderr-only", false, "send all output to stderr rather than stdout")
p.Cmd.Flags().BoolVar(&forceCSP, "force-csp", false, "force the endpoint to be logged in as a csp server")
p.Cmd.Flags().BoolVar(&staging, "staging", false, "use CSP staging issuer")
p.Cmd.Flags().MarkHidden("stderr-only") //nolint
p.Cmd.Flags().MarkHidden("force-csp") //nolint
p.Cmd.Flags().MarkHidden("staging") //nolint
p.Cmd.RunE = login
p.Cmd.Example = `
# Login to TKG management cluster using endpoint
tanzu login --endpoint "https://login.example.com" --name mgmt-cluster
# Login to TKG management cluster by using kubeconfig path and context for the management cluster
tanzu login --kubeconfig path/to/kubeconfig --context path/to/context --name mgmt-cluster
# Login to TKG management cluster by using default kubeconfig path and context for the management cluster
tanzu login --context path/to/context --name mgmt-cluster
# Login to an existing server
tanzu login --server mgmt-cluster
[*] : User has two options to login to TKG. User can choose the login endpoint option
by providing 'endpoint', or user can choose to use the kubeconfig for the management cluster by
providing 'kubeconfig' and 'context'. If only '--context' is set and '--kubeconfig' is unset
$KUBECONFIG env variable would be used and, if $KUBECONFIG env is also unset default
kubeconfig($HOME/.kube/config) would be used
`
if err := p.Execute(); err != nil {
os.Exit(1)
}
}
func login(cmd *cobra.Command, args []string) (err error) {
cfg, err := config.GetClientConfig()
if _, ok := err.(*config.ClientConfigNotExistError); ok {
cfg, err = config.NewClientConfig()
if err != nil {
return err
}
} else if err != nil {
return err
}
newServerSelector := "+ new server"
var serverTarget *configv1alpha1.Server
if name != "" {
serverTarget, err = createNewServer()
if err != nil {
return err
}
} else if server == "" {
serverTarget, err = getServerTarget(cfg, newServerSelector)
if err != nil {
return err
}
} else {
serverTarget, err = config.GetServer(server)
if err != nil {
return err
}
}
if server == newServerSelector {
serverTarget, err = createNewServer()
if err != nil {
return err
}
}
if serverTarget.Type == configv1alpha1.GlobalServerType {
return globalLogin(serverTarget)
}
return managementClusterLogin(serverTarget)
}
func getServerTarget(cfg *configv1alpha1.ClientConfig, newServerSelector string) (*configv1alpha1.Server, error) {
| func getKeys(m map[string]*configv1alpha1.Server) []string {
keys := make([]string, 0, len(m))
for key := range m {
keys = append(keys, key)
}
sort.Strings(keys)
return keys
}
func isGlobalServer(endpoint string) bool {
if strings.Contains(endpoint, knownGlobalHost) {
return true
}
if forceCSP {
return true
}
return false
}
func rpad(s string, padding int) string {
template := fmt.Sprintf("%%-%ds", padding)
return fmt.Sprintf(template, s)
}
func getPromptOpts() []component.PromptOpt {
var promptOpts []component.PromptOpt
if stderrOnly {
// This uses stderr because it needs to work inside the kubectl exec plugin flow where stdout is reserved.
promptOpts = append(promptOpts, component.WithStdio(os.Stdin, os.Stderr, os.Stderr))
}
return promptOpts
}
func createNewServer() (server *configv1alpha1.Server, err error) {
// user provided command line options to create a server using kubeconfig[optional] and context
if kubecontext != "" {
return createServerWithKubeconfig()
}
// user provided command line options to create a server using endpoint
if endpoint != "" {
return createServerWithEndpoint()
}
promptOpts := getPromptOpts()
var loginType string
err = component.Prompt(
&component.PromptConfig{
Message: "Select login type",
Options: []string{"Server endpoint", "Local kubeconfig"},
Default: "Server endpoint",
},
&loginType,
promptOpts...,
)
if err != nil {
return server, err
}
if loginType == "Server endpoint" {
return createServerWithEndpoint()
}
return createServerWithKubeconfig()
}
func createServerWithKubeconfig() (server *configv1alpha1.Server, err error) {
promptOpts := getPromptOpts()
if kubeConfig == "" && kubecontext == "" {
err = component.Prompt(
&component.PromptConfig{
Message: "Enter path to kubeconfig (if any)",
},
&kubeConfig,
promptOpts...,
)
if err != nil {
return
}
}
if kubeConfig == "" {
kubeConfig = getDefaultKubeconfigPath()
}
if kubeConfig != "" && kubecontext == "" {
err = component.Prompt(
&component.PromptConfig{
Message: "Enter kube context to use",
},
&kubecontext,
promptOpts...,
)
if err != nil {
return
}
}
if name == "" {
err = component.Prompt(
&component.PromptConfig{
Message: "Give the server a name",
},
&name,
promptOpts...,
)
if err != nil {
return
}
}
nameExists, err := config.ServerExists(name)
if err != nil {
return server, err
}
if nameExists {
err = fmt.Errorf("server %q already exists", name)
return
}
server = &configv1alpha1.Server{
Name: name,
Type: configv1alpha1.ManagementClusterServerType,
ManagementClusterOpts: &configv1alpha1.ManagementClusterServer{
Path: kubeConfig,
Context: kubecontext,
Endpoint: endpoint},
}
return server, err
}
func createServerWithEndpoint() (server *configv1alpha1.Server, err error) {
promptOpts := getPromptOpts()
if endpoint == "" {
err = component.Prompt(
&component.PromptConfig{
Message: "Enter server endpoint",
},
&endpoint,
promptOpts...,
)
if err != nil {
return
}
}
if name == "" {
err = component.Prompt(
&component.PromptConfig{
Message: "Give the server a name",
},
&name,
promptOpts...,
)
if err != nil {
return
}
}
nameExists, err := config.ServerExists(name)
if err != nil {
return server, err
}
if nameExists {
err = fmt.Errorf("server %q already exists", name)
return
}
if isGlobalServer(endpoint) {
server = &configv1alpha1.Server{
Name: name,
Type: configv1alpha1.GlobalServerType,
GlobalOpts: &configv1alpha1.GlobalServer{Endpoint: sanitizeEndpoint(endpoint)},
}
} else {
kubeConfig, kubecontext, err = tkgauth.KubeconfigWithPinnipedAuthLoginPlugin(endpoint, nil)
if err != nil {
log.Fatalf("Error creating kubeconfig with tanzu pinniped-auth login plugin: err-%v", err)
return nil, err
}
server = &configv1alpha1.Server{
Name: name,
Type: configv1alpha1.ManagementClusterServerType,
ManagementClusterOpts: &configv1alpha1.ManagementClusterServer{
Path: kubeConfig,
Context: kubecontext,
Endpoint: endpoint},
}
}
return server, err
}
func globalLogin(s *configv1alpha1.Server) (err error) {
a := configv1alpha1.GlobalServerAuth{}
apiToken, apiTokenExists := os.LookupEnv(config.EnvAPITokenKey)
issuer := csp.ProdIssuer
if staging {
issuer = csp.StgIssuer
}
if apiTokenExists {
log.Debug("API token env var is set")
} else {
apiToken, err = promptAPIToken()
if err != nil {
return err
}
}
token, err := csp.GetAccessTokenFromAPIToken(apiToken, issuer)
if err != nil {
return err
}
claims, err := csp.ParseToken(&oauth2.Token{AccessToken: token.AccessToken})
if err != nil {
return err
}
a.Issuer = issuer
a.UserName = claims.Username
a.Permissions = claims.Permissions
a.AccessToken = token.AccessToken
a.IDToken = token.IDToken
a.RefreshToken = apiToken
a.Type = "api-token"
expiresAt := time.Now().Local().Add(time.Second * time.Duration(token.ExpiresIn))
a.Expiration = metav1.NewTime(expiresAt)
s.GlobalOpts.Auth = a
err = config.PutServer(s, true)
if err != nil {
return err
}
// format
fmt.Println()
log.Success("successfully logged into global control plane")
return nil
}
// Interactive way to login to TMC. User will be prompted for token and context name.
func promptAPIToken() (apiToken string, err error) {
consoleURL := url.URL{
Scheme: "https",
Host: "console.cloud.vmware.com",
Path: "/csp/gateway/portal/",
Fragment: "/user/tokens",
}
// format
fmt.Println()
log.Infof(
"If you don't have an API token, visit the VMware Cloud Services console, select your organization, and create an API token with the TMC service roles:\n %s\n",
consoleURL.String(),
)
promptOpts := getPromptOpts()
// format
fmt.Println()
err = component.Prompt(
&component.PromptConfig{
Message: "API Token",
},
&apiToken,
promptOpts...,
)
return
}
func managementClusterLogin(s *configv1alpha1.Server) error {
if s.ManagementClusterOpts.Path != "" && s.ManagementClusterOpts.Context != "" {
_, err := tkgauth.GetServerKubernetesVersion(s.ManagementClusterOpts.Path, s.ManagementClusterOpts.Context)
if err != nil {
log.Fatalf("failed to login to the management cluster %s, err-%v", s.Name, err)
return err
}
err = config.PutServer(s, true)
if err != nil {
return err
}
log.Successf("successfully logged in to management cluster using the kubeconfig %s", s.Name)
return nil
}
return fmt.Errorf("not yet implemented")
}
func sanitizeEndpoint(endpoint string) string {
if len(strings.Split(endpoint, ":")) == 1 {
return fmt.Sprintf("%s:443", endpoint)
}
return endpoint
}
func getDefaultKubeconfigPath() string {
kubeConfigFilename := os.Getenv(clientcmd.RecommendedConfigPathEnvVar)
// fallback to default kubeconfig file location if no env variable set
if kubeConfigFilename == "" {
kubeConfigFilename = clientcmd.RecommendedHomeFile
}
return kubeConfigFilename
}
| promptOpts := getPromptOpts()
servers := map[string]*configv1alpha1.Server{}
for _, server := range cfg.KnownServers {
ep, err := config.EndpointFromServer(server)
if err != nil {
return nil, err
}
s := rpad(server.Name, 20)
s = fmt.Sprintf("%s(%s)", s, ep)
servers[s] = server
}
if endpoint == "" {
endpoint, _ = os.LookupEnv(config.EnvEndpointKey)
}
// If there are no existing servers
if len(servers) == 0 {
return createNewServer()
}
serverKeys := getKeys(servers)
serverKeys = append(serverKeys, newServerSelector)
servers[newServerSelector] = &configv1alpha1.Server{}
err := component.Prompt(
&component.PromptConfig{
Message: "Select a server",
Options: serverKeys,
Default: serverKeys[0],
},
&server,
promptOpts...,
)
if err != nil {
return nil, err
}
return servers[server], nil
}
| identifier_body |
main.go | // Copyright 2021 VMware, Inc. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package main
import (
"fmt"
"net/url"
"os"
"sort"
"strings"
"time"
"github.com/aunum/log"
"github.com/spf13/cobra"
"golang.org/x/oauth2"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/tools/clientcmd"
cliv1alpha1 "github.com/vmware-tanzu/tanzu-framework/apis/cli/v1alpha1"
configv1alpha1 "github.com/vmware-tanzu/tanzu-framework/apis/config/v1alpha1"
"github.com/vmware-tanzu/tanzu-framework/pkg/v1/auth/csp"
tkgauth "github.com/vmware-tanzu/tanzu-framework/pkg/v1/auth/tkg"
"github.com/vmware-tanzu/tanzu-framework/pkg/v1/cli/command/plugin"
"github.com/vmware-tanzu/tanzu-framework/pkg/v1/cli/component" | var descriptor = cliv1alpha1.PluginDescriptor{
Name: "login",
Description: "Login to the platform",
Group: cliv1alpha1.SystemCmdGroup,
Aliases: []string{"lo", "logins"},
}
var (
stderrOnly, forceCSP, staging bool
endpoint, name, apiToken, server, kubeConfig, kubecontext string
)
const (
knownGlobalHost = "cloud.vmware.com"
)
func main() {
p, err := plugin.NewPlugin(&descriptor)
if err != nil {
log.Fatal(err)
}
p.Cmd.Flags().StringVar(&endpoint, "endpoint", "", "endpoint to login to")
p.Cmd.Flags().StringVar(&name, "name", "", "name of the server")
p.Cmd.Flags().StringVar(&apiToken, "apiToken", "", "API token for global login")
p.Cmd.Flags().StringVar(&server, "server", "", "login to the given server")
p.Cmd.Flags().StringVar(&kubeConfig, "kubeconfig", "", "path to kubeconfig management cluster. Valid only if user doesn't choose 'endpoint' option.(See [*])")
p.Cmd.Flags().StringVar(&kubecontext, "context", "", "the context in the kubeconfig to use for management cluster. Valid only if user doesn't choose 'endpoint' option.(See [*]) ")
p.Cmd.Flags().BoolVar(&stderrOnly, "stderr-only", false, "send all output to stderr rather than stdout")
p.Cmd.Flags().BoolVar(&forceCSP, "force-csp", false, "force the endpoint to be logged in as a csp server")
p.Cmd.Flags().BoolVar(&staging, "staging", false, "use CSP staging issuer")
p.Cmd.Flags().MarkHidden("stderr-only") //nolint
p.Cmd.Flags().MarkHidden("force-csp") //nolint
p.Cmd.Flags().MarkHidden("staging") //nolint
p.Cmd.RunE = login
p.Cmd.Example = `
# Login to TKG management cluster using endpoint
tanzu login --endpoint "https://login.example.com" --name mgmt-cluster
# Login to TKG management cluster by using kubeconfig path and context for the management cluster
tanzu login --kubeconfig path/to/kubeconfig --context path/to/context --name mgmt-cluster
# Login to TKG management cluster by using default kubeconfig path and context for the management cluster
tanzu login --context path/to/context --name mgmt-cluster
# Login to an existing server
tanzu login --server mgmt-cluster
[*] : User has two options to login to TKG. User can choose the login endpoint option
by providing 'endpoint', or user can choose to use the kubeconfig for the management cluster by
providing 'kubeconfig' and 'context'. If only '--context' is set and '--kubeconfig' is unset
$KUBECONFIG env variable would be used and, if $KUBECONFIG env is also unset default
kubeconfig($HOME/.kube/config) would be used
`
if err := p.Execute(); err != nil {
os.Exit(1)
}
}
func login(cmd *cobra.Command, args []string) (err error) {
cfg, err := config.GetClientConfig()
if _, ok := err.(*config.ClientConfigNotExistError); ok {
cfg, err = config.NewClientConfig()
if err != nil {
return err
}
} else if err != nil {
return err
}
newServerSelector := "+ new server"
var serverTarget *configv1alpha1.Server
if name != "" {
serverTarget, err = createNewServer()
if err != nil {
return err
}
} else if server == "" {
serverTarget, err = getServerTarget(cfg, newServerSelector)
if err != nil {
return err
}
} else {
serverTarget, err = config.GetServer(server)
if err != nil {
return err
}
}
if server == newServerSelector {
serverTarget, err = createNewServer()
if err != nil {
return err
}
}
if serverTarget.Type == configv1alpha1.GlobalServerType {
return globalLogin(serverTarget)
}
return managementClusterLogin(serverTarget)
}
func getServerTarget(cfg *configv1alpha1.ClientConfig, newServerSelector string) (*configv1alpha1.Server, error) {
promptOpts := getPromptOpts()
servers := map[string]*configv1alpha1.Server{}
for _, server := range cfg.KnownServers {
ep, err := config.EndpointFromServer(server)
if err != nil {
return nil, err
}
s := rpad(server.Name, 20)
s = fmt.Sprintf("%s(%s)", s, ep)
servers[s] = server
}
if endpoint == "" {
endpoint, _ = os.LookupEnv(config.EnvEndpointKey)
}
// If there are no existing servers
if len(servers) == 0 {
return createNewServer()
}
serverKeys := getKeys(servers)
serverKeys = append(serverKeys, newServerSelector)
servers[newServerSelector] = &configv1alpha1.Server{}
err := component.Prompt(
&component.PromptConfig{
Message: "Select a server",
Options: serverKeys,
Default: serverKeys[0],
},
&server,
promptOpts...,
)
if err != nil {
return nil, err
}
return servers[server], nil
}
func getKeys(m map[string]*configv1alpha1.Server) []string {
keys := make([]string, 0, len(m))
for key := range m {
keys = append(keys, key)
}
sort.Strings(keys)
return keys
}
func isGlobalServer(endpoint string) bool {
if strings.Contains(endpoint, knownGlobalHost) {
return true
}
if forceCSP {
return true
}
return false
}
func rpad(s string, padding int) string {
template := fmt.Sprintf("%%-%ds", padding)
return fmt.Sprintf(template, s)
}
func getPromptOpts() []component.PromptOpt {
var promptOpts []component.PromptOpt
if stderrOnly {
// This uses stderr because it needs to work inside the kubectl exec plugin flow where stdout is reserved.
promptOpts = append(promptOpts, component.WithStdio(os.Stdin, os.Stderr, os.Stderr))
}
return promptOpts
}
func createNewServer() (server *configv1alpha1.Server, err error) {
// user provided command line options to create a server using kubeconfig[optional] and context
if kubecontext != "" {
return createServerWithKubeconfig()
}
// user provided command line options to create a server using endpoint
if endpoint != "" {
return createServerWithEndpoint()
}
promptOpts := getPromptOpts()
var loginType string
err = component.Prompt(
&component.PromptConfig{
Message: "Select login type",
Options: []string{"Server endpoint", "Local kubeconfig"},
Default: "Server endpoint",
},
&loginType,
promptOpts...,
)
if err != nil {
return server, err
}
if loginType == "Server endpoint" {
return createServerWithEndpoint()
}
return createServerWithKubeconfig()
}
func createServerWithKubeconfig() (server *configv1alpha1.Server, err error) {
promptOpts := getPromptOpts()
if kubeConfig == "" && kubecontext == "" {
err = component.Prompt(
&component.PromptConfig{
Message: "Enter path to kubeconfig (if any)",
},
&kubeConfig,
promptOpts...,
)
if err != nil {
return
}
}
if kubeConfig == "" {
kubeConfig = getDefaultKubeconfigPath()
}
if kubeConfig != "" && kubecontext == "" {
err = component.Prompt(
&component.PromptConfig{
Message: "Enter kube context to use",
},
&kubecontext,
promptOpts...,
)
if err != nil {
return
}
}
if name == "" {
err = component.Prompt(
&component.PromptConfig{
Message: "Give the server a name",
},
&name,
promptOpts...,
)
if err != nil {
return
}
}
nameExists, err := config.ServerExists(name)
if err != nil {
return server, err
}
if nameExists {
err = fmt.Errorf("server %q already exists", name)
return
}
server = &configv1alpha1.Server{
Name: name,
Type: configv1alpha1.ManagementClusterServerType,
ManagementClusterOpts: &configv1alpha1.ManagementClusterServer{
Path: kubeConfig,
Context: kubecontext,
Endpoint: endpoint},
}
return server, err
}
func createServerWithEndpoint() (server *configv1alpha1.Server, err error) {
promptOpts := getPromptOpts()
if endpoint == "" {
err = component.Prompt(
&component.PromptConfig{
Message: "Enter server endpoint",
},
&endpoint,
promptOpts...,
)
if err != nil {
return
}
}
if name == "" {
err = component.Prompt(
&component.PromptConfig{
Message: "Give the server a name",
},
&name,
promptOpts...,
)
if err != nil {
return
}
}
nameExists, err := config.ServerExists(name)
if err != nil {
return server, err
}
if nameExists {
err = fmt.Errorf("server %q already exists", name)
return
}
if isGlobalServer(endpoint) {
server = &configv1alpha1.Server{
Name: name,
Type: configv1alpha1.GlobalServerType,
GlobalOpts: &configv1alpha1.GlobalServer{Endpoint: sanitizeEndpoint(endpoint)},
}
} else {
kubeConfig, kubecontext, err = tkgauth.KubeconfigWithPinnipedAuthLoginPlugin(endpoint, nil)
if err != nil {
log.Fatalf("Error creating kubeconfig with tanzu pinniped-auth login plugin: err-%v", err)
return nil, err
}
server = &configv1alpha1.Server{
Name: name,
Type: configv1alpha1.ManagementClusterServerType,
ManagementClusterOpts: &configv1alpha1.ManagementClusterServer{
Path: kubeConfig,
Context: kubecontext,
Endpoint: endpoint},
}
}
return server, err
}
func globalLogin(s *configv1alpha1.Server) (err error) {
a := configv1alpha1.GlobalServerAuth{}
apiToken, apiTokenExists := os.LookupEnv(config.EnvAPITokenKey)
issuer := csp.ProdIssuer
if staging {
issuer = csp.StgIssuer
}
if apiTokenExists {
log.Debug("API token env var is set")
} else {
apiToken, err = promptAPIToken()
if err != nil {
return err
}
}
token, err := csp.GetAccessTokenFromAPIToken(apiToken, issuer)
if err != nil {
return err
}
claims, err := csp.ParseToken(&oauth2.Token{AccessToken: token.AccessToken})
if err != nil {
return err
}
a.Issuer = issuer
a.UserName = claims.Username
a.Permissions = claims.Permissions
a.AccessToken = token.AccessToken
a.IDToken = token.IDToken
a.RefreshToken = apiToken
a.Type = "api-token"
expiresAt := time.Now().Local().Add(time.Second * time.Duration(token.ExpiresIn))
a.Expiration = metav1.NewTime(expiresAt)
s.GlobalOpts.Auth = a
err = config.PutServer(s, true)
if err != nil {
return err
}
// format
fmt.Println()
log.Success("successfully logged into global control plane")
return nil
}
// Interactive way to login to TMC. User will be prompted for token and context name.
func promptAPIToken() (apiToken string, err error) {
consoleURL := url.URL{
Scheme: "https",
Host: "console.cloud.vmware.com",
Path: "/csp/gateway/portal/",
Fragment: "/user/tokens",
}
// format
fmt.Println()
log.Infof(
"If you don't have an API token, visit the VMware Cloud Services console, select your organization, and create an API token with the TMC service roles:\n %s\n",
consoleURL.String(),
)
promptOpts := getPromptOpts()
// format
fmt.Println()
err = component.Prompt(
&component.PromptConfig{
Message: "API Token",
},
&apiToken,
promptOpts...,
)
return
}
func managementClusterLogin(s *configv1alpha1.Server) error {
if s.ManagementClusterOpts.Path != "" && s.ManagementClusterOpts.Context != "" {
_, err := tkgauth.GetServerKubernetesVersion(s.ManagementClusterOpts.Path, s.ManagementClusterOpts.Context)
if err != nil {
log.Fatalf("failed to login to the management cluster %s, err-%v", s.Name, err)
return err
}
err = config.PutServer(s, true)
if err != nil {
return err
}
log.Successf("successfully logged in to management cluster using the kubeconfig %s", s.Name)
return nil
}
return fmt.Errorf("not yet implemented")
}
func sanitizeEndpoint(endpoint string) string {
if len(strings.Split(endpoint, ":")) == 1 {
return fmt.Sprintf("%s:443", endpoint)
}
return endpoint
}
func getDefaultKubeconfigPath() string {
kubeConfigFilename := os.Getenv(clientcmd.RecommendedConfigPathEnvVar)
// fallback to default kubeconfig file location if no env variable set
if kubeConfigFilename == "" {
kubeConfigFilename = clientcmd.RecommendedHomeFile
}
return kubeConfigFilename
} | "github.com/vmware-tanzu/tanzu-framework/pkg/v1/config"
)
| random_line_split |
main.go | // Copyright 2021 VMware, Inc. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package main
import (
"fmt"
"net/url"
"os"
"sort"
"strings"
"time"
"github.com/aunum/log"
"github.com/spf13/cobra"
"golang.org/x/oauth2"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/tools/clientcmd"
cliv1alpha1 "github.com/vmware-tanzu/tanzu-framework/apis/cli/v1alpha1"
configv1alpha1 "github.com/vmware-tanzu/tanzu-framework/apis/config/v1alpha1"
"github.com/vmware-tanzu/tanzu-framework/pkg/v1/auth/csp"
tkgauth "github.com/vmware-tanzu/tanzu-framework/pkg/v1/auth/tkg"
"github.com/vmware-tanzu/tanzu-framework/pkg/v1/cli/command/plugin"
"github.com/vmware-tanzu/tanzu-framework/pkg/v1/cli/component"
"github.com/vmware-tanzu/tanzu-framework/pkg/v1/config"
)
var descriptor = cliv1alpha1.PluginDescriptor{
Name: "login",
Description: "Login to the platform",
Group: cliv1alpha1.SystemCmdGroup,
Aliases: []string{"lo", "logins"},
}
var (
stderrOnly, forceCSP, staging bool
endpoint, name, apiToken, server, kubeConfig, kubecontext string
)
const (
knownGlobalHost = "cloud.vmware.com"
)
func | () {
p, err := plugin.NewPlugin(&descriptor)
if err != nil {
log.Fatal(err)
}
p.Cmd.Flags().StringVar(&endpoint, "endpoint", "", "endpoint to login to")
p.Cmd.Flags().StringVar(&name, "name", "", "name of the server")
p.Cmd.Flags().StringVar(&apiToken, "apiToken", "", "API token for global login")
p.Cmd.Flags().StringVar(&server, "server", "", "login to the given server")
p.Cmd.Flags().StringVar(&kubeConfig, "kubeconfig", "", "path to kubeconfig management cluster. Valid only if user doesn't choose 'endpoint' option.(See [*])")
p.Cmd.Flags().StringVar(&kubecontext, "context", "", "the context in the kubeconfig to use for management cluster. Valid only if user doesn't choose 'endpoint' option.(See [*]) ")
p.Cmd.Flags().BoolVar(&stderrOnly, "stderr-only", false, "send all output to stderr rather than stdout")
p.Cmd.Flags().BoolVar(&forceCSP, "force-csp", false, "force the endpoint to be logged in as a csp server")
p.Cmd.Flags().BoolVar(&staging, "staging", false, "use CSP staging issuer")
p.Cmd.Flags().MarkHidden("stderr-only") //nolint
p.Cmd.Flags().MarkHidden("force-csp") //nolint
p.Cmd.Flags().MarkHidden("staging") //nolint
p.Cmd.RunE = login
p.Cmd.Example = `
# Login to TKG management cluster using endpoint
tanzu login --endpoint "https://login.example.com" --name mgmt-cluster
# Login to TKG management cluster by using kubeconfig path and context for the management cluster
tanzu login --kubeconfig path/to/kubeconfig --context path/to/context --name mgmt-cluster
# Login to TKG management cluster by using default kubeconfig path and context for the management cluster
tanzu login --context path/to/context --name mgmt-cluster
# Login to an existing server
tanzu login --server mgmt-cluster
[*] : User has two options to login to TKG. User can choose the login endpoint option
by providing 'endpoint', or user can choose to use the kubeconfig for the management cluster by
providing 'kubeconfig' and 'context'. If only '--context' is set and '--kubeconfig' is unset
$KUBECONFIG env variable would be used and, if $KUBECONFIG env is also unset default
kubeconfig($HOME/.kube/config) would be used
`
if err := p.Execute(); err != nil {
os.Exit(1)
}
}
func login(cmd *cobra.Command, args []string) (err error) {
cfg, err := config.GetClientConfig()
if _, ok := err.(*config.ClientConfigNotExistError); ok {
cfg, err = config.NewClientConfig()
if err != nil {
return err
}
} else if err != nil {
return err
}
newServerSelector := "+ new server"
var serverTarget *configv1alpha1.Server
if name != "" {
serverTarget, err = createNewServer()
if err != nil {
return err
}
} else if server == "" {
serverTarget, err = getServerTarget(cfg, newServerSelector)
if err != nil {
return err
}
} else {
serverTarget, err = config.GetServer(server)
if err != nil {
return err
}
}
if server == newServerSelector {
serverTarget, err = createNewServer()
if err != nil {
return err
}
}
if serverTarget.Type == configv1alpha1.GlobalServerType {
return globalLogin(serverTarget)
}
return managementClusterLogin(serverTarget)
}
func getServerTarget(cfg *configv1alpha1.ClientConfig, newServerSelector string) (*configv1alpha1.Server, error) {
promptOpts := getPromptOpts()
servers := map[string]*configv1alpha1.Server{}
for _, server := range cfg.KnownServers {
ep, err := config.EndpointFromServer(server)
if err != nil {
return nil, err
}
s := rpad(server.Name, 20)
s = fmt.Sprintf("%s(%s)", s, ep)
servers[s] = server
}
if endpoint == "" {
endpoint, _ = os.LookupEnv(config.EnvEndpointKey)
}
// If there are no existing servers
if len(servers) == 0 {
return createNewServer()
}
serverKeys := getKeys(servers)
serverKeys = append(serverKeys, newServerSelector)
servers[newServerSelector] = &configv1alpha1.Server{}
err := component.Prompt(
&component.PromptConfig{
Message: "Select a server",
Options: serverKeys,
Default: serverKeys[0],
},
&server,
promptOpts...,
)
if err != nil {
return nil, err
}
return servers[server], nil
}
func getKeys(m map[string]*configv1alpha1.Server) []string {
keys := make([]string, 0, len(m))
for key := range m {
keys = append(keys, key)
}
sort.Strings(keys)
return keys
}
func isGlobalServer(endpoint string) bool {
if strings.Contains(endpoint, knownGlobalHost) {
return true
}
if forceCSP {
return true
}
return false
}
func rpad(s string, padding int) string {
template := fmt.Sprintf("%%-%ds", padding)
return fmt.Sprintf(template, s)
}
func getPromptOpts() []component.PromptOpt {
var promptOpts []component.PromptOpt
if stderrOnly {
// This uses stderr because it needs to work inside the kubectl exec plugin flow where stdout is reserved.
promptOpts = append(promptOpts, component.WithStdio(os.Stdin, os.Stderr, os.Stderr))
}
return promptOpts
}
func createNewServer() (server *configv1alpha1.Server, err error) {
// user provided command line options to create a server using kubeconfig[optional] and context
if kubecontext != "" {
return createServerWithKubeconfig()
}
// user provided command line options to create a server using endpoint
if endpoint != "" {
return createServerWithEndpoint()
}
promptOpts := getPromptOpts()
var loginType string
err = component.Prompt(
&component.PromptConfig{
Message: "Select login type",
Options: []string{"Server endpoint", "Local kubeconfig"},
Default: "Server endpoint",
},
&loginType,
promptOpts...,
)
if err != nil {
return server, err
}
if loginType == "Server endpoint" {
return createServerWithEndpoint()
}
return createServerWithKubeconfig()
}
func createServerWithKubeconfig() (server *configv1alpha1.Server, err error) {
promptOpts := getPromptOpts()
if kubeConfig == "" && kubecontext == "" {
err = component.Prompt(
&component.PromptConfig{
Message: "Enter path to kubeconfig (if any)",
},
&kubeConfig,
promptOpts...,
)
if err != nil {
return
}
}
if kubeConfig == "" {
kubeConfig = getDefaultKubeconfigPath()
}
if kubeConfig != "" && kubecontext == "" {
err = component.Prompt(
&component.PromptConfig{
Message: "Enter kube context to use",
},
&kubecontext,
promptOpts...,
)
if err != nil {
return
}
}
if name == "" {
err = component.Prompt(
&component.PromptConfig{
Message: "Give the server a name",
},
&name,
promptOpts...,
)
if err != nil {
return
}
}
nameExists, err := config.ServerExists(name)
if err != nil {
return server, err
}
if nameExists {
err = fmt.Errorf("server %q already exists", name)
return
}
server = &configv1alpha1.Server{
Name: name,
Type: configv1alpha1.ManagementClusterServerType,
ManagementClusterOpts: &configv1alpha1.ManagementClusterServer{
Path: kubeConfig,
Context: kubecontext,
Endpoint: endpoint},
}
return server, err
}
func createServerWithEndpoint() (server *configv1alpha1.Server, err error) {
promptOpts := getPromptOpts()
if endpoint == "" {
err = component.Prompt(
&component.PromptConfig{
Message: "Enter server endpoint",
},
&endpoint,
promptOpts...,
)
if err != nil {
return
}
}
if name == "" {
err = component.Prompt(
&component.PromptConfig{
Message: "Give the server a name",
},
&name,
promptOpts...,
)
if err != nil {
return
}
}
nameExists, err := config.ServerExists(name)
if err != nil {
return server, err
}
if nameExists {
err = fmt.Errorf("server %q already exists", name)
return
}
if isGlobalServer(endpoint) {
server = &configv1alpha1.Server{
Name: name,
Type: configv1alpha1.GlobalServerType,
GlobalOpts: &configv1alpha1.GlobalServer{Endpoint: sanitizeEndpoint(endpoint)},
}
} else {
kubeConfig, kubecontext, err = tkgauth.KubeconfigWithPinnipedAuthLoginPlugin(endpoint, nil)
if err != nil {
log.Fatalf("Error creating kubeconfig with tanzu pinniped-auth login plugin: err-%v", err)
return nil, err
}
server = &configv1alpha1.Server{
Name: name,
Type: configv1alpha1.ManagementClusterServerType,
ManagementClusterOpts: &configv1alpha1.ManagementClusterServer{
Path: kubeConfig,
Context: kubecontext,
Endpoint: endpoint},
}
}
return server, err
}
func globalLogin(s *configv1alpha1.Server) (err error) {
a := configv1alpha1.GlobalServerAuth{}
apiToken, apiTokenExists := os.LookupEnv(config.EnvAPITokenKey)
issuer := csp.ProdIssuer
if staging {
issuer = csp.StgIssuer
}
if apiTokenExists {
log.Debug("API token env var is set")
} else {
apiToken, err = promptAPIToken()
if err != nil {
return err
}
}
token, err := csp.GetAccessTokenFromAPIToken(apiToken, issuer)
if err != nil {
return err
}
claims, err := csp.ParseToken(&oauth2.Token{AccessToken: token.AccessToken})
if err != nil {
return err
}
a.Issuer = issuer
a.UserName = claims.Username
a.Permissions = claims.Permissions
a.AccessToken = token.AccessToken
a.IDToken = token.IDToken
a.RefreshToken = apiToken
a.Type = "api-token"
expiresAt := time.Now().Local().Add(time.Second * time.Duration(token.ExpiresIn))
a.Expiration = metav1.NewTime(expiresAt)
s.GlobalOpts.Auth = a
err = config.PutServer(s, true)
if err != nil {
return err
}
// format
fmt.Println()
log.Success("successfully logged into global control plane")
return nil
}
// Interactive way to login to TMC. User will be prompted for token and context name.
func promptAPIToken() (apiToken string, err error) {
consoleURL := url.URL{
Scheme: "https",
Host: "console.cloud.vmware.com",
Path: "/csp/gateway/portal/",
Fragment: "/user/tokens",
}
// format
fmt.Println()
log.Infof(
"If you don't have an API token, visit the VMware Cloud Services console, select your organization, and create an API token with the TMC service roles:\n %s\n",
consoleURL.String(),
)
promptOpts := getPromptOpts()
// format
fmt.Println()
err = component.Prompt(
&component.PromptConfig{
Message: "API Token",
},
&apiToken,
promptOpts...,
)
return
}
func managementClusterLogin(s *configv1alpha1.Server) error {
if s.ManagementClusterOpts.Path != "" && s.ManagementClusterOpts.Context != "" {
_, err := tkgauth.GetServerKubernetesVersion(s.ManagementClusterOpts.Path, s.ManagementClusterOpts.Context)
if err != nil {
log.Fatalf("failed to login to the management cluster %s, err-%v", s.Name, err)
return err
}
err = config.PutServer(s, true)
if err != nil {
return err
}
log.Successf("successfully logged in to management cluster using the kubeconfig %s", s.Name)
return nil
}
return fmt.Errorf("not yet implemented")
}
func sanitizeEndpoint(endpoint string) string {
if len(strings.Split(endpoint, ":")) == 1 {
return fmt.Sprintf("%s:443", endpoint)
}
return endpoint
}
func getDefaultKubeconfigPath() string {
kubeConfigFilename := os.Getenv(clientcmd.RecommendedConfigPathEnvVar)
// fallback to default kubeconfig file location if no env variable set
if kubeConfigFilename == "" {
kubeConfigFilename = clientcmd.RecommendedHomeFile
}
return kubeConfigFilename
}
| main | identifier_name |
entity.rs | use anyhow::{anyhow, Context, Error, Result};
use log::{debug, trace};
use serde::Deserialize;
use shellexpand;
use std::{collections::HashMap, convert::TryFrom, env, fs, path::PathBuf, thread};
use toml;
use crate::output::utils::run_cmd;
const DEFAULT_PAGE_SIZE: usize = 10;
#[derive(Debug, Default, Clone, PartialEq, Deserialize)]
#[serde(rename_all = "kebab-case")]
pub struct Account {
// TODO: rename with `from`
pub name: Option<String>,
pub downloads_dir: Option<PathBuf>,
pub signature_delimiter: Option<String>,
pub signature: Option<String>,
pub default_page_size: Option<usize>,
pub watch_cmds: Option<Vec<String>>,
pub default: Option<bool>,
pub email: String,
pub imap_host: String,
pub imap_port: u16,
pub imap_starttls: Option<bool>,
pub imap_insecure: Option<bool>,
pub imap_login: String,
pub imap_passwd_cmd: String,
pub smtp_host: String,
pub smtp_port: u16,
pub smtp_starttls: Option<bool>,
pub smtp_insecure: Option<bool>,
pub smtp_login: String,
pub smtp_passwd_cmd: String,
}
pub type AccountsMap = HashMap<String, Account>;
/// Represents the whole config file.
#[derive(Debug, Default, Clone, Deserialize)]
#[serde(rename_all = "kebab-case")]
pub struct Config {
// TODO: rename with `from`
pub name: String,
pub downloads_dir: Option<PathBuf>,
pub notify_cmd: Option<String>,
/// Option to override the default signature delimiter "`--\n `".
pub signature_delimiter: Option<String>,
pub signature: Option<String>,
pub default_page_size: Option<usize>,
pub watch_cmds: Option<Vec<String>>,
#[serde(flatten)]
pub accounts: HashMap<String, Account>,
}
impl Config {
fn path_from_xdg() -> Result<PathBuf> {
let path = env::var("XDG_CONFIG_HOME").context("cannot find `XDG_CONFIG_HOME` env var")?;
let mut path = PathBuf::from(path);
path.push("himalaya");
path.push("config.toml");
Ok(path)
}
fn path_from_xdg_alt() -> Result<PathBuf> {
let home_var = if cfg!(target_family = "windows") {
"USERPROFILE"
} else {
"HOME"
};
let mut path: PathBuf = env::var(home_var)
.context(format!("cannot find `{}` env var", home_var))?
.into();
path.push(".config");
path.push("himalaya");
path.push("config.toml");
Ok(path)
}
fn path_from_home() -> Result<PathBuf> {
let home_var = if cfg!(target_family = "windows") {
"USERPROFILE"
} else {
"HOME"
};
let mut path: PathBuf = env::var(home_var)
.context(format!("cannot find `{}` env var", home_var))?
.into();
path.push(".himalayarc");
Ok(path)
}
pub fn path() -> Result<PathBuf> {
let path = Self::path_from_xdg()
.or_else(|_| Self::path_from_xdg_alt())
.or_else(|_| Self::path_from_home())
.context("cannot find config path")?;
Ok(path)
}
/// Returns the account by the given name.
/// If `name` is `None`, then the default account is returned.
pub fn find_account_by_name(&self, name: Option<&str>) -> Result<&Account> {
match name {
Some("") | None => self
.accounts
.iter()
.find(|(_, account)| account.default.unwrap_or(false))
.map(|(_, account)| account)
.ok_or_else(|| anyhow!("cannot find default account")),
Some(name) => self
.accounts
.get(name)
.ok_or_else(|| anyhow!(format!("cannot find account `{}`", name))),
}
}
/// Returns the path to the given filename in the download directory.
/// You can imagine this as:
/// ```skip
/// Account-specifique-download-dir-path + Attachment-Filename
/// ```
pub fn downloads_filepath(&self, account: &Account, filename: &str) -> PathBuf {
account
.downloads_dir
.as_ref()
.and_then(|dir| dir.to_str())
.and_then(|dir| shellexpand::full(dir).ok())
.map(|dir| PathBuf::from(dir.to_string()))
.unwrap_or(
self.downloads_dir
.as_ref()
.and_then(|dir| dir.to_str())
.and_then(|dir| shellexpand::full(dir).ok())
.map(|dir| PathBuf::from(dir.to_string()))
.unwrap_or(env::temp_dir()),
)
.join(filename)
}
/// This is a little helper-function like which uses the the name and email
/// of the account to create a valid address for the header of the headers
/// of a msg.
///
/// # Hint
/// If the name includes some special characters like a whitespace, comma or semicolon, then
/// the name will be automatically wrapped between two `"`.
///
/// # Exapmle
/// ```
/// use himalaya::config::model::{Account, Config};
///
/// fn main() {
/// let config = Config::default();
///
/// let normal_account = Account::new(Some("Acc1"), "acc1@mail.com");
/// // notice the semicolon in the name!
/// let special_account = Account::new(Some("TL;DR"), "acc2@mail.com");
///
/// // -- Expeced outputs --
/// let expected_normal = Account {
/// name: Some("Acc1".to_string()),
/// email: "acc1@mail.com".to_string(),
/// .. Account::default()
/// };
///
/// let expected_special = Account {
/// name: Some("\"TL;DR\"".to_string()),
/// email: "acc2@mail.com".to_string(),
/// .. Account::default()
/// };
///
/// assert_eq!(config.address(&normal_account), "Acc1 <acc1@mail.com>");
/// assert_eq!(config.address(&special_account), "\"TL;DR\" <acc2@mail.com>");
/// }
/// ```
pub fn address(&self, account: &Account) -> String |
pub fn run_notify_cmd<S: AsRef<str>>(&self, subject: S, sender: S) -> Result<()> {
let subject = subject.as_ref();
let sender = sender.as_ref();
let default_cmd = format!(r#"notify-send "📫 {}" "{}""#, sender, subject);
let cmd = self
.notify_cmd
.as_ref()
.map(|cmd| format!(r#"{} {:?} {:?}"#, cmd, subject, sender))
.unwrap_or(default_cmd);
run_cmd(&cmd).context("cannot run notify cmd")?;
Ok(())
}
/// Returns the signature of the given acccount in combination witht the sigantion delimiter.
/// If the account doesn't have a signature, then the global signature is used.
///
/// # Example
/// ```
/// use himalaya::config::model::{Config, Account};
///
/// fn main() {
/// let config = Config {
/// signature: Some("Global signature".to_string()),
/// .. Config::default()
/// };
///
/// // a config without a global signature
/// let config_no_global = Config::default();
///
/// let account1 = Account::new_with_signature(Some("Account Name"), "mail@address.com", Some("Cya"));
/// let account2 = Account::new(Some("Bruh"), "mail@address.com");
///
/// // Hint: Don't forget the default signature delimiter: '\n-- \n'
/// assert_eq!(config.signature(&account1), Some("\n-- \nCya".to_string()));
/// assert_eq!(config.signature(&account2), Some("\n-- \nGlobal signature".to_string()));
///
/// assert_eq!(config_no_global.signature(&account2), None);
/// }
/// ```
pub fn signature(&self, account: &Account) -> Option<String> {
let default_sig_delim = String::from("-- \n");
let sig_delim = account
.signature_delimiter
.as_ref()
.or_else(|| self.signature_delimiter.as_ref())
.unwrap_or(&default_sig_delim);
let sig = account
.signature
.as_ref()
.or_else(|| self.signature.as_ref());
sig.and_then(|sig| shellexpand::full(sig).ok())
.map(|sig| sig.to_string())
.and_then(|sig| fs::read_to_string(sig).ok())
.or_else(|| sig.map(|sig| sig.to_owned()))
.map(|sig| format!("\n{}{}", sig_delim, sig))
}
pub fn default_page_size(&self, account: &Account) -> usize {
account
.default_page_size
.as_ref()
.or_else(|| self.default_page_size.as_ref())
.or(Some(&DEFAULT_PAGE_SIZE))
.unwrap()
.to_owned()
}
pub fn exec_watch_cmds(&self, account: &Account) -> Result<()> {
let cmds = account
.watch_cmds
.as_ref()
.or_else(|| self.watch_cmds.as_ref())
.map(|cmds| cmds.to_owned())
.unwrap_or_default();
thread::spawn(move || {
debug!("batch execution of {} cmd(s)", cmds.len());
cmds.iter().for_each(|cmd| {
debug!("running command {:?}…", cmd);
let res = run_cmd(cmd);
debug!("{:?}", res);
})
});
Ok(())
}
}
impl TryFrom<Option<&str>> for Config {
type Error = Error;
fn try_from(path: Option<&str>) -> Result<Self, Self::Error> {
debug!("init config from `{:?}`", path);
let path = path.map(|s| s.into()).unwrap_or(Config::path()?);
let content = fs::read_to_string(path).context("cannot read config file")?;
let config = toml::from_str(&content).context("cannot parse config file")?;
trace!("{:#?}", config);
Ok(config)
}
}
// FIXME: tests
// #[cfg(test)]
// mod tests {
// use crate::domain::{account::entity::Account, config::entity::Config};
// // a quick way to get a config instance for testing
// fn get_config() -> Config {
// Config {
// name: String::from("Config Name"),
// ..Config::default()
// }
// }
// #[test]
// fn test_find_account_by_name() {
// let mut config = get_config();
// let account1 = Account::new(None, "one@mail.com");
// let account2 = Account::new(Some("Two"), "two@mail.com");
// // add some accounts
// config.accounts.insert("One".to_string(), account1.clone());
// config.accounts.insert("Two".to_string(), account2.clone());
// let ret1 = config.find_account_by_name(Some("One")).unwrap();
// let ret2 = config.find_account_by_name(Some("Two")).unwrap();
// assert_eq!(*ret1, account1);
// assert_eq!(*ret2, account2);
// }
// #[test]
// fn test_address() {
// let config = get_config();
// let account1 = Account::new(None, "one@mail.com");
// let account2 = Account::new(Some("Two"), "two@mail.com");
// let account3 = Account::new(Some("TL;DR"), "three@mail.com");
// let account4 = Account::new(Some("TL,DR"), "lol@mail.com");
// let account5 = Account::new(Some("TL:DR"), "rofl@mail.com");
// let account6 = Account::new(Some("TL.DR"), "rust@mail.com");
// assert_eq!(&config.address(&account1), "Config Name <one@mail.com>");
// assert_eq!(&config.address(&account2), "Two <two@mail.com>");
// assert_eq!(&config.address(&account3), "\"TL;DR\" <three@mail.com>");
// assert_eq!(&config.address(&account4), "\"TL,DR\" <lol@mail.com>");
// assert_eq!(&config.address(&account5), "\"TL:DR\" <rofl@mail.com>");
// assert_eq!(&config.address(&account6), "\"TL.DR\" <rust@mail.com>");
// }
// }
| {
let name = account.name.as_ref().unwrap_or(&self.name);
let has_special_chars = "()<>[]:;@.,".contains(|special_char| name.contains(special_char));
if name.is_empty() {
format!("{}", account.email)
} else if has_special_chars {
// so the name has special characters => Wrap it with '"'
format!("\"{}\" <{}>", name, account.email)
} else {
format!("{} <{}>", name, account.email)
}
} | identifier_body |
entity.rs | use anyhow::{anyhow, Context, Error, Result};
use log::{debug, trace};
use serde::Deserialize;
use shellexpand;
use std::{collections::HashMap, convert::TryFrom, env, fs, path::PathBuf, thread};
use toml;
use crate::output::utils::run_cmd;
const DEFAULT_PAGE_SIZE: usize = 10;
#[derive(Debug, Default, Clone, PartialEq, Deserialize)]
#[serde(rename_all = "kebab-case")]
pub struct Account {
// TODO: rename with `from`
pub name: Option<String>,
pub downloads_dir: Option<PathBuf>,
pub signature_delimiter: Option<String>,
pub signature: Option<String>,
pub default_page_size: Option<usize>,
pub watch_cmds: Option<Vec<String>>,
pub default: Option<bool>,
pub email: String,
pub imap_host: String,
pub imap_port: u16,
pub imap_starttls: Option<bool>,
pub imap_insecure: Option<bool>,
pub imap_login: String,
pub imap_passwd_cmd: String,
pub smtp_host: String,
pub smtp_port: u16,
pub smtp_starttls: Option<bool>,
pub smtp_insecure: Option<bool>,
pub smtp_login: String,
pub smtp_passwd_cmd: String,
}
pub type AccountsMap = HashMap<String, Account>;
/// Represents the whole config file.
#[derive(Debug, Default, Clone, Deserialize)]
#[serde(rename_all = "kebab-case")]
pub struct Config {
// TODO: rename with `from`
pub name: String,
pub downloads_dir: Option<PathBuf>,
pub notify_cmd: Option<String>,
/// Option to override the default signature delimiter "`--\n `".
pub signature_delimiter: Option<String>,
pub signature: Option<String>,
pub default_page_size: Option<usize>,
pub watch_cmds: Option<Vec<String>>,
#[serde(flatten)]
pub accounts: HashMap<String, Account>,
}
impl Config {
fn path_from_xdg() -> Result<PathBuf> {
let path = env::var("XDG_CONFIG_HOME").context("cannot find `XDG_CONFIG_HOME` env var")?;
let mut path = PathBuf::from(path);
path.push("himalaya");
path.push("config.toml");
Ok(path)
}
fn path_from_xdg_alt() -> Result<PathBuf> {
let home_var = if cfg!(target_family = "windows") {
"USERPROFILE"
} else {
"HOME"
};
let mut path: PathBuf = env::var(home_var)
.context(format!("cannot find `{}` env var", home_var))?
.into();
path.push(".config");
path.push("himalaya");
path.push("config.toml");
Ok(path)
}
fn path_from_home() -> Result<PathBuf> {
let home_var = if cfg!(target_family = "windows") {
"USERPROFILE"
} else {
"HOME"
};
let mut path: PathBuf = env::var(home_var)
.context(format!("cannot find `{}` env var", home_var))?
.into();
path.push(".himalayarc");
Ok(path)
}
pub fn path() -> Result<PathBuf> {
let path = Self::path_from_xdg()
.or_else(|_| Self::path_from_xdg_alt())
.or_else(|_| Self::path_from_home())
.context("cannot find config path")?;
Ok(path)
}
/// Returns the account by the given name.
/// If `name` is `None`, then the default account is returned.
pub fn find_account_by_name(&self, name: Option<&str>) -> Result<&Account> {
match name {
Some("") | None => self
.accounts
.iter()
.find(|(_, account)| account.default.unwrap_or(false))
.map(|(_, account)| account)
.ok_or_else(|| anyhow!("cannot find default account")),
Some(name) => self
.accounts
.get(name)
.ok_or_else(|| anyhow!(format!("cannot find account `{}`", name))),
}
}
/// Returns the path to the given filename inside the download directory.
///
/// Resolution order: the account's `downloads-dir`, then the global
/// `downloads-dir`, then the system temp dir. Shell variables and `~` in
/// the configured paths are expanded.
pub fn downloads_filepath(&self, account: &Account, filename: &str) -> PathBuf {
    // Expand one configured directory (shell vars, tilde) into a PathBuf.
    let expand = |dir: &PathBuf| {
        dir.to_str()
            .and_then(|dir| shellexpand::full(dir).ok())
            .map(|dir| PathBuf::from(dir.to_string()))
    };
    account
        .downloads_dir
        .as_ref()
        .and_then(expand)
        // lazily fall back: the original eagerly evaluated the whole global
        // chain (including `env::temp_dir()`) even when the account dir won
        .or_else(|| self.downloads_dir.as_ref().and_then(expand))
        .unwrap_or_else(env::temp_dir)
        .join(filename)
}
/// Builds a mailbox string (`Name <email>`) for the given account, suitable
/// for message headers.
///
/// The account name falls back to the global config `name`. An empty name
/// yields only the email. If the name contains a character that is special
/// inside address headers (`()<>[]:;@.,`), it is wrapped in double quotes,
/// e.g. `"TL;DR" <acc2@mail.com>`.
pub fn address(&self, account: &Account) -> String {
    let name = account.name.as_ref().unwrap_or(&self.name);
    if name.is_empty() {
        return account.email.to_string();
    }
    // Quote the display name when it holds any header-special character.
    let needs_quoting = name.chars().any(|c| "()<>[]:;@.,".contains(c));
    if needs_quoting {
        format!("\"{}\" <{}>", name, account.email)
    } else {
        format!("{} <{}>", name, account.email)
    }
}
/// Runs the configured `notify-cmd` (or a `notify-send` default) with the
/// given message subject and sender.
pub fn run_notify_cmd<S: AsRef<str>>(&self, subject: S, sender: S) -> Result<()> {
    let (subject, sender) = (subject.as_ref(), sender.as_ref());
    let cmd = match self.notify_cmd.as_ref() {
        // A user-provided command gets subject and sender appended, quoted.
        Some(cmd) => format!(r#"{} {:?} {:?}"#, cmd, subject, sender),
        None => format!(r#"notify-send "📫 {}" "{}""#, sender, subject),
    };
    run_cmd(&cmd).context("cannot run notify cmd")?;
    Ok(())
}
/// Returns the signature of the given account prefixed with the signature
/// delimiter, or `None` when neither the account nor the global config
/// defines a signature.
///
/// The configured signature value is shell-expanded first; when it names a
/// readable file, the file content is used, otherwise the value itself is
/// taken verbatim. The default delimiter is `"-- \n"`.
pub fn signature(&self, account: &Account) -> Option<String> {
    // Delimiter precedence: account > global > default.
    let default_delim = String::from("-- \n");
    let delim = account
        .signature_delimiter
        .as_ref()
        .or_else(|| self.signature_delimiter.as_ref())
        .unwrap_or(&default_delim);
    // Signature precedence: account > global.
    let sig = account
        .signature
        .as_ref()
        .or_else(|| self.signature.as_ref());
    let content = sig
        .and_then(|s| shellexpand::full(s).ok())
        .map(|s| s.to_string())
        .and_then(|s| fs::read_to_string(s).ok())
        .or_else(|| sig.map(|s| s.to_owned()));
    content.map(|s| format!("\n{}{}", delim, s))
}
/// Returns the page size for listings: the account setting, else the global
/// setting, else `DEFAULT_PAGE_SIZE`.
pub fn default_page_size(&self, account: &Account) -> usize {
    // `Option<usize>` is `Copy`, so the original
    // `as_ref().or(..).or(Some(..)).unwrap().to_owned()` chain is needless.
    account
        .default_page_size
        .or(self.default_page_size)
        .unwrap_or(DEFAULT_PAGE_SIZE)
}
/// Spawns a background thread that runs the account's watch commands (or
/// the global ones when the account defines none), logging each result.
pub fn exec_watch_cmds(&self, account: &Account) -> Result<()> {
    let cmds = account
        .watch_cmds
        .clone()
        .or_else(|| self.watch_cmds.clone())
        .unwrap_or_default();
    thread::spawn(move || {
        debug!("batch execution of {} cmd(s)", cmds.len());
        for cmd in &cmds {
            debug!("running command {:?}…", cmd);
            let res = run_cmd(cmd);
            debug!("{:?}", res);
        }
    });
    Ok(())
}
}
impl TryFrom<Option<&str>> for Config {
type Error = Error;
fn try_from(path: Option<&str>) -> Result<Self, Self::Error> {
debug!("init config from `{:?}`", path);
let path = path.map(|s| s.into()).unwrap_or(Config::path()?);
let content = fs::read_to_string(path).context("cannot read config file")?;
let config = toml::from_str(&content).context("cannot parse config file")?;
trace!("{:#?}", config);
Ok(config)
} | // #[cfg(test)]
// mod tests {
// use crate::domain::{account::entity::Account, config::entity::Config};
// // a quick way to get a config instance for testing
// fn get_config() -> Config {
// Config {
// name: String::from("Config Name"),
// ..Config::default()
// }
// }
// #[test]
// fn test_find_account_by_name() {
// let mut config = get_config();
// let account1 = Account::new(None, "one@mail.com");
// let account2 = Account::new(Some("Two"), "two@mail.com");
// // add some accounts
// config.accounts.insert("One".to_string(), account1.clone());
// config.accounts.insert("Two".to_string(), account2.clone());
// let ret1 = config.find_account_by_name(Some("One")).unwrap();
// let ret2 = config.find_account_by_name(Some("Two")).unwrap();
// assert_eq!(*ret1, account1);
// assert_eq!(*ret2, account2);
// }
// #[test]
// fn test_address() {
// let config = get_config();
// let account1 = Account::new(None, "one@mail.com");
// let account2 = Account::new(Some("Two"), "two@mail.com");
// let account3 = Account::new(Some("TL;DR"), "three@mail.com");
// let account4 = Account::new(Some("TL,DR"), "lol@mail.com");
// let account5 = Account::new(Some("TL:DR"), "rofl@mail.com");
// let account6 = Account::new(Some("TL.DR"), "rust@mail.com");
// assert_eq!(&config.address(&account1), "Config Name <one@mail.com>");
// assert_eq!(&config.address(&account2), "Two <two@mail.com>");
// assert_eq!(&config.address(&account3), "\"TL;DR\" <three@mail.com>");
// assert_eq!(&config.address(&account4), "\"TL,DR\" <lol@mail.com>");
// assert_eq!(&config.address(&account5), "\"TL:DR\" <rofl@mail.com>");
// assert_eq!(&config.address(&account6), "\"TL.DR\" <rust@mail.com>");
// }
// } | }
// FIXME: tests | random_line_split |
entity.rs | use anyhow::{anyhow, Context, Error, Result};
use log::{debug, trace};
use serde::Deserialize;
use shellexpand;
use std::{collections::HashMap, convert::TryFrom, env, fs, path::PathBuf, thread};
use toml;
use crate::output::utils::run_cmd;
const DEFAULT_PAGE_SIZE: usize = 10;
/// One account section of the config file; keys are deserialized from
/// kebab-case (e.g. `imap-host`).
#[derive(Debug, Default, Clone, PartialEq, Deserialize)]
#[serde(rename_all = "kebab-case")]
pub struct Account {
// TODO: rename with `from`
/// Display name used when building the `From` address.
pub name: Option<String>,
/// Account-specific download directory; overrides the global one.
pub downloads_dir: Option<PathBuf>,
/// Account-specific signature delimiter; overrides the global one.
pub signature_delimiter: Option<String>,
/// Signature text, or a path to a file holding it.
pub signature: Option<String>,
/// Account-specific page size for listings; overrides the global one.
pub default_page_size: Option<usize>,
/// Commands run by watch mode; overrides the global ones.
pub watch_cmds: Option<Vec<String>>,
/// Marks this account as the one selected when no name is given.
pub default: Option<bool>,
/// Email address of the account.
pub email: String,
pub imap_host: String,
pub imap_port: u16,
pub imap_starttls: Option<bool>,
pub imap_insecure: Option<bool>,
pub imap_login: String,
// presumably a shell command that prints the IMAP password — verify against usage
pub imap_passwd_cmd: String,
pub smtp_host: String,
pub smtp_port: u16,
pub smtp_starttls: Option<bool>,
pub smtp_insecure: Option<bool>,
pub smtp_login: String,
// presumably a shell command that prints the SMTP password — verify against usage
pub smtp_passwd_cmd: String,
}
pub type AccountsMap = HashMap<String, Account>;
/// Represents the whole config file.
#[derive(Debug, Default, Clone, Deserialize)]
#[serde(rename_all = "kebab-case")]
pub struct Config {
// TODO: rename with `from`
/// Global display name, used when an account has none.
pub name: String,
/// Global download directory; fallback for accounts without one.
pub downloads_dir: Option<PathBuf>,
/// Shell command run to display notifications.
pub notify_cmd: Option<String>,
/// Option to override the default signature delimiter "`-- \n`".
pub signature_delimiter: Option<String>,
/// Global signature; fallback for accounts without one.
pub signature: Option<String>,
/// Global page size for listings; fallback for accounts without one.
pub default_page_size: Option<usize>,
/// Global watch-mode commands; fallback for accounts without them.
pub watch_cmds: Option<Vec<String>>,
/// All account sections, keyed by account name (flattened TOML tables).
#[serde(flatten)]
pub accounts: HashMap<String, Account>,
}
impl Config {
fn path_from_xdg() -> Result<PathBuf> {
let path = env::var("XDG_CONFIG_HOME").context("cannot find `XDG_CONFIG_HOME` env var")?;
let mut path = PathBuf::from(path);
path.push("himalaya");
path.push("config.toml");
Ok(path)
}
fn path_from_xdg_alt() -> Result<PathBuf> {
let home_var = if cfg!(target_family = "windows") {
"USERPROFILE"
} else {
"HOME"
};
let mut path: PathBuf = env::var(home_var)
.context(format!("cannot find `{}` env var", home_var))?
.into();
path.push(".config");
path.push("himalaya");
path.push("config.toml");
Ok(path)
}
fn | () -> Result<PathBuf> {
let home_var = if cfg!(target_family = "windows") {
"USERPROFILE"
} else {
"HOME"
};
let mut path: PathBuf = env::var(home_var)
.context(format!("cannot find `{}` env var", home_var))?
.into();
path.push(".himalayarc");
Ok(path)
}
pub fn path() -> Result<PathBuf> {
let path = Self::path_from_xdg()
.or_else(|_| Self::path_from_xdg_alt())
.or_else(|_| Self::path_from_home())
.context("cannot find config path")?;
Ok(path)
}
/// Returns the account by the given name.
/// If `name` is `None`, then the default account is returned.
pub fn find_account_by_name(&self, name: Option<&str>) -> Result<&Account> {
match name {
Some("") | None => self
.accounts
.iter()
.find(|(_, account)| account.default.unwrap_or(false))
.map(|(_, account)| account)
.ok_or_else(|| anyhow!("cannot find default account")),
Some(name) => self
.accounts
.get(name)
.ok_or_else(|| anyhow!(format!("cannot find account `{}`", name))),
}
}
/// Returns the path to the given filename in the download directory.
/// You can imagine this as:
/// ```skip
/// Account-specifique-download-dir-path + Attachment-Filename
/// ```
pub fn downloads_filepath(&self, account: &Account, filename: &str) -> PathBuf {
account
.downloads_dir
.as_ref()
.and_then(|dir| dir.to_str())
.and_then(|dir| shellexpand::full(dir).ok())
.map(|dir| PathBuf::from(dir.to_string()))
.unwrap_or(
self.downloads_dir
.as_ref()
.and_then(|dir| dir.to_str())
.and_then(|dir| shellexpand::full(dir).ok())
.map(|dir| PathBuf::from(dir.to_string()))
.unwrap_or(env::temp_dir()),
)
.join(filename)
}
/// This is a little helper-function like which uses the the name and email
/// of the account to create a valid address for the header of the headers
/// of a msg.
///
/// # Hint
/// If the name includes some special characters like a whitespace, comma or semicolon, then
/// the name will be automatically wrapped between two `"`.
///
/// # Exapmle
/// ```
/// use himalaya::config::model::{Account, Config};
///
/// fn main() {
/// let config = Config::default();
///
/// let normal_account = Account::new(Some("Acc1"), "acc1@mail.com");
/// // notice the semicolon in the name!
/// let special_account = Account::new(Some("TL;DR"), "acc2@mail.com");
///
/// // -- Expeced outputs --
/// let expected_normal = Account {
/// name: Some("Acc1".to_string()),
/// email: "acc1@mail.com".to_string(),
/// .. Account::default()
/// };
///
/// let expected_special = Account {
/// name: Some("\"TL;DR\"".to_string()),
/// email: "acc2@mail.com".to_string(),
/// .. Account::default()
/// };
///
/// assert_eq!(config.address(&normal_account), "Acc1 <acc1@mail.com>");
/// assert_eq!(config.address(&special_account), "\"TL;DR\" <acc2@mail.com>");
/// }
/// ```
pub fn address(&self, account: &Account) -> String {
let name = account.name.as_ref().unwrap_or(&self.name);
let has_special_chars = "()<>[]:;@.,".contains(|special_char| name.contains(special_char));
if name.is_empty() {
format!("{}", account.email)
} else if has_special_chars {
// so the name has special characters => Wrap it with '"'
format!("\"{}\" <{}>", name, account.email)
} else {
format!("{} <{}>", name, account.email)
}
}
pub fn run_notify_cmd<S: AsRef<str>>(&self, subject: S, sender: S) -> Result<()> {
let subject = subject.as_ref();
let sender = sender.as_ref();
let default_cmd = format!(r#"notify-send "📫 {}" "{}""#, sender, subject);
let cmd = self
.notify_cmd
.as_ref()
.map(|cmd| format!(r#"{} {:?} {:?}"#, cmd, subject, sender))
.unwrap_or(default_cmd);
run_cmd(&cmd).context("cannot run notify cmd")?;
Ok(())
}
/// Returns the signature of the given acccount in combination witht the sigantion delimiter.
/// If the account doesn't have a signature, then the global signature is used.
///
/// # Example
/// ```
/// use himalaya::config::model::{Config, Account};
///
/// fn main() {
/// let config = Config {
/// signature: Some("Global signature".to_string()),
/// .. Config::default()
/// };
///
/// // a config without a global signature
/// let config_no_global = Config::default();
///
/// let account1 = Account::new_with_signature(Some("Account Name"), "mail@address.com", Some("Cya"));
/// let account2 = Account::new(Some("Bruh"), "mail@address.com");
///
/// // Hint: Don't forget the default signature delimiter: '\n-- \n'
/// assert_eq!(config.signature(&account1), Some("\n-- \nCya".to_string()));
/// assert_eq!(config.signature(&account2), Some("\n-- \nGlobal signature".to_string()));
///
/// assert_eq!(config_no_global.signature(&account2), None);
/// }
/// ```
pub fn signature(&self, account: &Account) -> Option<String> {
let default_sig_delim = String::from("-- \n");
let sig_delim = account
.signature_delimiter
.as_ref()
.or_else(|| self.signature_delimiter.as_ref())
.unwrap_or(&default_sig_delim);
let sig = account
.signature
.as_ref()
.or_else(|| self.signature.as_ref());
sig.and_then(|sig| shellexpand::full(sig).ok())
.map(|sig| sig.to_string())
.and_then(|sig| fs::read_to_string(sig).ok())
.or_else(|| sig.map(|sig| sig.to_owned()))
.map(|sig| format!("\n{}{}", sig_delim, sig))
}
/// Returns the page size for listings: the account setting, else the global
/// setting, else `DEFAULT_PAGE_SIZE`.
pub fn default_page_size(&self, account: &Account) -> usize {
    // `Option<usize>` is `Copy`, so the original
    // `as_ref().or(..).or(Some(..)).unwrap().to_owned()` chain is needless.
    account
        .default_page_size
        .or(self.default_page_size)
        .unwrap_or(DEFAULT_PAGE_SIZE)
}
pub fn exec_watch_cmds(&self, account: &Account) -> Result<()> {
let cmds = account
.watch_cmds
.as_ref()
.or_else(|| self.watch_cmds.as_ref())
.map(|cmds| cmds.to_owned())
.unwrap_or_default();
thread::spawn(move || {
debug!("batch execution of {} cmd(s)", cmds.len());
cmds.iter().for_each(|cmd| {
debug!("running command {:?}…", cmd);
let res = run_cmd(cmd);
debug!("{:?}", res);
})
});
Ok(())
}
}
impl TryFrom<Option<&str>> for Config {
type Error = Error;
fn try_from(path: Option<&str>) -> Result<Self, Self::Error> {
debug!("init config from `{:?}`", path);
let path = path.map(|s| s.into()).unwrap_or(Config::path()?);
let content = fs::read_to_string(path).context("cannot read config file")?;
let config = toml::from_str(&content).context("cannot parse config file")?;
trace!("{:#?}", config);
Ok(config)
}
}
// FIXME: tests
// #[cfg(test)]
// mod tests {
// use crate::domain::{account::entity::Account, config::entity::Config};
// // a quick way to get a config instance for testing
// fn get_config() -> Config {
// Config {
// name: String::from("Config Name"),
// ..Config::default()
// }
// }
// #[test]
// fn test_find_account_by_name() {
// let mut config = get_config();
// let account1 = Account::new(None, "one@mail.com");
// let account2 = Account::new(Some("Two"), "two@mail.com");
// // add some accounts
// config.accounts.insert("One".to_string(), account1.clone());
// config.accounts.insert("Two".to_string(), account2.clone());
// let ret1 = config.find_account_by_name(Some("One")).unwrap();
// let ret2 = config.find_account_by_name(Some("Two")).unwrap();
// assert_eq!(*ret1, account1);
// assert_eq!(*ret2, account2);
// }
// #[test]
// fn test_address() {
// let config = get_config();
// let account1 = Account::new(None, "one@mail.com");
// let account2 = Account::new(Some("Two"), "two@mail.com");
// let account3 = Account::new(Some("TL;DR"), "three@mail.com");
// let account4 = Account::new(Some("TL,DR"), "lol@mail.com");
// let account5 = Account::new(Some("TL:DR"), "rofl@mail.com");
// let account6 = Account::new(Some("TL.DR"), "rust@mail.com");
// assert_eq!(&config.address(&account1), "Config Name <one@mail.com>");
// assert_eq!(&config.address(&account2), "Two <two@mail.com>");
// assert_eq!(&config.address(&account3), "\"TL;DR\" <three@mail.com>");
// assert_eq!(&config.address(&account4), "\"TL,DR\" <lol@mail.com>");
// assert_eq!(&config.address(&account5), "\"TL:DR\" <rofl@mail.com>");
// assert_eq!(&config.address(&account6), "\"TL.DR\" <rust@mail.com>");
// }
// }
| path_from_home | identifier_name |
zip.go | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package utils
import (
"archive/zip"
"io"
"os"
"path/filepath"
"strings"
"github.com/apache/openwhisk-wskdeploy/wski18n"
"github.com/apache/openwhisk-wskdeploy/wskprint"
)
const PATH_WILDCARD = "*"
const ONE_DIR_UP = "../"
// NewZipWriter builds a ZipWriter that zips the directory src into the
// archive des, honoring the manifest's include and exclude lists.
// manifestFilePath is the directory of the manifest file; relative include
// paths are resolved against it.
func NewZipWriter(src string, des string, include [][]string, exclude []string, manifestFilePath string) *ZipWriter {
	// make(map[string]bool, 0) carried a redundant zero capacity hint, and
	// the intermediate variable added nothing.
	return &ZipWriter{
		src:              src,
		des:              des,
		include:          include,
		exclude:          exclude,
		excludedFiles:    make(map[string]bool),
		manifestFilePath: manifestFilePath,
	}
}
// ZipWriter zips an action root directory plus any extra included files
// into a single archive, skipping excluded files.
type ZipWriter struct {
src string // action root directory whose content is zipped
des string // destination path of the zip archive
include [][]string // manifest include entries: ["source"] or ["source", "destination"]
exclude []string // manifest exclude patterns, relative to the manifest dir
excludedFiles map[string]bool // cleaned file path -> true when the file must be skipped
manifestFilePath string // directory of the manifest file
zipWriter *zip.Writer // writer into the archive, set by Zip()
}
// Include pairs one resolved source path with its destination inside the
// action directory.
type Include struct {
source string
destination string
}
// zipFile is a filepath.WalkFunc that writes the regular file at path into
// zw.zipWriter, using the path relative to zw.src as the entry name.
// Excluded files, directories, irregular files, and empty files are skipped.
func (zw *ZipWriter) zipFile(path string, f os.FileInfo, err error) error {
var file *os.File
var wr io.Writer
var verboseMsg string
if err != nil {
return err
}
// skip files marked as excluded by the manifest's exclude list
if zw.excludedFiles[filepath.Clean(path)] {
verboseMsg = wski18n.T(wski18n.ID_VERBOSE_ZIP_EXCLUDING_FILE_X_path_X,
map[string]interface{}{
wski18n.KEY_PATH: path,
})
wskprint.PrintlnOpenWhiskVerbose(Flags.Verbose, verboseMsg)
return nil
}
// skip directories, irregular files, and empty files
if !f.Mode().IsRegular() || f.Size() == 0 {
return nil
}
if file, err = os.Open(path); err != nil {
return err
}
defer file.Close()
// entry name is the path relative to the action root
// NOTE(review): assumes "/" separators — confirm on Windows, where
// filepath.Walk yields "\"-separated paths
fileName := strings.TrimPrefix(path, zw.src+"/")
if wr, err = zw.zipWriter.Create(fileName); err != nil {
return err
}
if _, err = io.Copy(wr, file); err != nil {
return err
}
verboseMsg = wski18n.T(wski18n.ID_VERBOSE_ZIP_ADDING_FILE_X_path_X,
map[string]interface{}{
wski18n.KEY_PATH: path,
})
wskprint.PrintlnOpenWhiskVerbose(Flags.Verbose, verboseMsg)
return nil
}
// buildIncludeMetadata expands the manifest's include entries into concrete
// (source, destination) pairs. Sources are resolved relative to the
// manifest directory and may contain "*" wildcards; destinations are
// resolved relative to the action root. Every matched source is also marked
// as not-excluded in zw.excludedFiles.
func (zw *ZipWriter) buildIncludeMetadata() ([]Include, error) {
var includeInfo []Include
var listOfSourceFiles []string
var err error
var verboseMsg string
// iterate over set of included files specified in manifest YAML e.g.
// include:
// - ["source"]
// - ["source", "destination"]
for _, includeData := range zw.include {
var i Include
// if "destination" is not specified, its considered same as "source"
// "source" is relative to where manifest.yaml file is located
// relative source path is converted to absolute path by appending manifest path
// since the relative source path might not be accessible from where wskdeploy is invoked
// "destination" is relative to the action directory, the one specified in function
// relative path is converted to absolute path by appending function directory
if len(includeData) == 1 {
i.source = filepath.Join(zw.manifestFilePath, includeData[0])
i.destination = filepath.Join(zw.src, includeData[0])
verboseMsg = wski18n.T(wski18n.ID_VERBOSE_ZIP_INCLUDE_SOURCE_PATH_X_path_X,
map[string]interface{}{
wski18n.KEY_PATH: includeData[0],
})
wskprint.PrintlnOpenWhiskVerbose(Flags.Verbose, verboseMsg)
} else if len(includeData) == 2 {
i.source = filepath.Join(zw.manifestFilePath, includeData[0])
i.destination = zw.src + "/" + includeData[1]
verboseMsg = wski18n.T(wski18n.ID_VERBOSE_ZIP_INCLUDE_SOURCE_PATH_X_path_X_DESTINATION_PATH_X_dest_X,
map[string]interface{}{
wski18n.KEY_PATH: includeData[0],
wski18n.KEY_DESTINATION: includeData[1],
})
} else {
// zero entries or more than two entries: warn and skip this include
if len(includeData) == 0 {
verboseMsg = wski18n.T(wski18n.ID_VERBOSE_INVALID_INCLUDE_ENTRY,
map[string]interface{}{
wski18n.KEY_INCLUDE: "",
})
wskprint.PrintlnOpenWhiskVerbose(Flags.Verbose, verboseMsg)
} else {
for index, d := range includeData {
includeData[index] = "\"" + d + "\""
}
includeEntry := strings.Join(includeData, ", ")
verboseMsg = wski18n.T(wski18n.ID_VERBOSE_INVALID_INCLUDE_ENTRY,
map[string]interface{}{
wski18n.KEY_INCLUDE: includeEntry,
})
wskprint.PrintlnOpenWhiskVerbose(Flags.Verbose, verboseMsg)
}
continue
}
// set destDir to the destination location
// check if its a file than change it to the Dir of destination file
destDir := i.destination
if isFilePath(destDir) {
destDir = filepath.Dir(destDir)
}
// trim path wildcard "*" from the destination path as if it has any
destDirs := strings.Split(destDir, PATH_WILDCARD)
destDir = destDirs[0]
// retrieve the name of all files matching pattern or nil if there is no matching file
// listOfSourceFiles will hold a list of files matching patterns such as
// actions/* or actions/libs/* or actions/libs/*/utils.js or actions/*/*/utils.js
if listOfSourceFiles, err = filepath.Glob(i.source); err != nil {
return includeInfo, err
}
// handle the scenarios where included path is something similar to actions/common/*.js
// or actions/libs/* or actions/libs/*/utils.js
// and destination is set to libs/ or libs/* or ./libs/* or libs/*/utils.js or libs/ or ./libs/
if strings.ContainsAny(i.source, PATH_WILDCARD) {
wskprint.PrintlnOpenWhiskVerbose(Flags.Verbose, wski18n.T(wski18n.ID_VERBOSE_LIST_OF_FILES_MATCHING_PATTERN))
for _, file := range listOfSourceFiles {
var relPath string
if relPath, err = filepath.Rel(i.source, file); err != nil {
return includeInfo, err
}
relPath = strings.TrimLeft(relPath, ONE_DIR_UP)
j := Include{
source: file,
destination: filepath.Join(destDir, relPath),
}
includeInfo = append(includeInfo, j)
zw.excludedFiles[j.source] = false
verboseMsg = wski18n.T(wski18n.ID_VERBOSE_ZIP_INCLUDE_SOURCE_PATH_X_path_X_DESTINATION_PATH_X_dest_X,
map[string]interface{}{
wski18n.KEY_PATH: j.source,
wski18n.KEY_DESTINATION: j.destination,
})
wskprint.PrintlnOpenWhiskVerbose(Flags.Verbose, verboseMsg)
}
// handle scenarios where included path is something similar to actions/common/utils.js
// and destination is set to ./common/ i.e. no file name specified in the destination
} else {
if f, err := isFile(i.source); err == nil && f {
if _, file := filepath.Split(i.destination); len(file) == 0 {
_, sFile := filepath.Split(i.source)
i.destination = i.destination + sFile
}
}
// append just parsed include info to the list for further processing
wskprint.PrintlnOpenWhiskVerbose(Flags.Verbose, wski18n.T(wski18n.ID_VERBOSE_LIST_OF_FILES_MATCHING_PATTERN))
verboseMsg = wski18n.T(wski18n.ID_VERBOSE_ZIP_INCLUDE_SOURCE_PATH_X_path_X_DESTINATION_PATH_X_dest_X,
map[string]interface{}{
wski18n.KEY_PATH: i.source,
wski18n.KEY_DESTINATION: i.destination,
})
wskprint.PrintlnOpenWhiskVerbose(Flags.Verbose, verboseMsg)
includeInfo = append(includeInfo, i)
zw.excludedFiles[i.source] = false
}
}
return includeInfo, nil
}
// buildExcludeMetadata marks every file matching the manifest's exclude
// patterns (resolved relative to the manifest directory) as excluded in
// zw.excludedFiles.
func (zw *ZipWriter) buildExcludeMetadata() error {
	for _, pattern := range zw.exclude {
		pattern = filepath.Join(zw.manifestFilePath, pattern)
		if err := zw.findExcludedIncludedFiles(pattern, true); err != nil {
			return err
		}
	}
	return nil
}
// findExcludedIncludedFiles records every regular file under functionPath in
// zw.excludedFiles with the given flag (true = excluded, false = included),
// recursing into directories with the same flag.
func (zw *ZipWriter) findExcludedIncludedFiles(functionPath string, flag bool) error {
var err error
var files []string
var excludedFiles []string
var f bool
// append a "*" wildcard so Glob matches the directory's content
if !strings.HasSuffix(functionPath, PATH_WILDCARD) {
functionPath = filepath.Join(functionPath, PATH_WILDCARD)
}
if excludedFiles, err = filepath.Glob(functionPath); err != nil {
return err
}
// expand every glob match into the full list of paths beneath it
for _, file := range excludedFiles {
err = filepath.Walk(file, func(path string, info os.FileInfo, err error) error {
files = append(files, path)
return nil
})
if err != nil {
return err
}
}
for _, file := range files {
if f, err = isFile(file); err != nil {
return err
} else if f {
zw.excludedFiles[file] = flag
} else {
// directories are processed recursively
if err = zw.findExcludedIncludedFiles(file, flag); err != nil {
return err
}
}
}
return err
}
func (zw *ZipWriter) Zip() error {
var zipFile *os.File
var err error
var fileInfo os.FileInfo
var verboseMsg string
// create zip file e.g. greeting.zip
if zipFile, err = os.Create(zw.des); err != nil {
return err
}
defer zipFile.Close()
verboseMsg = wski18n.T(wski18n.ID_VERBOSE_CREATING_ZIP_FILE_X_path_X,
map[string]interface{}{
wski18n.KEY_PATH: zipFile.Name(),
})
wskprint.PrintlnOpenWhiskVerbose(Flags.Verbose, verboseMsg)
// creating a new zip writter for greeting.zip
zw.zipWriter = zip.NewWriter(zipFile)
// build a map of file names and bool indicating whether the file is included or excluded
// iterate over the directory specified in "function", find the list of files and mark them as not excluded
if err = zw.findExcludedIncludedFiles(zw.src, false); err != nil {
return err
}
if err = zw.buildExcludeMetadata(); err != nil {
return err
}
// walk file system rooted at the directory specified in "function"
// walk over each file and dir under root directory e.g. function: actions/greeting
// add actions/greeting/index.js and actions/greeting/package.json to zip file
if err = filepath.Walk(zw.src, zw.zipFile); err != nil {
return nil
}
// maintain a list of included files and/or directories with their destination
var includeInfo []Include
includeInfo, err = zw.buildIncludeMetadata()
if err != nil {
return err
}
for _, i := range includeInfo |
// now close the zip file greeting.zip as all the included items
// are added into the zip file along with the action root dir
if err = zw.zipWriter.Close(); err != nil {
return err
}
// and its safe to delete the files/directories which we copied earlier
// to include them in the zip file greeting.zip
for _, i := range includeInfo {
if filepath.Clean(i.source) != filepath.Clean(i.destination) {
verboseMsg = wski18n.T(wski18n.ID_VERBOSE_DELETING_FILE_X_path_X,
map[string]interface{}{
wski18n.KEY_PATH: i.destination,
})
wskprint.PrintlnOpenWhiskVerbose(Flags.Verbose, verboseMsg)
os.RemoveAll(i.destination)
}
}
return nil
}
| {
if i.source != i.destination {
// now determine whether the included item is file or dir
// it could list something like this as well, "actions/common/*.js"
if fileInfo, err = os.Stat(i.source); err != nil {
return err
}
// if the included item is a directory, call a function to copy the
// entire directory recursively including its subdirectories and files
if fileInfo.Mode().IsDir() {
if err = copyDir(i.source, i.destination); err != nil {
return err
}
// if the included item is a file, call a function to copy the file
// along with its path by creating the parent directories
} else if fileInfo.Mode().IsRegular() {
if err = copyFile(i.source, i.destination); err != nil {
return err
}
}
}
// add included item into zip file greeting.zip
if err = filepath.Walk(i.destination, zw.zipFile); err != nil {
return nil
}
} | conditional_block |
zip.go | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package utils
import (
"archive/zip"
"io"
"os"
"path/filepath"
"strings"
"github.com/apache/openwhisk-wskdeploy/wski18n"
"github.com/apache/openwhisk-wskdeploy/wskprint"
)
const PATH_WILDCARD = "*"
const ONE_DIR_UP = "../"
// NewZipWriter builds a ZipWriter that zips the directory src into the
// archive des, honoring the manifest's include and exclude lists.
// manifestFilePath is the directory of the manifest file; relative include
// paths are resolved against it.
func NewZipWriter(src string, des string, include [][]string, exclude []string, manifestFilePath string) *ZipWriter {
	// make(map[string]bool, 0) carried a redundant zero capacity hint, and
	// the intermediate variable added nothing.
	return &ZipWriter{
		src:              src,
		des:              des,
		include:          include,
		exclude:          exclude,
		excludedFiles:    make(map[string]bool),
		manifestFilePath: manifestFilePath,
	}
}
// ZipWriter zips an action root directory plus any extra included files
// into a single archive, skipping excluded files.
type ZipWriter struct {
src string // action root directory whose content is zipped
des string // destination path of the zip archive
include [][]string // manifest include entries: ["source"] or ["source", "destination"]
exclude []string // manifest exclude patterns, relative to the manifest dir
excludedFiles map[string]bool // cleaned file path -> true when the file must be skipped
manifestFilePath string // directory of the manifest file
zipWriter *zip.Writer // writer into the archive, set by Zip()
}
// Include pairs one resolved source path with its destination inside the
// action directory.
type Include struct {
source string
destination string
}
// zipFile is a filepath.WalkFunc that writes the regular file at path into
// zw.zipWriter, using the path relative to zw.src as the entry name.
// Excluded files, directories, irregular files, and empty files are skipped.
func (zw *ZipWriter) zipFile(path string, f os.FileInfo, err error) error {
var file *os.File
var wr io.Writer
var verboseMsg string
if err != nil {
return err
}
// skip files marked as excluded by the manifest's exclude list
if zw.excludedFiles[filepath.Clean(path)] {
verboseMsg = wski18n.T(wski18n.ID_VERBOSE_ZIP_EXCLUDING_FILE_X_path_X,
map[string]interface{}{
wski18n.KEY_PATH: path,
})
wskprint.PrintlnOpenWhiskVerbose(Flags.Verbose, verboseMsg)
return nil
}
// skip directories, irregular files, and empty files
if !f.Mode().IsRegular() || f.Size() == 0 {
return nil
}
if file, err = os.Open(path); err != nil {
return err
}
defer file.Close()
// entry name is the path relative to the action root
// NOTE(review): assumes "/" separators — confirm on Windows, where
// filepath.Walk yields "\"-separated paths
fileName := strings.TrimPrefix(path, zw.src+"/")
if wr, err = zw.zipWriter.Create(fileName); err != nil {
return err
}
if _, err = io.Copy(wr, file); err != nil {
return err
}
verboseMsg = wski18n.T(wski18n.ID_VERBOSE_ZIP_ADDING_FILE_X_path_X,
map[string]interface{}{
wski18n.KEY_PATH: path,
})
wskprint.PrintlnOpenWhiskVerbose(Flags.Verbose, verboseMsg)
return nil
}
func (zw *ZipWriter) buildIncludeMetadata() ([]Include, error) {
var includeInfo []Include
var listOfSourceFiles []string
var err error
var verboseMsg string
// iterate over set of included files specified in manifest YAML e.g.
// include:
// - ["source"]
// - ["source", "destination"]
for _, includeData := range zw.include {
var i Include
// if "destination" is not specified, its considered same as "source"
// "source" is relative to where manifest.yaml file is located
// relative source path is converted to absolute path by appending manifest path
// since the relative source path might not be accessible from where wskdeploy is invoked
// "destination" is relative to the action directory, the one specified in function
// relative path is converted to absolute path by appending function directory
if len(includeData) == 1 {
i.source = filepath.Join(zw.manifestFilePath, includeData[0])
i.destination = filepath.Join(zw.src, includeData[0])
verboseMsg = wski18n.T(wski18n.ID_VERBOSE_ZIP_INCLUDE_SOURCE_PATH_X_path_X,
map[string]interface{}{
wski18n.KEY_PATH: includeData[0],
})
wskprint.PrintlnOpenWhiskVerbose(Flags.Verbose, verboseMsg)
} else if len(includeData) == 2 {
i.source = filepath.Join(zw.manifestFilePath, includeData[0])
i.destination = zw.src + "/" + includeData[1]
verboseMsg = wski18n.T(wski18n.ID_VERBOSE_ZIP_INCLUDE_SOURCE_PATH_X_path_X_DESTINATION_PATH_X_dest_X,
map[string]interface{}{
wski18n.KEY_PATH: includeData[0],
wski18n.KEY_DESTINATION: includeData[1],
})
} else {
if len(includeData) == 0 {
verboseMsg = wski18n.T(wski18n.ID_VERBOSE_INVALID_INCLUDE_ENTRY,
map[string]interface{}{
wski18n.KEY_INCLUDE: "",
})
wskprint.PrintlnOpenWhiskVerbose(Flags.Verbose, verboseMsg)
} else {
for index, d := range includeData {
includeData[index] = "\"" + d + "\""
}
includeEntry := strings.Join(includeData, ", ")
verboseMsg = wski18n.T(wski18n.ID_VERBOSE_INVALID_INCLUDE_ENTRY,
map[string]interface{}{
wski18n.KEY_INCLUDE: includeEntry,
})
wskprint.PrintlnOpenWhiskVerbose(Flags.Verbose, verboseMsg)
}
continue
}
// set destDir to the destination location
// check if its a file than change it to the Dir of destination file
destDir := i.destination
if isFilePath(destDir) {
destDir = filepath.Dir(destDir)
}
// trim path wildcard "*" from the destination path as if it has any
destDirs := strings.Split(destDir, PATH_WILDCARD)
destDir = destDirs[0]
// retrieve the name of all files matching pattern or nil if there is no matching file
// listOfSourceFiles will hold a list of files matching patterns such as
// actions/* or actions/libs/* or actions/libs/*/utils.js or actions/*/*/utils.js
if listOfSourceFiles, err = filepath.Glob(i.source); err != nil {
return includeInfo, err
}
// handle the scenarios where included path is something similar to actions/common/*.js
// or actions/libs/* or actions/libs/*/utils.js
// and destination is set to libs/ or libs/* or ./libs/* or libs/*/utils.js or libs/ or ./libs/
if strings.ContainsAny(i.source, PATH_WILDCARD) {
wskprint.PrintlnOpenWhiskVerbose(Flags.Verbose, wski18n.T(wski18n.ID_VERBOSE_LIST_OF_FILES_MATCHING_PATTERN))
for _, file := range listOfSourceFiles {
var relPath string
if relPath, err = filepath.Rel(i.source, file); err != nil {
return includeInfo, err
}
relPath = strings.TrimLeft(relPath, ONE_DIR_UP)
j := Include{
source: file,
destination: filepath.Join(destDir, relPath),
}
includeInfo = append(includeInfo, j)
zw.excludedFiles[j.source] = false
verboseMsg = wski18n.T(wski18n.ID_VERBOSE_ZIP_INCLUDE_SOURCE_PATH_X_path_X_DESTINATION_PATH_X_dest_X,
map[string]interface{}{
wski18n.KEY_PATH: j.source,
wski18n.KEY_DESTINATION: j.destination,
})
wskprint.PrintlnOpenWhiskVerbose(Flags.Verbose, verboseMsg)
}
// handle scenarios where included path is something similar to actions/common/utils.js
// and destination is set to ./common/ i.e. no file name specified in the destination
} else {
if f, err := isFile(i.source); err == nil && f {
if _, file := filepath.Split(i.destination); len(file) == 0 {
_, sFile := filepath.Split(i.source)
i.destination = i.destination + sFile
}
}
// append just parsed include info to the list for further processing
wskprint.PrintlnOpenWhiskVerbose(Flags.Verbose, wski18n.T(wski18n.ID_VERBOSE_LIST_OF_FILES_MATCHING_PATTERN))
verboseMsg = wski18n.T(wski18n.ID_VERBOSE_ZIP_INCLUDE_SOURCE_PATH_X_path_X_DESTINATION_PATH_X_dest_X,
map[string]interface{}{
wski18n.KEY_PATH: i.source,
wski18n.KEY_DESTINATION: i.destination,
})
wskprint.PrintlnOpenWhiskVerbose(Flags.Verbose, verboseMsg)
includeInfo = append(includeInfo, i)
zw.excludedFiles[i.source] = false
}
}
return includeInfo, nil
}
func (zw *ZipWriter) buildExcludeMetadata() error {
var err error
for _, exclude := range zw.exclude {
exclude = filepath.Join(zw.manifestFilePath, exclude)
if err = zw.findExcludedIncludedFiles(exclude, true); err != nil {
return err
}
}
return err
}
func (zw *ZipWriter) | (functionPath string, flag bool) error {
var err error
var files []string
var excludedFiles []string
var f bool
if !strings.HasSuffix(functionPath, PATH_WILDCARD) {
functionPath = filepath.Join(functionPath, PATH_WILDCARD)
}
if excludedFiles, err = filepath.Glob(functionPath); err != nil {
return err
}
for _, file := range excludedFiles {
err = filepath.Walk(file, func(path string, info os.FileInfo, err error) error {
files = append(files, path)
return nil
})
if err != nil {
return err
}
}
for _, file := range files {
if f, err = isFile(file); err != nil {
return err
} else if f {
zw.excludedFiles[file] = flag
} else {
if err = zw.findExcludedIncludedFiles(file, flag); err != nil {
return err
}
}
}
return err
}
func (zw *ZipWriter) Zip() error {
var zipFile *os.File
var err error
var fileInfo os.FileInfo
var verboseMsg string
// create zip file e.g. greeting.zip
if zipFile, err = os.Create(zw.des); err != nil {
return err
}
defer zipFile.Close()
verboseMsg = wski18n.T(wski18n.ID_VERBOSE_CREATING_ZIP_FILE_X_path_X,
map[string]interface{}{
wski18n.KEY_PATH: zipFile.Name(),
})
wskprint.PrintlnOpenWhiskVerbose(Flags.Verbose, verboseMsg)
// creating a new zip writter for greeting.zip
zw.zipWriter = zip.NewWriter(zipFile)
// build a map of file names and bool indicating whether the file is included or excluded
// iterate over the directory specified in "function", find the list of files and mark them as not excluded
if err = zw.findExcludedIncludedFiles(zw.src, false); err != nil {
return err
}
if err = zw.buildExcludeMetadata(); err != nil {
return err
}
// walk file system rooted at the directory specified in "function"
// walk over each file and dir under root directory e.g. function: actions/greeting
// add actions/greeting/index.js and actions/greeting/package.json to zip file
if err = filepath.Walk(zw.src, zw.zipFile); err != nil {
return nil
}
// maintain a list of included files and/or directories with their destination
var includeInfo []Include
includeInfo, err = zw.buildIncludeMetadata()
if err != nil {
return err
}
for _, i := range includeInfo {
if i.source != i.destination {
// now determine whether the included item is file or dir
// it could list something like this as well, "actions/common/*.js"
if fileInfo, err = os.Stat(i.source); err != nil {
return err
}
// if the included item is a directory, call a function to copy the
// entire directory recursively including its subdirectories and files
if fileInfo.Mode().IsDir() {
if err = copyDir(i.source, i.destination); err != nil {
return err
}
// if the included item is a file, call a function to copy the file
// along with its path by creating the parent directories
} else if fileInfo.Mode().IsRegular() {
if err = copyFile(i.source, i.destination); err != nil {
return err
}
}
}
// add included item into zip file greeting.zip
if err = filepath.Walk(i.destination, zw.zipFile); err != nil {
return nil
}
}
// now close the zip file greeting.zip as all the included items
// are added into the zip file along with the action root dir
if err = zw.zipWriter.Close(); err != nil {
return err
}
// and its safe to delete the files/directories which we copied earlier
// to include them in the zip file greeting.zip
for _, i := range includeInfo {
if filepath.Clean(i.source) != filepath.Clean(i.destination) {
verboseMsg = wski18n.T(wski18n.ID_VERBOSE_DELETING_FILE_X_path_X,
map[string]interface{}{
wski18n.KEY_PATH: i.destination,
})
wskprint.PrintlnOpenWhiskVerbose(Flags.Verbose, verboseMsg)
os.RemoveAll(i.destination)
}
}
return nil
}
| findExcludedIncludedFiles | identifier_name |
zip.go | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
| package utils
import (
"archive/zip"
"io"
"os"
"path/filepath"
"strings"
"github.com/apache/openwhisk-wskdeploy/wski18n"
"github.com/apache/openwhisk-wskdeploy/wskprint"
)
const PATH_WILDCARD = "*"
const ONE_DIR_UP = "../"
func NewZipWriter(src string, des string, include [][]string, exclude []string, manifestFilePath string) *ZipWriter {
zw := &ZipWriter{
src: src,
des: des,
include: include,
exclude: exclude,
excludedFiles: make(map[string]bool, 0),
manifestFilePath: manifestFilePath,
}
return zw
}
type ZipWriter struct {
src string
des string
include [][]string
exclude []string
excludedFiles map[string]bool
manifestFilePath string
zipWriter *zip.Writer
}
type Include struct {
source string
destination string
}
func (zw *ZipWriter) zipFile(path string, f os.FileInfo, err error) error {
var file *os.File
var wr io.Writer
var verboseMsg string
if err != nil {
return err
}
if zw.excludedFiles[filepath.Clean(path)] {
verboseMsg = wski18n.T(wski18n.ID_VERBOSE_ZIP_EXCLUDING_FILE_X_path_X,
map[string]interface{}{
wski18n.KEY_PATH: path,
})
wskprint.PrintlnOpenWhiskVerbose(Flags.Verbose, verboseMsg)
return nil
}
if !f.Mode().IsRegular() || f.Size() == 0 {
return nil
}
if file, err = os.Open(path); err != nil {
return err
}
defer file.Close()
fileName := strings.TrimPrefix(path, zw.src+"/")
if wr, err = zw.zipWriter.Create(fileName); err != nil {
return err
}
if _, err = io.Copy(wr, file); err != nil {
return err
}
verboseMsg = wski18n.T(wski18n.ID_VERBOSE_ZIP_ADDING_FILE_X_path_X,
map[string]interface{}{
wski18n.KEY_PATH: path,
})
wskprint.PrintlnOpenWhiskVerbose(Flags.Verbose, verboseMsg)
return nil
}
func (zw *ZipWriter) buildIncludeMetadata() ([]Include, error) {
var includeInfo []Include
var listOfSourceFiles []string
var err error
var verboseMsg string
// iterate over set of included files specified in manifest YAML e.g.
// include:
// - ["source"]
// - ["source", "destination"]
for _, includeData := range zw.include {
var i Include
// if "destination" is not specified, its considered same as "source"
// "source" is relative to where manifest.yaml file is located
// relative source path is converted to absolute path by appending manifest path
// since the relative source path might not be accessible from where wskdeploy is invoked
// "destination" is relative to the action directory, the one specified in function
// relative path is converted to absolute path by appending function directory
if len(includeData) == 1 {
i.source = filepath.Join(zw.manifestFilePath, includeData[0])
i.destination = filepath.Join(zw.src, includeData[0])
verboseMsg = wski18n.T(wski18n.ID_VERBOSE_ZIP_INCLUDE_SOURCE_PATH_X_path_X,
map[string]interface{}{
wski18n.KEY_PATH: includeData[0],
})
wskprint.PrintlnOpenWhiskVerbose(Flags.Verbose, verboseMsg)
} else if len(includeData) == 2 {
i.source = filepath.Join(zw.manifestFilePath, includeData[0])
i.destination = zw.src + "/" + includeData[1]
verboseMsg = wski18n.T(wski18n.ID_VERBOSE_ZIP_INCLUDE_SOURCE_PATH_X_path_X_DESTINATION_PATH_X_dest_X,
map[string]interface{}{
wski18n.KEY_PATH: includeData[0],
wski18n.KEY_DESTINATION: includeData[1],
})
} else {
if len(includeData) == 0 {
verboseMsg = wski18n.T(wski18n.ID_VERBOSE_INVALID_INCLUDE_ENTRY,
map[string]interface{}{
wski18n.KEY_INCLUDE: "",
})
wskprint.PrintlnOpenWhiskVerbose(Flags.Verbose, verboseMsg)
} else {
for index, d := range includeData {
includeData[index] = "\"" + d + "\""
}
includeEntry := strings.Join(includeData, ", ")
verboseMsg = wski18n.T(wski18n.ID_VERBOSE_INVALID_INCLUDE_ENTRY,
map[string]interface{}{
wski18n.KEY_INCLUDE: includeEntry,
})
wskprint.PrintlnOpenWhiskVerbose(Flags.Verbose, verboseMsg)
}
continue
}
// set destDir to the destination location
// check if its a file than change it to the Dir of destination file
destDir := i.destination
if isFilePath(destDir) {
destDir = filepath.Dir(destDir)
}
// trim path wildcard "*" from the destination path as if it has any
destDirs := strings.Split(destDir, PATH_WILDCARD)
destDir = destDirs[0]
// retrieve the name of all files matching pattern or nil if there is no matching file
// listOfSourceFiles will hold a list of files matching patterns such as
// actions/* or actions/libs/* or actions/libs/*/utils.js or actions/*/*/utils.js
if listOfSourceFiles, err = filepath.Glob(i.source); err != nil {
return includeInfo, err
}
// handle the scenarios where included path is something similar to actions/common/*.js
// or actions/libs/* or actions/libs/*/utils.js
// and destination is set to libs/ or libs/* or ./libs/* or libs/*/utils.js or libs/ or ./libs/
if strings.ContainsAny(i.source, PATH_WILDCARD) {
wskprint.PrintlnOpenWhiskVerbose(Flags.Verbose, wski18n.T(wski18n.ID_VERBOSE_LIST_OF_FILES_MATCHING_PATTERN))
for _, file := range listOfSourceFiles {
var relPath string
if relPath, err = filepath.Rel(i.source, file); err != nil {
return includeInfo, err
}
relPath = strings.TrimLeft(relPath, ONE_DIR_UP)
j := Include{
source: file,
destination: filepath.Join(destDir, relPath),
}
includeInfo = append(includeInfo, j)
zw.excludedFiles[j.source] = false
verboseMsg = wski18n.T(wski18n.ID_VERBOSE_ZIP_INCLUDE_SOURCE_PATH_X_path_X_DESTINATION_PATH_X_dest_X,
map[string]interface{}{
wski18n.KEY_PATH: j.source,
wski18n.KEY_DESTINATION: j.destination,
})
wskprint.PrintlnOpenWhiskVerbose(Flags.Verbose, verboseMsg)
}
// handle scenarios where included path is something similar to actions/common/utils.js
// and destination is set to ./common/ i.e. no file name specified in the destination
} else {
if f, err := isFile(i.source); err == nil && f {
if _, file := filepath.Split(i.destination); len(file) == 0 {
_, sFile := filepath.Split(i.source)
i.destination = i.destination + sFile
}
}
// append just parsed include info to the list for further processing
wskprint.PrintlnOpenWhiskVerbose(Flags.Verbose, wski18n.T(wski18n.ID_VERBOSE_LIST_OF_FILES_MATCHING_PATTERN))
verboseMsg = wski18n.T(wski18n.ID_VERBOSE_ZIP_INCLUDE_SOURCE_PATH_X_path_X_DESTINATION_PATH_X_dest_X,
map[string]interface{}{
wski18n.KEY_PATH: i.source,
wski18n.KEY_DESTINATION: i.destination,
})
wskprint.PrintlnOpenWhiskVerbose(Flags.Verbose, verboseMsg)
includeInfo = append(includeInfo, i)
zw.excludedFiles[i.source] = false
}
}
return includeInfo, nil
}
func (zw *ZipWriter) buildExcludeMetadata() error {
var err error
for _, exclude := range zw.exclude {
exclude = filepath.Join(zw.manifestFilePath, exclude)
if err = zw.findExcludedIncludedFiles(exclude, true); err != nil {
return err
}
}
return err
}
func (zw *ZipWriter) findExcludedIncludedFiles(functionPath string, flag bool) error {
var err error
var files []string
var excludedFiles []string
var f bool
if !strings.HasSuffix(functionPath, PATH_WILDCARD) {
functionPath = filepath.Join(functionPath, PATH_WILDCARD)
}
if excludedFiles, err = filepath.Glob(functionPath); err != nil {
return err
}
for _, file := range excludedFiles {
err = filepath.Walk(file, func(path string, info os.FileInfo, err error) error {
files = append(files, path)
return nil
})
if err != nil {
return err
}
}
for _, file := range files {
if f, err = isFile(file); err != nil {
return err
} else if f {
zw.excludedFiles[file] = flag
} else {
if err = zw.findExcludedIncludedFiles(file, flag); err != nil {
return err
}
}
}
return err
}
func (zw *ZipWriter) Zip() error {
var zipFile *os.File
var err error
var fileInfo os.FileInfo
var verboseMsg string
// create zip file e.g. greeting.zip
if zipFile, err = os.Create(zw.des); err != nil {
return err
}
defer zipFile.Close()
verboseMsg = wski18n.T(wski18n.ID_VERBOSE_CREATING_ZIP_FILE_X_path_X,
map[string]interface{}{
wski18n.KEY_PATH: zipFile.Name(),
})
wskprint.PrintlnOpenWhiskVerbose(Flags.Verbose, verboseMsg)
// creating a new zip writter for greeting.zip
zw.zipWriter = zip.NewWriter(zipFile)
// build a map of file names and bool indicating whether the file is included or excluded
// iterate over the directory specified in "function", find the list of files and mark them as not excluded
if err = zw.findExcludedIncludedFiles(zw.src, false); err != nil {
return err
}
if err = zw.buildExcludeMetadata(); err != nil {
return err
}
// walk file system rooted at the directory specified in "function"
// walk over each file and dir under root directory e.g. function: actions/greeting
// add actions/greeting/index.js and actions/greeting/package.json to zip file
if err = filepath.Walk(zw.src, zw.zipFile); err != nil {
return nil
}
// maintain a list of included files and/or directories with their destination
var includeInfo []Include
includeInfo, err = zw.buildIncludeMetadata()
if err != nil {
return err
}
for _, i := range includeInfo {
if i.source != i.destination {
// now determine whether the included item is file or dir
// it could list something like this as well, "actions/common/*.js"
if fileInfo, err = os.Stat(i.source); err != nil {
return err
}
// if the included item is a directory, call a function to copy the
// entire directory recursively including its subdirectories and files
if fileInfo.Mode().IsDir() {
if err = copyDir(i.source, i.destination); err != nil {
return err
}
// if the included item is a file, call a function to copy the file
// along with its path by creating the parent directories
} else if fileInfo.Mode().IsRegular() {
if err = copyFile(i.source, i.destination); err != nil {
return err
}
}
}
// add included item into zip file greeting.zip
if err = filepath.Walk(i.destination, zw.zipFile); err != nil {
return nil
}
}
// now close the zip file greeting.zip as all the included items
// are added into the zip file along with the action root dir
if err = zw.zipWriter.Close(); err != nil {
return err
}
// and its safe to delete the files/directories which we copied earlier
// to include them in the zip file greeting.zip
for _, i := range includeInfo {
if filepath.Clean(i.source) != filepath.Clean(i.destination) {
verboseMsg = wski18n.T(wski18n.ID_VERBOSE_DELETING_FILE_X_path_X,
map[string]interface{}{
wski18n.KEY_PATH: i.destination,
})
wskprint.PrintlnOpenWhiskVerbose(Flags.Verbose, verboseMsg)
os.RemoveAll(i.destination)
}
}
return nil
} | random_line_split | |
zip.go | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package utils
import (
"archive/zip"
"io"
"os"
"path/filepath"
"strings"
"github.com/apache/openwhisk-wskdeploy/wski18n"
"github.com/apache/openwhisk-wskdeploy/wskprint"
)
const PATH_WILDCARD = "*"
const ONE_DIR_UP = "../"
func NewZipWriter(src string, des string, include [][]string, exclude []string, manifestFilePath string) *ZipWriter |
type ZipWriter struct {
src string
des string
include [][]string
exclude []string
excludedFiles map[string]bool
manifestFilePath string
zipWriter *zip.Writer
}
type Include struct {
source string
destination string
}
func (zw *ZipWriter) zipFile(path string, f os.FileInfo, err error) error {
var file *os.File
var wr io.Writer
var verboseMsg string
if err != nil {
return err
}
if zw.excludedFiles[filepath.Clean(path)] {
verboseMsg = wski18n.T(wski18n.ID_VERBOSE_ZIP_EXCLUDING_FILE_X_path_X,
map[string]interface{}{
wski18n.KEY_PATH: path,
})
wskprint.PrintlnOpenWhiskVerbose(Flags.Verbose, verboseMsg)
return nil
}
if !f.Mode().IsRegular() || f.Size() == 0 {
return nil
}
if file, err = os.Open(path); err != nil {
return err
}
defer file.Close()
fileName := strings.TrimPrefix(path, zw.src+"/")
if wr, err = zw.zipWriter.Create(fileName); err != nil {
return err
}
if _, err = io.Copy(wr, file); err != nil {
return err
}
verboseMsg = wski18n.T(wski18n.ID_VERBOSE_ZIP_ADDING_FILE_X_path_X,
map[string]interface{}{
wski18n.KEY_PATH: path,
})
wskprint.PrintlnOpenWhiskVerbose(Flags.Verbose, verboseMsg)
return nil
}
func (zw *ZipWriter) buildIncludeMetadata() ([]Include, error) {
var includeInfo []Include
var listOfSourceFiles []string
var err error
var verboseMsg string
// iterate over set of included files specified in manifest YAML e.g.
// include:
// - ["source"]
// - ["source", "destination"]
for _, includeData := range zw.include {
var i Include
// if "destination" is not specified, its considered same as "source"
// "source" is relative to where manifest.yaml file is located
// relative source path is converted to absolute path by appending manifest path
// since the relative source path might not be accessible from where wskdeploy is invoked
// "destination" is relative to the action directory, the one specified in function
// relative path is converted to absolute path by appending function directory
if len(includeData) == 1 {
i.source = filepath.Join(zw.manifestFilePath, includeData[0])
i.destination = filepath.Join(zw.src, includeData[0])
verboseMsg = wski18n.T(wski18n.ID_VERBOSE_ZIP_INCLUDE_SOURCE_PATH_X_path_X,
map[string]interface{}{
wski18n.KEY_PATH: includeData[0],
})
wskprint.PrintlnOpenWhiskVerbose(Flags.Verbose, verboseMsg)
} else if len(includeData) == 2 {
i.source = filepath.Join(zw.manifestFilePath, includeData[0])
i.destination = zw.src + "/" + includeData[1]
verboseMsg = wski18n.T(wski18n.ID_VERBOSE_ZIP_INCLUDE_SOURCE_PATH_X_path_X_DESTINATION_PATH_X_dest_X,
map[string]interface{}{
wski18n.KEY_PATH: includeData[0],
wski18n.KEY_DESTINATION: includeData[1],
})
} else {
if len(includeData) == 0 {
verboseMsg = wski18n.T(wski18n.ID_VERBOSE_INVALID_INCLUDE_ENTRY,
map[string]interface{}{
wski18n.KEY_INCLUDE: "",
})
wskprint.PrintlnOpenWhiskVerbose(Flags.Verbose, verboseMsg)
} else {
for index, d := range includeData {
includeData[index] = "\"" + d + "\""
}
includeEntry := strings.Join(includeData, ", ")
verboseMsg = wski18n.T(wski18n.ID_VERBOSE_INVALID_INCLUDE_ENTRY,
map[string]interface{}{
wski18n.KEY_INCLUDE: includeEntry,
})
wskprint.PrintlnOpenWhiskVerbose(Flags.Verbose, verboseMsg)
}
continue
}
// set destDir to the destination location
// check if its a file than change it to the Dir of destination file
destDir := i.destination
if isFilePath(destDir) {
destDir = filepath.Dir(destDir)
}
// trim path wildcard "*" from the destination path as if it has any
destDirs := strings.Split(destDir, PATH_WILDCARD)
destDir = destDirs[0]
// retrieve the name of all files matching pattern or nil if there is no matching file
// listOfSourceFiles will hold a list of files matching patterns such as
// actions/* or actions/libs/* or actions/libs/*/utils.js or actions/*/*/utils.js
if listOfSourceFiles, err = filepath.Glob(i.source); err != nil {
return includeInfo, err
}
// handle the scenarios where included path is something similar to actions/common/*.js
// or actions/libs/* or actions/libs/*/utils.js
// and destination is set to libs/ or libs/* or ./libs/* or libs/*/utils.js or libs/ or ./libs/
if strings.ContainsAny(i.source, PATH_WILDCARD) {
wskprint.PrintlnOpenWhiskVerbose(Flags.Verbose, wski18n.T(wski18n.ID_VERBOSE_LIST_OF_FILES_MATCHING_PATTERN))
for _, file := range listOfSourceFiles {
var relPath string
if relPath, err = filepath.Rel(i.source, file); err != nil {
return includeInfo, err
}
relPath = strings.TrimLeft(relPath, ONE_DIR_UP)
j := Include{
source: file,
destination: filepath.Join(destDir, relPath),
}
includeInfo = append(includeInfo, j)
zw.excludedFiles[j.source] = false
verboseMsg = wski18n.T(wski18n.ID_VERBOSE_ZIP_INCLUDE_SOURCE_PATH_X_path_X_DESTINATION_PATH_X_dest_X,
map[string]interface{}{
wski18n.KEY_PATH: j.source,
wski18n.KEY_DESTINATION: j.destination,
})
wskprint.PrintlnOpenWhiskVerbose(Flags.Verbose, verboseMsg)
}
// handle scenarios where included path is something similar to actions/common/utils.js
// and destination is set to ./common/ i.e. no file name specified in the destination
} else {
if f, err := isFile(i.source); err == nil && f {
if _, file := filepath.Split(i.destination); len(file) == 0 {
_, sFile := filepath.Split(i.source)
i.destination = i.destination + sFile
}
}
// append just parsed include info to the list for further processing
wskprint.PrintlnOpenWhiskVerbose(Flags.Verbose, wski18n.T(wski18n.ID_VERBOSE_LIST_OF_FILES_MATCHING_PATTERN))
verboseMsg = wski18n.T(wski18n.ID_VERBOSE_ZIP_INCLUDE_SOURCE_PATH_X_path_X_DESTINATION_PATH_X_dest_X,
map[string]interface{}{
wski18n.KEY_PATH: i.source,
wski18n.KEY_DESTINATION: i.destination,
})
wskprint.PrintlnOpenWhiskVerbose(Flags.Verbose, verboseMsg)
includeInfo = append(includeInfo, i)
zw.excludedFiles[i.source] = false
}
}
return includeInfo, nil
}
func (zw *ZipWriter) buildExcludeMetadata() error {
var err error
for _, exclude := range zw.exclude {
exclude = filepath.Join(zw.manifestFilePath, exclude)
if err = zw.findExcludedIncludedFiles(exclude, true); err != nil {
return err
}
}
return err
}
func (zw *ZipWriter) findExcludedIncludedFiles(functionPath string, flag bool) error {
var err error
var files []string
var excludedFiles []string
var f bool
if !strings.HasSuffix(functionPath, PATH_WILDCARD) {
functionPath = filepath.Join(functionPath, PATH_WILDCARD)
}
if excludedFiles, err = filepath.Glob(functionPath); err != nil {
return err
}
for _, file := range excludedFiles {
err = filepath.Walk(file, func(path string, info os.FileInfo, err error) error {
files = append(files, path)
return nil
})
if err != nil {
return err
}
}
for _, file := range files {
if f, err = isFile(file); err != nil {
return err
} else if f {
zw.excludedFiles[file] = flag
} else {
if err = zw.findExcludedIncludedFiles(file, flag); err != nil {
return err
}
}
}
return err
}
func (zw *ZipWriter) Zip() error {
var zipFile *os.File
var err error
var fileInfo os.FileInfo
var verboseMsg string
// create zip file e.g. greeting.zip
if zipFile, err = os.Create(zw.des); err != nil {
return err
}
defer zipFile.Close()
verboseMsg = wski18n.T(wski18n.ID_VERBOSE_CREATING_ZIP_FILE_X_path_X,
map[string]interface{}{
wski18n.KEY_PATH: zipFile.Name(),
})
wskprint.PrintlnOpenWhiskVerbose(Flags.Verbose, verboseMsg)
// creating a new zip writter for greeting.zip
zw.zipWriter = zip.NewWriter(zipFile)
// build a map of file names and bool indicating whether the file is included or excluded
// iterate over the directory specified in "function", find the list of files and mark them as not excluded
if err = zw.findExcludedIncludedFiles(zw.src, false); err != nil {
return err
}
if err = zw.buildExcludeMetadata(); err != nil {
return err
}
// walk file system rooted at the directory specified in "function"
// walk over each file and dir under root directory e.g. function: actions/greeting
// add actions/greeting/index.js and actions/greeting/package.json to zip file
if err = filepath.Walk(zw.src, zw.zipFile); err != nil {
return nil
}
// maintain a list of included files and/or directories with their destination
var includeInfo []Include
includeInfo, err = zw.buildIncludeMetadata()
if err != nil {
return err
}
for _, i := range includeInfo {
if i.source != i.destination {
// now determine whether the included item is file or dir
// it could list something like this as well, "actions/common/*.js"
if fileInfo, err = os.Stat(i.source); err != nil {
return err
}
// if the included item is a directory, call a function to copy the
// entire directory recursively including its subdirectories and files
if fileInfo.Mode().IsDir() {
if err = copyDir(i.source, i.destination); err != nil {
return err
}
// if the included item is a file, call a function to copy the file
// along with its path by creating the parent directories
} else if fileInfo.Mode().IsRegular() {
if err = copyFile(i.source, i.destination); err != nil {
return err
}
}
}
// add included item into zip file greeting.zip
if err = filepath.Walk(i.destination, zw.zipFile); err != nil {
return nil
}
}
// now close the zip file greeting.zip as all the included items
// are added into the zip file along with the action root dir
if err = zw.zipWriter.Close(); err != nil {
return err
}
// and its safe to delete the files/directories which we copied earlier
// to include them in the zip file greeting.zip
for _, i := range includeInfo {
if filepath.Clean(i.source) != filepath.Clean(i.destination) {
verboseMsg = wski18n.T(wski18n.ID_VERBOSE_DELETING_FILE_X_path_X,
map[string]interface{}{
wski18n.KEY_PATH: i.destination,
})
wskprint.PrintlnOpenWhiskVerbose(Flags.Verbose, verboseMsg)
os.RemoveAll(i.destination)
}
}
return nil
}
| {
zw := &ZipWriter{
src: src,
des: des,
include: include,
exclude: exclude,
excludedFiles: make(map[string]bool, 0),
manifestFilePath: manifestFilePath,
}
return zw
} | identifier_body |
ingress.go | // Copyright 2016-2022, Pulumi Corporation.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package await
import (
"encoding/json"
"fmt"
"reflect"
"time"
"github.com/pkg/errors"
"github.com/pulumi/pulumi-kubernetes/provider/v4/pkg/await/informers"
"github.com/pulumi/pulumi-kubernetes/provider/v4/pkg/clients"
"github.com/pulumi/pulumi-kubernetes/provider/v4/pkg/kinds"
"github.com/pulumi/pulumi-kubernetes/provider/v4/pkg/metadata"
"github.com/pulumi/pulumi-kubernetes/provider/v4/pkg/openapi"
"github.com/pulumi/pulumi/sdk/v3/go/common/diag"
"github.com/pulumi/pulumi/sdk/v3/go/common/util/cmdutil"
logger "github.com/pulumi/pulumi/sdk/v3/go/common/util/logging"
networkingv1 "k8s.io/api/networking/v1"
networkingv1beta1 "k8s.io/api/networking/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/dynamic"
)
// ------------------------------------------------------------------------------------------------
// Await logic for extensions/v1beta1/Ingress.
//
// The goal of this code is to provide a fine-grained account of the status of a Kubernetes Ingress
// resource as it is being initialized. The idea is that if something goes wrong early, we want
// to alert the user so they can cancel the operation instead of waiting for timeout (~10 minutes).
//
// The design of this awaiter is fundamentally an event loop on four channels:
//
// 1. The Ingress channel, to which the Kubernetes API server will proactively push every change
// (additions, modifications, deletions) to any Ingress it knows about.
// 2. The Endpoint channel, which is the same idea as the Ingress channel, except it gets updates
// to Endpoint objects.
// 3. A timeout channel, which fires after some time.
// 4. A cancellation channel, with which the user can signal cancellation (e.g., using SIGINT).
//
// The `ingressInitAwaiter` will synchronously process events from the union of all these channels.
// Any time the success conditions described above a reached, we will terminate the awaiter.
//
// x-refs:
// * https://github.com/nginxinc/kubernetes-ingress/blob/5847d1f3906287d2771f3767d61c15ac02522caa/docs/report-ingress-status.md
// ------------------------------------------------------------------------------------------------
const (
DefaultIngressTimeoutMins = 10
)
type ingressInitAwaiter struct {
config createAwaitConfig
ingress *unstructured.Unstructured
ingressReady bool
endpointsSettled bool
endpointEventsCount uint64
knownEndpointObjects sets.String
knownExternalNameServices sets.String
}
func makeIngressInitAwaiter(c createAwaitConfig) *ingressInitAwaiter {
return &ingressInitAwaiter{
config: c,
ingress: c.currentOutputs,
ingressReady: false,
endpointsSettled: false,
knownEndpointObjects: sets.NewString(),
knownExternalNameServices: sets.NewString(),
}
}
func awaitIngressInit(c createAwaitConfig) error {
return makeIngressInitAwaiter(c).Await()
}
func awaitIngressRead(c createAwaitConfig) error {
return makeIngressInitAwaiter(c).Read()
}
func awaitIngressUpdate(u updateAwaitConfig) error {
return makeIngressInitAwaiter(u.createAwaitConfig).Await()
}
func (iia *ingressInitAwaiter) Await() error {
//
// We succeed only when all of the following are true:
//
// 1. Ingress object exists.
// 2. Endpoint objects exist with matching names for each Ingress path (except when Service
// type is ExternalName).
// 3. Ingress entry exists for .status.loadBalancer.ingress.
//
stopper := make(chan struct{})
defer close(stopper)
informerFactory := informers.NewInformerFactory(iia.config.clientSet,
informers.WithNamespaceOrDefault(iia.config.currentInputs.GetNamespace()))
informerFactory.Start(stopper)
ingressEvents := make(chan watch.Event)
ingressInformer, err := informers.New(informerFactory, informers.ForGVR(schema.GroupVersionResource{
Group: "networking.k8s.io",
Version: "v1",
Resource: "ingresses",
}), informers.WithEventChannel(ingressEvents))
if err != nil {
return err
}
go ingressInformer.Informer().Run(stopper)
endpointsEvents := make(chan watch.Event)
endpointsInformer, err := informers.New(informerFactory, informers.ForEndpoints(), informers.WithEventChannel(endpointsEvents))
if err != nil {
return err
}
go endpointsInformer.Informer().Run(stopper)
serviceEvents := make(chan watch.Event)
serviceInformer, err := informers.New(informerFactory, informers.ForServices(), informers.WithEventChannel(serviceEvents))
if err != nil {
return err
}
go serviceInformer.Informer().Run(stopper)
timeout := metadata.TimeoutDuration(iia.config.timeout, iia.config.currentInputs, DefaultIngressTimeoutMins*60)
return iia.await(ingressEvents, serviceEvents, endpointsEvents, make(chan struct{}), time.After(60*time.Second), time.After(timeout))
}
func (iia *ingressInitAwaiter) Read() error {
ingressClient, endpointsClient, servicesClient, err := iia.makeClients()
if err != nil {
return err
}
// Get live versions of Ingress.
ingress, err := ingressClient.Get(iia.config.ctx, iia.config.currentInputs.GetName(), metav1.GetOptions{})
if err != nil {
// IMPORTANT: Do not wrap this error! If this is a 404, the provider need to know so that it
// can mark the deployment as having been deleted.
return err
}
// Get live version of Endpoints.
endpointList, err := endpointsClient.List(iia.config.ctx, metav1.ListOptions{})
if err != nil {
logger.V(3).Infof("Failed to list endpoints needed for Ingress awaiter: %v", err)
endpointList = &unstructured.UnstructuredList{Items: []unstructured.Unstructured{}}
}
serviceList, err := servicesClient.List(iia.config.ctx, metav1.ListOptions{})
if err != nil {
logger.V(3).Infof("Failed to list services needed for Ingress awaiter: %v", err)
serviceList = &unstructured.UnstructuredList{Items: []unstructured.Unstructured{}}
}
return iia.read(ingress, endpointList, serviceList)
}
func (iia *ingressInitAwaiter) read(ingress *unstructured.Unstructured, endpoints *unstructured.UnstructuredList,
services *unstructured.UnstructuredList) error {
iia.processIngressEvent(watchAddedEvent(ingress))
err := services.EachListItem(func(service runtime.Object) error {
iia.processServiceEvent(watchAddedEvent(service.(*unstructured.Unstructured)))
return nil
})
if err != nil {
logger.V(3).Infof("Error iterating over endpoint list for service %q: %v", ingress.GetName(), err)
}
settled := make(chan struct{})
logger.V(3).Infof("Processing endpoint list: %#v", endpoints)
err = endpoints.EachListItem(func(endpoint runtime.Object) error {
iia.processEndpointEvent(watchAddedEvent(endpoint.(*unstructured.Unstructured)), settled)
return nil
})
if err != nil {
logger.V(3).Infof("Error iterating over endpoint list for ingress %q: %v", ingress.GetName(), err)
}
iia.endpointsSettled = true
if iia.checkAndLogStatus() {
return nil
}
return &initializationError{
subErrors: iia.errorMessages(),
object: ingress,
}
}
// await is a helper companion to `Await` designed to make it easy to test this module.
func (iia *ingressInitAwaiter) await(
ingressEvents, serviceEvents, endpointsEvents <-chan watch.Event,
settled chan struct{},
settlementGracePeriodExpired <-chan time.Time,
timeout <-chan time.Time,
) error {
iia.config.logStatus(diag.Info, "[1/3] Finding a matching service for each Ingress path")
for {
// Check whether we've succeeded.
if iia.checkAndLogStatus() {
return nil
}
// Else, wait for updates.
select {
case <-iia.config.ctx.Done():
// On cancel, check one last time if the ingress is ready.
if _, ready := iia.checkIfEndpointsReady(); ready && iia.ingressReady {
return nil
}
return &cancellationError{
object: iia.ingress,
subErrors: iia.errorMessages(),
}
case <-timeout:
// On timeout, check one last time if the ingress is ready.
if _, ready := iia.checkIfEndpointsReady(); ready && iia.ingressReady {
return nil
}
return &timeoutError{
object: iia.ingress,
subErrors: iia.errorMessages(),
}
case <-settlementGracePeriodExpired:
// If we don't see any endpoint events in the designated time, assume endpoints have settled.
// This is to account for the distinct possibility of ingress using a resource reference or non-existent
// endpoints - in which case we will never see corresponding endpoint events.
if iia.endpointEventsCount == 0 {
iia.endpointsSettled = true
}
case <-settled:
iia.endpointsSettled = true
case event := <-ingressEvents:
iia.processIngressEvent(event)
case event := <-endpointsEvents:
iia.processEndpointEvent(event, settled)
case event := <-serviceEvents:
iia.processServiceEvent(event)
}
}
}
func (iia *ingressInitAwaiter) processServiceEvent(event watch.Event) {
service, isUnstructured := event.Object.(*unstructured.Unstructured)
if !isUnstructured {
logger.V(3).Infof("Service watch received unknown object type %q",
reflect.TypeOf(service))
return
}
name := service.GetName()
if event.Type == watch.Deleted {
iia.knownExternalNameServices.Delete(name)
return
}
t, ok := openapi.Pluck(service.Object, "spec", "type")
if ok && t.(string) == "ExternalName" {
iia.knownExternalNameServices.Insert(name)
}
}
func (iia *ingressInitAwaiter) processIngressEvent(event watch.Event) {
inputIngressName := iia.config.currentInputs.GetName()
ingress, isUnstructured := event.Object.(*unstructured.Unstructured)
if !isUnstructured {
logger.V(3).Infof("Ingress watch received unknown object type %q",
reflect.TypeOf(ingress))
return
}
// Do nothing if this is not the ingress we're waiting for.
if ingress.GetName() != inputIngressName {
return
}
// Start with a blank slate.
iia.ingressReady = false
// Mark the ingress as not ready if it's deleted.
if event.Type == watch.Deleted {
return
}
iia.ingress = ingress
// To the best of my knowledge, this works across all known ingress api version variations.
ingressesRaw, ok := openapi.Pluck(ingress.Object, "status", "loadBalancer", "ingress")
if !ok {
logger.V(3).Infof("Unable to decode Ingress object from unstructured: %#v", ingress)
return
}
ingresses, ok := ingressesRaw.([]any)
if !ok {
logger.V(3).Infof("Unexpected ingress object structure from unstructured: %#v", ingress)
return
}
// Update status of ingress object so that we can check success.
iia.ingressReady = len(ingresses) > 0
logger.V(3).Infof("Waiting for ingress %q to update .status.loadBalancer with hostname/IP",
inputIngressName)
}
func decodeIngress(u *unstructured.Unstructured, to any) error {
b, err := u.MarshalJSON()
if err != nil {
return err
}
err = json.Unmarshal(b, to)
if err != nil {
return err
}
return nil
}
func (iia *ingressInitAwaiter) checkIfEndpointsReady() (string, bool) {
apiVersion := iia.ingress.GetAPIVersion()
switch apiVersion {
case "extensions/v1beta1", "networking.k8s.io/v1beta1":
var obj networkingv1beta1.Ingress
if err := decodeIngress(iia.ingress, &obj); err != nil {
logger.V(3).Infof("Unable to decode Ingress object from unstructured: %#v", iia.ingress)
return apiVersion, false
}
for _, rule := range obj.Spec.Rules {
var httpIngressPaths []networkingv1beta1.HTTPIngressPath
if rule.HTTP != nil {
httpIngressPaths = rule.HTTP.Paths
}
for _, path := range httpIngressPaths {
// Ignore ExternalName services
if path.Backend.ServiceName != "" && iia.knownExternalNameServices.Has(path.Backend.ServiceName) {
continue
}
if path.Backend.ServiceName != "" && !iia.knownEndpointObjects.Has(path.Backend.ServiceName) {
if iia.endpointsSettled {
// We haven't seen the target endpoint emit any events within the settlement period
// and there is a chance it may never exist.
iia.config.logStatus(diag.Warning, fmt.Sprintf("No matching service found for ingress rule: %s",
expectedIngressPath(rule.Host, path.Path, path.Backend.ServiceName)))
} else {
// We may get more endpoint events, lets wait and retry.
return apiVersion, false
}
}
}
}
case "networking.k8s.io/v1":
var obj networkingv1.Ingress
if err := decodeIngress(iia.ingress, &obj); err != nil {
logger.V(3).Infof("Unable to decode Ingress object from unstructured: %#v", iia.ingress)
return apiVersion, false
}
for _, rule := range obj.Spec.Rules {
var httpIngressPaths []networkingv1.HTTPIngressPath
if rule.HTTP != nil {
httpIngressPaths = rule.HTTP.Paths
}
for _, path := range httpIngressPaths {
// TODO: Should we worry about "resource" backends?
if path.Backend.Service == nil {
continue
}
// Ignore ExternalName services
if path.Backend.Service.Name != "" && iia.knownExternalNameServices.Has(path.Backend.Service.Name) {
continue
}
if path.Backend.Service.Name != "" && !iia.knownEndpointObjects.Has(path.Backend.Service.Name) {
if iia.endpointsSettled {
// We haven't seen the target endpoint emit any events within the settlement period
// and there is a chance it may never exist
// (https://github.com/pulumi/pulumi-kubernetes/issues/1810)
iia.config.logStatus(diag.Warning, fmt.Sprintf("No matching service found for ingress rule: %s",
expectedIngressPath(rule.Host, path.Path, path.Backend.Service.Name)))
} else {
// We may get more endpoint events, lets wait and retry.
return apiVersion, false
}
}
}
}
}
return apiVersion, true
}
// expectedIngressPath is a helper to print a useful error message.
func | (host, path, serviceName string) string {
rulePath := path
if host != "" {
rulePath = host + path
}
// It is valid for a user not to specify either a host or path [1]. In this case, any traffic not
// matching another rule is routed to the specified Service for this rule. Print
// `"" (default path)` to make this expectation clear to users.
//
// [1] https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.13/#httpingresspath-v1beta1-extensions
if rulePath == "" {
rulePath = `"" (default path)`
} else {
rulePath = fmt.Sprintf("%q", rulePath)
}
// [host][path] -> serviceName
return fmt.Sprintf("%s -> %q", rulePath, serviceName)
}
func (iia *ingressInitAwaiter) processEndpointEvent(event watch.Event, settledCh chan<- struct{}) {
// Get endpoint object.
endpoint, isUnstructured := event.Object.(*unstructured.Unstructured)
if !isUnstructured {
logger.V(3).Infof("Endpoint watch received unknown object type %q",
reflect.TypeOf(endpoint))
return
}
iia.endpointEventsCount++
name := endpoint.GetName()
switch event.Type {
case watch.Added, watch.Modified:
iia.knownEndpointObjects.Insert(name)
case watch.Deleted:
iia.knownEndpointObjects.Delete(name)
// NOTE: Unlike `processServiceEvent` don't return; we still want to set
// `iia.endpointsSettled` to `false`.
}
// Every time we get an update to one of our endpoints objects, give it a few seconds
// for them to settle.
iia.endpointsSettled = false
go func() {
time.Sleep(10 * time.Second)
settledCh <- struct{}{}
}()
}
func (iia *ingressInitAwaiter) errorMessages() []string {
messages := make([]string, 0)
if _, ready := iia.checkIfEndpointsReady(); !ready {
messages = append(messages, "Ingress has at least one rule with an unavailable target endpoint.")
}
if !iia.ingressReady {
messages = append(messages,
"Ingress .status.loadBalancer field was not updated with a hostname/IP address. "+
"\n for more information about this error, see https://pulumi.io/xdv72s")
}
return messages
}
func (iia *ingressInitAwaiter) checkAndLogStatus() bool {
_, ready := iia.checkIfEndpointsReady()
success := iia.ingressReady && ready
if success {
iia.config.logStatus(diag.Info,
fmt.Sprintf("%sIngress initialization complete", cmdutil.EmojiOr("✅ ", "")))
} else if ready {
iia.config.logStatus(diag.Info, "[2/3] Waiting for update of .status.loadBalancer with hostname/IP")
}
return success
}
func (iia *ingressInitAwaiter) makeClients() (
ingressClient, endpointsClient, servicesClient dynamic.ResourceInterface, err error,
) {
ingressClient, err = clients.ResourceClient(
kinds.Ingress, iia.config.currentInputs.GetNamespace(), iia.config.clientSet)
if err != nil {
return nil, nil, nil, errors.Wrapf(err,
"Could not make client to watch Ingress %q",
iia.config.currentInputs.GetName())
}
endpointsClient, err = clients.ResourceClient(
kinds.Endpoints, iia.config.currentInputs.GetNamespace(), iia.config.clientSet)
if err != nil {
return nil, nil, nil, errors.Wrapf(err,
"Could not make client to watch Endpoints associated with Ingress %q",
iia.config.currentInputs.GetName())
}
servicesClient, err = clients.ResourceClient(
kinds.Service, iia.config.currentInputs.GetNamespace(), iia.config.clientSet)
if err != nil {
return nil, nil, nil, errors.Wrapf(err,
"Could not make client to watch Services associated with Ingress %q",
iia.config.currentInputs.GetName())
}
return
}
| expectedIngressPath | identifier_name |
ingress.go | // Copyright 2016-2022, Pulumi Corporation.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package await
import (
"encoding/json"
"fmt"
"reflect"
"time"
"github.com/pkg/errors"
"github.com/pulumi/pulumi-kubernetes/provider/v4/pkg/await/informers"
"github.com/pulumi/pulumi-kubernetes/provider/v4/pkg/clients"
"github.com/pulumi/pulumi-kubernetes/provider/v4/pkg/kinds"
"github.com/pulumi/pulumi-kubernetes/provider/v4/pkg/metadata"
"github.com/pulumi/pulumi-kubernetes/provider/v4/pkg/openapi"
"github.com/pulumi/pulumi/sdk/v3/go/common/diag"
"github.com/pulumi/pulumi/sdk/v3/go/common/util/cmdutil"
logger "github.com/pulumi/pulumi/sdk/v3/go/common/util/logging"
networkingv1 "k8s.io/api/networking/v1"
networkingv1beta1 "k8s.io/api/networking/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/dynamic"
)
// ------------------------------------------------------------------------------------------------
// Await logic for extensions/v1beta1/Ingress.
//
// The goal of this code is to provide a fine-grained account of the status of a Kubernetes Ingress
// resource as it is being initialized. The idea is that if something goes wrong early, we want
// to alert the user so they can cancel the operation instead of waiting for timeout (~10 minutes).
//
// The design of this awaiter is fundamentally an event loop on four channels:
//
// 1. The Ingress channel, to which the Kubernetes API server will proactively push every change
// (additions, modifications, deletions) to any Ingress it knows about.
// 2. The Endpoint channel, which is the same idea as the Ingress channel, except it gets updates
// to Endpoint objects.
// 3. A timeout channel, which fires after some time.
// 4. A cancellation channel, with which the user can signal cancellation (e.g., using SIGINT).
//
// The `ingressInitAwaiter` will synchronously process events from the union of all these channels.
// Any time the success conditions described above a reached, we will terminate the awaiter.
//
// x-refs:
// * https://github.com/nginxinc/kubernetes-ingress/blob/5847d1f3906287d2771f3767d61c15ac02522caa/docs/report-ingress-status.md
// ------------------------------------------------------------------------------------------------
const (
DefaultIngressTimeoutMins = 10
)
type ingressInitAwaiter struct {
config createAwaitConfig
ingress *unstructured.Unstructured
ingressReady bool
endpointsSettled bool
endpointEventsCount uint64
knownEndpointObjects sets.String
knownExternalNameServices sets.String
}
func makeIngressInitAwaiter(c createAwaitConfig) *ingressInitAwaiter {
return &ingressInitAwaiter{
config: c,
ingress: c.currentOutputs,
ingressReady: false,
endpointsSettled: false,
knownEndpointObjects: sets.NewString(),
knownExternalNameServices: sets.NewString(),
}
}
func awaitIngressInit(c createAwaitConfig) error {
return makeIngressInitAwaiter(c).Await()
}
func awaitIngressRead(c createAwaitConfig) error {
return makeIngressInitAwaiter(c).Read()
}
func awaitIngressUpdate(u updateAwaitConfig) error {
return makeIngressInitAwaiter(u.createAwaitConfig).Await()
}
func (iia *ingressInitAwaiter) Await() error {
//
// We succeed only when all of the following are true:
//
// 1. Ingress object exists.
// 2. Endpoint objects exist with matching names for each Ingress path (except when Service
// type is ExternalName).
// 3. Ingress entry exists for .status.loadBalancer.ingress.
//
stopper := make(chan struct{})
defer close(stopper)
informerFactory := informers.NewInformerFactory(iia.config.clientSet,
informers.WithNamespaceOrDefault(iia.config.currentInputs.GetNamespace()))
informerFactory.Start(stopper)
ingressEvents := make(chan watch.Event)
ingressInformer, err := informers.New(informerFactory, informers.ForGVR(schema.GroupVersionResource{
Group: "networking.k8s.io",
Version: "v1",
Resource: "ingresses",
}), informers.WithEventChannel(ingressEvents))
if err != nil {
return err
}
go ingressInformer.Informer().Run(stopper)
endpointsEvents := make(chan watch.Event)
endpointsInformer, err := informers.New(informerFactory, informers.ForEndpoints(), informers.WithEventChannel(endpointsEvents))
if err != nil {
return err
}
go endpointsInformer.Informer().Run(stopper)
serviceEvents := make(chan watch.Event)
serviceInformer, err := informers.New(informerFactory, informers.ForServices(), informers.WithEventChannel(serviceEvents))
if err != nil {
return err
}
go serviceInformer.Informer().Run(stopper)
timeout := metadata.TimeoutDuration(iia.config.timeout, iia.config.currentInputs, DefaultIngressTimeoutMins*60)
return iia.await(ingressEvents, serviceEvents, endpointsEvents, make(chan struct{}), time.After(60*time.Second), time.After(timeout))
}
func (iia *ingressInitAwaiter) Read() error {
ingressClient, endpointsClient, servicesClient, err := iia.makeClients()
if err != nil {
return err
}
// Get live versions of Ingress.
ingress, err := ingressClient.Get(iia.config.ctx, iia.config.currentInputs.GetName(), metav1.GetOptions{})
if err != nil {
// IMPORTANT: Do not wrap this error! If this is a 404, the provider need to know so that it
// can mark the deployment as having been deleted.
return err
}
// Get live version of Endpoints.
endpointList, err := endpointsClient.List(iia.config.ctx, metav1.ListOptions{})
if err != nil {
logger.V(3).Infof("Failed to list endpoints needed for Ingress awaiter: %v", err)
endpointList = &unstructured.UnstructuredList{Items: []unstructured.Unstructured{}}
}
serviceList, err := servicesClient.List(iia.config.ctx, metav1.ListOptions{})
if err != nil {
logger.V(3).Infof("Failed to list services needed for Ingress awaiter: %v", err)
serviceList = &unstructured.UnstructuredList{Items: []unstructured.Unstructured{}}
}
return iia.read(ingress, endpointList, serviceList)
}
func (iia *ingressInitAwaiter) read(ingress *unstructured.Unstructured, endpoints *unstructured.UnstructuredList,
services *unstructured.UnstructuredList) error {
iia.processIngressEvent(watchAddedEvent(ingress))
err := services.EachListItem(func(service runtime.Object) error {
iia.processServiceEvent(watchAddedEvent(service.(*unstructured.Unstructured)))
return nil
})
if err != nil {
logger.V(3).Infof("Error iterating over endpoint list for service %q: %v", ingress.GetName(), err)
}
settled := make(chan struct{})
logger.V(3).Infof("Processing endpoint list: %#v", endpoints)
err = endpoints.EachListItem(func(endpoint runtime.Object) error {
iia.processEndpointEvent(watchAddedEvent(endpoint.(*unstructured.Unstructured)), settled)
return nil
})
if err != nil {
logger.V(3).Infof("Error iterating over endpoint list for ingress %q: %v", ingress.GetName(), err)
}
iia.endpointsSettled = true
if iia.checkAndLogStatus() {
return nil
}
return &initializationError{
subErrors: iia.errorMessages(),
object: ingress,
}
}
// await is a helper companion to `Await` designed to make it easy to test this module.
func (iia *ingressInitAwaiter) await(
ingressEvents, serviceEvents, endpointsEvents <-chan watch.Event,
settled chan struct{},
settlementGracePeriodExpired <-chan time.Time,
timeout <-chan time.Time,
) error {
iia.config.logStatus(diag.Info, "[1/3] Finding a matching service for each Ingress path")
for {
// Check whether we've succeeded.
if iia.checkAndLogStatus() {
return nil
}
// Else, wait for updates.
select {
case <-iia.config.ctx.Done():
// On cancel, check one last time if the ingress is ready.
if _, ready := iia.checkIfEndpointsReady(); ready && iia.ingressReady {
return nil
}
return &cancellationError{
object: iia.ingress,
subErrors: iia.errorMessages(),
}
case <-timeout:
// On timeout, check one last time if the ingress is ready.
if _, ready := iia.checkIfEndpointsReady(); ready && iia.ingressReady {
return nil
}
return &timeoutError{
object: iia.ingress,
subErrors: iia.errorMessages(),
}
case <-settlementGracePeriodExpired:
// If we don't see any endpoint events in the designated time, assume endpoints have settled.
// This is to account for the distinct possibility of ingress using a resource reference or non-existent
// endpoints - in which case we will never see corresponding endpoint events.
if iia.endpointEventsCount == 0 {
iia.endpointsSettled = true
}
case <-settled:
iia.endpointsSettled = true
case event := <-ingressEvents:
iia.processIngressEvent(event)
case event := <-endpointsEvents:
iia.processEndpointEvent(event, settled)
case event := <-serviceEvents:
iia.processServiceEvent(event)
}
}
}
func (iia *ingressInitAwaiter) processServiceEvent(event watch.Event) {
service, isUnstructured := event.Object.(*unstructured.Unstructured)
if !isUnstructured {
logger.V(3).Infof("Service watch received unknown object type %q",
reflect.TypeOf(service))
return
}
name := service.GetName()
if event.Type == watch.Deleted {
iia.knownExternalNameServices.Delete(name)
return
}
t, ok := openapi.Pluck(service.Object, "spec", "type")
if ok && t.(string) == "ExternalName" {
iia.knownExternalNameServices.Insert(name)
}
}
func (iia *ingressInitAwaiter) processIngressEvent(event watch.Event) {
inputIngressName := iia.config.currentInputs.GetName()
ingress, isUnstructured := event.Object.(*unstructured.Unstructured)
if !isUnstructured {
logger.V(3).Infof("Ingress watch received unknown object type %q",
reflect.TypeOf(ingress))
return
}
// Do nothing if this is not the ingress we're waiting for.
if ingress.GetName() != inputIngressName {
return
}
// Start with a blank slate.
iia.ingressReady = false
// Mark the ingress as not ready if it's deleted.
if event.Type == watch.Deleted {
return
}
iia.ingress = ingress
// To the best of my knowledge, this works across all known ingress api version variations.
ingressesRaw, ok := openapi.Pluck(ingress.Object, "status", "loadBalancer", "ingress")
if !ok {
logger.V(3).Infof("Unable to decode Ingress object from unstructured: %#v", ingress)
return
}
ingresses, ok := ingressesRaw.([]any)
if !ok {
logger.V(3).Infof("Unexpected ingress object structure from unstructured: %#v", ingress)
return
}
// Update status of ingress object so that we can check success.
iia.ingressReady = len(ingresses) > 0
logger.V(3).Infof("Waiting for ingress %q to update .status.loadBalancer with hostname/IP",
inputIngressName)
}
func decodeIngress(u *unstructured.Unstructured, to any) error {
b, err := u.MarshalJSON()
if err != nil {
return err
}
err = json.Unmarshal(b, to)
if err != nil {
return err
}
return nil
}
func (iia *ingressInitAwaiter) checkIfEndpointsReady() (string, bool) {
apiVersion := iia.ingress.GetAPIVersion()
switch apiVersion {
case "extensions/v1beta1", "networking.k8s.io/v1beta1":
var obj networkingv1beta1.Ingress
if err := decodeIngress(iia.ingress, &obj); err != nil {
logger.V(3).Infof("Unable to decode Ingress object from unstructured: %#v", iia.ingress)
return apiVersion, false
}
for _, rule := range obj.Spec.Rules {
var httpIngressPaths []networkingv1beta1.HTTPIngressPath
if rule.HTTP != nil {
httpIngressPaths = rule.HTTP.Paths
}
for _, path := range httpIngressPaths {
// Ignore ExternalName services
if path.Backend.ServiceName != "" && iia.knownExternalNameServices.Has(path.Backend.ServiceName) |
if path.Backend.ServiceName != "" && !iia.knownEndpointObjects.Has(path.Backend.ServiceName) {
if iia.endpointsSettled {
// We haven't seen the target endpoint emit any events within the settlement period
// and there is a chance it may never exist.
iia.config.logStatus(diag.Warning, fmt.Sprintf("No matching service found for ingress rule: %s",
expectedIngressPath(rule.Host, path.Path, path.Backend.ServiceName)))
} else {
// We may get more endpoint events, lets wait and retry.
return apiVersion, false
}
}
}
}
case "networking.k8s.io/v1":
var obj networkingv1.Ingress
if err := decodeIngress(iia.ingress, &obj); err != nil {
logger.V(3).Infof("Unable to decode Ingress object from unstructured: %#v", iia.ingress)
return apiVersion, false
}
for _, rule := range obj.Spec.Rules {
var httpIngressPaths []networkingv1.HTTPIngressPath
if rule.HTTP != nil {
httpIngressPaths = rule.HTTP.Paths
}
for _, path := range httpIngressPaths {
// TODO: Should we worry about "resource" backends?
if path.Backend.Service == nil {
continue
}
// Ignore ExternalName services
if path.Backend.Service.Name != "" && iia.knownExternalNameServices.Has(path.Backend.Service.Name) {
continue
}
if path.Backend.Service.Name != "" && !iia.knownEndpointObjects.Has(path.Backend.Service.Name) {
if iia.endpointsSettled {
// We haven't seen the target endpoint emit any events within the settlement period
// and there is a chance it may never exist
// (https://github.com/pulumi/pulumi-kubernetes/issues/1810)
iia.config.logStatus(diag.Warning, fmt.Sprintf("No matching service found for ingress rule: %s",
expectedIngressPath(rule.Host, path.Path, path.Backend.Service.Name)))
} else {
// We may get more endpoint events, lets wait and retry.
return apiVersion, false
}
}
}
}
}
return apiVersion, true
}
// expectedIngressPath is a helper to print a useful error message.
func expectedIngressPath(host, path, serviceName string) string {
rulePath := path
if host != "" {
rulePath = host + path
}
// It is valid for a user not to specify either a host or path [1]. In this case, any traffic not
// matching another rule is routed to the specified Service for this rule. Print
// `"" (default path)` to make this expectation clear to users.
//
// [1] https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.13/#httpingresspath-v1beta1-extensions
if rulePath == "" {
rulePath = `"" (default path)`
} else {
rulePath = fmt.Sprintf("%q", rulePath)
}
// [host][path] -> serviceName
return fmt.Sprintf("%s -> %q", rulePath, serviceName)
}
func (iia *ingressInitAwaiter) processEndpointEvent(event watch.Event, settledCh chan<- struct{}) {
// Get endpoint object.
endpoint, isUnstructured := event.Object.(*unstructured.Unstructured)
if !isUnstructured {
logger.V(3).Infof("Endpoint watch received unknown object type %q",
reflect.TypeOf(endpoint))
return
}
iia.endpointEventsCount++
name := endpoint.GetName()
switch event.Type {
case watch.Added, watch.Modified:
iia.knownEndpointObjects.Insert(name)
case watch.Deleted:
iia.knownEndpointObjects.Delete(name)
// NOTE: Unlike `processServiceEvent` don't return; we still want to set
// `iia.endpointsSettled` to `false`.
}
// Every time we get an update to one of our endpoints objects, give it a few seconds
// for them to settle.
iia.endpointsSettled = false
go func() {
time.Sleep(10 * time.Second)
settledCh <- struct{}{}
}()
}
func (iia *ingressInitAwaiter) errorMessages() []string {
messages := make([]string, 0)
if _, ready := iia.checkIfEndpointsReady(); !ready {
messages = append(messages, "Ingress has at least one rule with an unavailable target endpoint.")
}
if !iia.ingressReady {
messages = append(messages,
"Ingress .status.loadBalancer field was not updated with a hostname/IP address. "+
"\n for more information about this error, see https://pulumi.io/xdv72s")
}
return messages
}
func (iia *ingressInitAwaiter) checkAndLogStatus() bool {
_, ready := iia.checkIfEndpointsReady()
success := iia.ingressReady && ready
if success {
iia.config.logStatus(diag.Info,
fmt.Sprintf("%sIngress initialization complete", cmdutil.EmojiOr("✅ ", "")))
} else if ready {
iia.config.logStatus(diag.Info, "[2/3] Waiting for update of .status.loadBalancer with hostname/IP")
}
return success
}
func (iia *ingressInitAwaiter) makeClients() (
ingressClient, endpointsClient, servicesClient dynamic.ResourceInterface, err error,
) {
ingressClient, err = clients.ResourceClient(
kinds.Ingress, iia.config.currentInputs.GetNamespace(), iia.config.clientSet)
if err != nil {
return nil, nil, nil, errors.Wrapf(err,
"Could not make client to watch Ingress %q",
iia.config.currentInputs.GetName())
}
endpointsClient, err = clients.ResourceClient(
kinds.Endpoints, iia.config.currentInputs.GetNamespace(), iia.config.clientSet)
if err != nil {
return nil, nil, nil, errors.Wrapf(err,
"Could not make client to watch Endpoints associated with Ingress %q",
iia.config.currentInputs.GetName())
}
servicesClient, err = clients.ResourceClient(
kinds.Service, iia.config.currentInputs.GetNamespace(), iia.config.clientSet)
if err != nil {
return nil, nil, nil, errors.Wrapf(err,
"Could not make client to watch Services associated with Ingress %q",
iia.config.currentInputs.GetName())
}
return
}
| {
continue
} | conditional_block |
ingress.go | // Copyright 2016-2022, Pulumi Corporation.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package await
import (
"encoding/json"
"fmt"
"reflect"
"time"
"github.com/pkg/errors"
"github.com/pulumi/pulumi-kubernetes/provider/v4/pkg/await/informers"
"github.com/pulumi/pulumi-kubernetes/provider/v4/pkg/clients"
"github.com/pulumi/pulumi-kubernetes/provider/v4/pkg/kinds"
"github.com/pulumi/pulumi-kubernetes/provider/v4/pkg/metadata"
"github.com/pulumi/pulumi-kubernetes/provider/v4/pkg/openapi"
"github.com/pulumi/pulumi/sdk/v3/go/common/diag"
"github.com/pulumi/pulumi/sdk/v3/go/common/util/cmdutil"
logger "github.com/pulumi/pulumi/sdk/v3/go/common/util/logging"
networkingv1 "k8s.io/api/networking/v1"
networkingv1beta1 "k8s.io/api/networking/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/dynamic"
)
// ------------------------------------------------------------------------------------------------
// Await logic for extensions/v1beta1/Ingress.
//
// The goal of this code is to provide a fine-grained account of the status of a Kubernetes Ingress
// resource as it is being initialized. The idea is that if something goes wrong early, we want
// to alert the user so they can cancel the operation instead of waiting for timeout (~10 minutes).
//
// The design of this awaiter is fundamentally an event loop on four channels:
//
// 1. The Ingress channel, to which the Kubernetes API server will proactively push every change
// (additions, modifications, deletions) to any Ingress it knows about.
// 2. The Endpoint channel, which is the same idea as the Ingress channel, except it gets updates
// to Endpoint objects.
// 3. A timeout channel, which fires after some time.
// 4. A cancellation channel, with which the user can signal cancellation (e.g., using SIGINT).
//
// The `ingressInitAwaiter` will synchronously process events from the union of all these channels.
// Any time the success conditions described above a reached, we will terminate the awaiter.
//
// x-refs:
// * https://github.com/nginxinc/kubernetes-ingress/blob/5847d1f3906287d2771f3767d61c15ac02522caa/docs/report-ingress-status.md
// ------------------------------------------------------------------------------------------------
const (
DefaultIngressTimeoutMins = 10
)
type ingressInitAwaiter struct {
config createAwaitConfig
ingress *unstructured.Unstructured
ingressReady bool
endpointsSettled bool
endpointEventsCount uint64
knownEndpointObjects sets.String
knownExternalNameServices sets.String
}
func makeIngressInitAwaiter(c createAwaitConfig) *ingressInitAwaiter {
return &ingressInitAwaiter{
config: c,
ingress: c.currentOutputs,
ingressReady: false,
endpointsSettled: false,
knownEndpointObjects: sets.NewString(),
knownExternalNameServices: sets.NewString(),
}
}
func awaitIngressInit(c createAwaitConfig) error {
return makeIngressInitAwaiter(c).Await()
}
func awaitIngressRead(c createAwaitConfig) error {
return makeIngressInitAwaiter(c).Read()
}
func awaitIngressUpdate(u updateAwaitConfig) error {
return makeIngressInitAwaiter(u.createAwaitConfig).Await()
}
func (iia *ingressInitAwaiter) Await() error {
//
// We succeed only when all of the following are true:
//
// 1. Ingress object exists.
// 2. Endpoint objects exist with matching names for each Ingress path (except when Service
// type is ExternalName).
// 3. Ingress entry exists for .status.loadBalancer.ingress.
//
stopper := make(chan struct{})
defer close(stopper)
informerFactory := informers.NewInformerFactory(iia.config.clientSet,
informers.WithNamespaceOrDefault(iia.config.currentInputs.GetNamespace()))
informerFactory.Start(stopper)
ingressEvents := make(chan watch.Event)
ingressInformer, err := informers.New(informerFactory, informers.ForGVR(schema.GroupVersionResource{
Group: "networking.k8s.io",
Version: "v1",
Resource: "ingresses",
}), informers.WithEventChannel(ingressEvents))
if err != nil {
return err
}
go ingressInformer.Informer().Run(stopper)
endpointsEvents := make(chan watch.Event)
endpointsInformer, err := informers.New(informerFactory, informers.ForEndpoints(), informers.WithEventChannel(endpointsEvents))
if err != nil {
return err
}
go endpointsInformer.Informer().Run(stopper)
serviceEvents := make(chan watch.Event)
serviceInformer, err := informers.New(informerFactory, informers.ForServices(), informers.WithEventChannel(serviceEvents))
if err != nil {
return err
}
go serviceInformer.Informer().Run(stopper)
timeout := metadata.TimeoutDuration(iia.config.timeout, iia.config.currentInputs, DefaultIngressTimeoutMins*60)
return iia.await(ingressEvents, serviceEvents, endpointsEvents, make(chan struct{}), time.After(60*time.Second), time.After(timeout))
}
func (iia *ingressInitAwaiter) Read() error {
ingressClient, endpointsClient, servicesClient, err := iia.makeClients()
if err != nil {
return err
}
// Get live versions of Ingress.
ingress, err := ingressClient.Get(iia.config.ctx, iia.config.currentInputs.GetName(), metav1.GetOptions{})
if err != nil {
// IMPORTANT: Do not wrap this error! If this is a 404, the provider need to know so that it
// can mark the deployment as having been deleted.
return err
}
// Get live version of Endpoints.
endpointList, err := endpointsClient.List(iia.config.ctx, metav1.ListOptions{})
if err != nil {
logger.V(3).Infof("Failed to list endpoints needed for Ingress awaiter: %v", err)
endpointList = &unstructured.UnstructuredList{Items: []unstructured.Unstructured{}}
}
serviceList, err := servicesClient.List(iia.config.ctx, metav1.ListOptions{})
if err != nil {
logger.V(3).Infof("Failed to list services needed for Ingress awaiter: %v", err)
serviceList = &unstructured.UnstructuredList{Items: []unstructured.Unstructured{}}
}
return iia.read(ingress, endpointList, serviceList)
}
func (iia *ingressInitAwaiter) read(ingress *unstructured.Unstructured, endpoints *unstructured.UnstructuredList,
services *unstructured.UnstructuredList) error {
iia.processIngressEvent(watchAddedEvent(ingress))
err := services.EachListItem(func(service runtime.Object) error {
iia.processServiceEvent(watchAddedEvent(service.(*unstructured.Unstructured)))
return nil
})
if err != nil {
logger.V(3).Infof("Error iterating over endpoint list for service %q: %v", ingress.GetName(), err)
}
settled := make(chan struct{})
logger.V(3).Infof("Processing endpoint list: %#v", endpoints)
err = endpoints.EachListItem(func(endpoint runtime.Object) error {
iia.processEndpointEvent(watchAddedEvent(endpoint.(*unstructured.Unstructured)), settled)
return nil
})
if err != nil {
logger.V(3).Infof("Error iterating over endpoint list for ingress %q: %v", ingress.GetName(), err)
}
iia.endpointsSettled = true
if iia.checkAndLogStatus() {
return nil
}
return &initializationError{
subErrors: iia.errorMessages(),
object: ingress,
}
}
// await is a helper companion to `Await` designed to make it easy to test this module.
func (iia *ingressInitAwaiter) await(
ingressEvents, serviceEvents, endpointsEvents <-chan watch.Event,
settled chan struct{},
settlementGracePeriodExpired <-chan time.Time,
timeout <-chan time.Time,
) error {
iia.config.logStatus(diag.Info, "[1/3] Finding a matching service for each Ingress path")
for {
// Check whether we've succeeded.
if iia.checkAndLogStatus() {
return nil
}
// Else, wait for updates.
select {
case <-iia.config.ctx.Done():
// On cancel, check one last time if the ingress is ready.
if _, ready := iia.checkIfEndpointsReady(); ready && iia.ingressReady {
return nil
}
return &cancellationError{
object: iia.ingress,
subErrors: iia.errorMessages(),
}
case <-timeout:
// On timeout, check one last time if the ingress is ready.
if _, ready := iia.checkIfEndpointsReady(); ready && iia.ingressReady {
return nil
}
return &timeoutError{
object: iia.ingress,
subErrors: iia.errorMessages(),
}
case <-settlementGracePeriodExpired:
// If we don't see any endpoint events in the designated time, assume endpoints have settled.
// This is to account for the distinct possibility of ingress using a resource reference or non-existent
// endpoints - in which case we will never see corresponding endpoint events.
if iia.endpointEventsCount == 0 {
iia.endpointsSettled = true
}
case <-settled:
iia.endpointsSettled = true
case event := <-ingressEvents:
iia.processIngressEvent(event)
case event := <-endpointsEvents:
iia.processEndpointEvent(event, settled)
case event := <-serviceEvents:
iia.processServiceEvent(event)
}
}
}
func (iia *ingressInitAwaiter) processServiceEvent(event watch.Event) {
service, isUnstructured := event.Object.(*unstructured.Unstructured)
if !isUnstructured {
logger.V(3).Infof("Service watch received unknown object type %q",
reflect.TypeOf(service))
return
}
name := service.GetName()
if event.Type == watch.Deleted {
iia.knownExternalNameServices.Delete(name)
return
}
t, ok := openapi.Pluck(service.Object, "spec", "type")
if ok && t.(string) == "ExternalName" {
iia.knownExternalNameServices.Insert(name)
}
}
func (iia *ingressInitAwaiter) processIngressEvent(event watch.Event) {
inputIngressName := iia.config.currentInputs.GetName()
ingress, isUnstructured := event.Object.(*unstructured.Unstructured)
if !isUnstructured {
logger.V(3).Infof("Ingress watch received unknown object type %q",
reflect.TypeOf(ingress))
return
}
// Do nothing if this is not the ingress we're waiting for.
if ingress.GetName() != inputIngressName {
return
}
// Start with a blank slate.
iia.ingressReady = false
// Mark the ingress as not ready if it's deleted.
if event.Type == watch.Deleted {
return
}
iia.ingress = ingress
// To the best of my knowledge, this works across all known ingress api version variations.
ingressesRaw, ok := openapi.Pluck(ingress.Object, "status", "loadBalancer", "ingress")
if !ok {
logger.V(3).Infof("Unable to decode Ingress object from unstructured: %#v", ingress)
return
}
ingresses, ok := ingressesRaw.([]any)
if !ok {
logger.V(3).Infof("Unexpected ingress object structure from unstructured: %#v", ingress)
return
}
// Update status of ingress object so that we can check success.
iia.ingressReady = len(ingresses) > 0
logger.V(3).Infof("Waiting for ingress %q to update .status.loadBalancer with hostname/IP",
inputIngressName)
}
func decodeIngress(u *unstructured.Unstructured, to any) error {
b, err := u.MarshalJSON()
if err != nil {
return err
}
err = json.Unmarshal(b, to)
if err != nil {
return err
}
return nil
}
func (iia *ingressInitAwaiter) checkIfEndpointsReady() (string, bool) {
apiVersion := iia.ingress.GetAPIVersion()
switch apiVersion {
case "extensions/v1beta1", "networking.k8s.io/v1beta1":
var obj networkingv1beta1.Ingress
if err := decodeIngress(iia.ingress, &obj); err != nil {
logger.V(3).Infof("Unable to decode Ingress object from unstructured: %#v", iia.ingress)
return apiVersion, false
}
for _, rule := range obj.Spec.Rules {
var httpIngressPaths []networkingv1beta1.HTTPIngressPath
if rule.HTTP != nil {
httpIngressPaths = rule.HTTP.Paths
}
for _, path := range httpIngressPaths {
// Ignore ExternalName services
if path.Backend.ServiceName != "" && iia.knownExternalNameServices.Has(path.Backend.ServiceName) {
continue
}
if path.Backend.ServiceName != "" && !iia.knownEndpointObjects.Has(path.Backend.ServiceName) {
if iia.endpointsSettled {
// We haven't seen the target endpoint emit any events within the settlement period
// and there is a chance it may never exist.
iia.config.logStatus(diag.Warning, fmt.Sprintf("No matching service found for ingress rule: %s",
expectedIngressPath(rule.Host, path.Path, path.Backend.ServiceName)))
} else {
// We may get more endpoint events, lets wait and retry.
return apiVersion, false
}
}
}
}
case "networking.k8s.io/v1":
var obj networkingv1.Ingress
if err := decodeIngress(iia.ingress, &obj); err != nil {
logger.V(3).Infof("Unable to decode Ingress object from unstructured: %#v", iia.ingress)
return apiVersion, false
}
for _, rule := range obj.Spec.Rules {
var httpIngressPaths []networkingv1.HTTPIngressPath
if rule.HTTP != nil {
httpIngressPaths = rule.HTTP.Paths
}
for _, path := range httpIngressPaths {
// TODO: Should we worry about "resource" backends?
if path.Backend.Service == nil {
continue
}
// Ignore ExternalName services
if path.Backend.Service.Name != "" && iia.knownExternalNameServices.Has(path.Backend.Service.Name) {
continue
}
if path.Backend.Service.Name != "" && !iia.knownEndpointObjects.Has(path.Backend.Service.Name) {
if iia.endpointsSettled {
// We haven't seen the target endpoint emit any events within the settlement period
// and there is a chance it may never exist
// (https://github.com/pulumi/pulumi-kubernetes/issues/1810)
iia.config.logStatus(diag.Warning, fmt.Sprintf("No matching service found for ingress rule: %s",
expectedIngressPath(rule.Host, path.Path, path.Backend.Service.Name)))
} else {
// We may get more endpoint events, lets wait and retry.
return apiVersion, false
}
}
}
}
}
return apiVersion, true
}
// expectedIngressPath is a helper to print a useful error message.
func expectedIngressPath(host, path, serviceName string) string {
rulePath := path
if host != "" {
rulePath = host + path
}
// It is valid for a user not to specify either a host or path [1]. In this case, any traffic not
// matching another rule is routed to the specified Service for this rule. Print
// `"" (default path)` to make this expectation clear to users.
//
// [1] https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.13/#httpingresspath-v1beta1-extensions
if rulePath == "" {
rulePath = `"" (default path)`
} else {
rulePath = fmt.Sprintf("%q", rulePath)
}
// [host][path] -> serviceName
return fmt.Sprintf("%s -> %q", rulePath, serviceName)
}
func (iia *ingressInitAwaiter) processEndpointEvent(event watch.Event, settledCh chan<- struct{}) {
// Get endpoint object.
endpoint, isUnstructured := event.Object.(*unstructured.Unstructured)
if !isUnstructured {
logger.V(3).Infof("Endpoint watch received unknown object type %q",
reflect.TypeOf(endpoint))
return
}
iia.endpointEventsCount++
name := endpoint.GetName()
switch event.Type {
case watch.Added, watch.Modified:
iia.knownEndpointObjects.Insert(name)
case watch.Deleted:
iia.knownEndpointObjects.Delete(name)
// NOTE: Unlike `processServiceEvent` don't return; we still want to set
// `iia.endpointsSettled` to `false`.
}
// Every time we get an update to one of our endpoints objects, give it a few seconds
// for them to settle.
iia.endpointsSettled = false
go func() {
time.Sleep(10 * time.Second)
settledCh <- struct{}{}
}()
}
func (iia *ingressInitAwaiter) errorMessages() []string |
func (iia *ingressInitAwaiter) checkAndLogStatus() bool {
_, ready := iia.checkIfEndpointsReady()
success := iia.ingressReady && ready
if success {
iia.config.logStatus(diag.Info,
fmt.Sprintf("%sIngress initialization complete", cmdutil.EmojiOr("✅ ", "")))
} else if ready {
iia.config.logStatus(diag.Info, "[2/3] Waiting for update of .status.loadBalancer with hostname/IP")
}
return success
}
func (iia *ingressInitAwaiter) makeClients() (
ingressClient, endpointsClient, servicesClient dynamic.ResourceInterface, err error,
) {
ingressClient, err = clients.ResourceClient(
kinds.Ingress, iia.config.currentInputs.GetNamespace(), iia.config.clientSet)
if err != nil {
return nil, nil, nil, errors.Wrapf(err,
"Could not make client to watch Ingress %q",
iia.config.currentInputs.GetName())
}
endpointsClient, err = clients.ResourceClient(
kinds.Endpoints, iia.config.currentInputs.GetNamespace(), iia.config.clientSet)
if err != nil {
return nil, nil, nil, errors.Wrapf(err,
"Could not make client to watch Endpoints associated with Ingress %q",
iia.config.currentInputs.GetName())
}
servicesClient, err = clients.ResourceClient(
kinds.Service, iia.config.currentInputs.GetNamespace(), iia.config.clientSet)
if err != nil {
return nil, nil, nil, errors.Wrapf(err,
"Could not make client to watch Services associated with Ingress %q",
iia.config.currentInputs.GetName())
}
return
}
| {
messages := make([]string, 0)
if _, ready := iia.checkIfEndpointsReady(); !ready {
messages = append(messages, "Ingress has at least one rule with an unavailable target endpoint.")
}
if !iia.ingressReady {
messages = append(messages,
"Ingress .status.loadBalancer field was not updated with a hostname/IP address. "+
"\n for more information about this error, see https://pulumi.io/xdv72s")
}
return messages
} | identifier_body |
ingress.go | // Copyright 2016-2022, Pulumi Corporation.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package await
import (
"encoding/json"
"fmt"
"reflect"
"time"
"github.com/pkg/errors"
"github.com/pulumi/pulumi-kubernetes/provider/v4/pkg/await/informers"
"github.com/pulumi/pulumi-kubernetes/provider/v4/pkg/clients"
"github.com/pulumi/pulumi-kubernetes/provider/v4/pkg/kinds"
"github.com/pulumi/pulumi-kubernetes/provider/v4/pkg/metadata"
"github.com/pulumi/pulumi-kubernetes/provider/v4/pkg/openapi"
"github.com/pulumi/pulumi/sdk/v3/go/common/diag"
"github.com/pulumi/pulumi/sdk/v3/go/common/util/cmdutil"
logger "github.com/pulumi/pulumi/sdk/v3/go/common/util/logging"
networkingv1 "k8s.io/api/networking/v1"
networkingv1beta1 "k8s.io/api/networking/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/dynamic"
)
// ------------------------------------------------------------------------------------------------
// Await logic for extensions/v1beta1/Ingress.
//
// The goal of this code is to provide a fine-grained account of the status of a Kubernetes Ingress
// resource as it is being initialized. The idea is that if something goes wrong early, we want
// to alert the user so they can cancel the operation instead of waiting for timeout (~10 minutes).
//
// The design of this awaiter is fundamentally an event loop on four channels:
//
// 1. The Ingress channel, to which the Kubernetes API server will proactively push every change
// (additions, modifications, deletions) to any Ingress it knows about.
// 2. The Endpoint channel, which is the same idea as the Ingress channel, except it gets updates
// to Endpoint objects.
// 3. A timeout channel, which fires after some time.
// 4. A cancellation channel, with which the user can signal cancellation (e.g., using SIGINT).
//
// The `ingressInitAwaiter` will synchronously process events from the union of all these channels.
// Any time the success conditions described above a reached, we will terminate the awaiter.
//
// x-refs:
// * https://github.com/nginxinc/kubernetes-ingress/blob/5847d1f3906287d2771f3767d61c15ac02522caa/docs/report-ingress-status.md
// ------------------------------------------------------------------------------------------------ | type ingressInitAwaiter struct {
config createAwaitConfig
ingress *unstructured.Unstructured
ingressReady bool
endpointsSettled bool
endpointEventsCount uint64
knownEndpointObjects sets.String
knownExternalNameServices sets.String
}
func makeIngressInitAwaiter(c createAwaitConfig) *ingressInitAwaiter {
return &ingressInitAwaiter{
config: c,
ingress: c.currentOutputs,
ingressReady: false,
endpointsSettled: false,
knownEndpointObjects: sets.NewString(),
knownExternalNameServices: sets.NewString(),
}
}
func awaitIngressInit(c createAwaitConfig) error {
return makeIngressInitAwaiter(c).Await()
}
func awaitIngressRead(c createAwaitConfig) error {
return makeIngressInitAwaiter(c).Read()
}
func awaitIngressUpdate(u updateAwaitConfig) error {
return makeIngressInitAwaiter(u.createAwaitConfig).Await()
}
func (iia *ingressInitAwaiter) Await() error {
//
// We succeed only when all of the following are true:
//
// 1. Ingress object exists.
// 2. Endpoint objects exist with matching names for each Ingress path (except when Service
// type is ExternalName).
// 3. Ingress entry exists for .status.loadBalancer.ingress.
//
stopper := make(chan struct{})
defer close(stopper)
informerFactory := informers.NewInformerFactory(iia.config.clientSet,
informers.WithNamespaceOrDefault(iia.config.currentInputs.GetNamespace()))
informerFactory.Start(stopper)
ingressEvents := make(chan watch.Event)
ingressInformer, err := informers.New(informerFactory, informers.ForGVR(schema.GroupVersionResource{
Group: "networking.k8s.io",
Version: "v1",
Resource: "ingresses",
}), informers.WithEventChannel(ingressEvents))
if err != nil {
return err
}
go ingressInformer.Informer().Run(stopper)
endpointsEvents := make(chan watch.Event)
endpointsInformer, err := informers.New(informerFactory, informers.ForEndpoints(), informers.WithEventChannel(endpointsEvents))
if err != nil {
return err
}
go endpointsInformer.Informer().Run(stopper)
serviceEvents := make(chan watch.Event)
serviceInformer, err := informers.New(informerFactory, informers.ForServices(), informers.WithEventChannel(serviceEvents))
if err != nil {
return err
}
go serviceInformer.Informer().Run(stopper)
timeout := metadata.TimeoutDuration(iia.config.timeout, iia.config.currentInputs, DefaultIngressTimeoutMins*60)
return iia.await(ingressEvents, serviceEvents, endpointsEvents, make(chan struct{}), time.After(60*time.Second), time.After(timeout))
}
func (iia *ingressInitAwaiter) Read() error {
ingressClient, endpointsClient, servicesClient, err := iia.makeClients()
if err != nil {
return err
}
// Get live versions of Ingress.
ingress, err := ingressClient.Get(iia.config.ctx, iia.config.currentInputs.GetName(), metav1.GetOptions{})
if err != nil {
// IMPORTANT: Do not wrap this error! If this is a 404, the provider need to know so that it
// can mark the deployment as having been deleted.
return err
}
// Get live version of Endpoints.
endpointList, err := endpointsClient.List(iia.config.ctx, metav1.ListOptions{})
if err != nil {
logger.V(3).Infof("Failed to list endpoints needed for Ingress awaiter: %v", err)
endpointList = &unstructured.UnstructuredList{Items: []unstructured.Unstructured{}}
}
serviceList, err := servicesClient.List(iia.config.ctx, metav1.ListOptions{})
if err != nil {
logger.V(3).Infof("Failed to list services needed for Ingress awaiter: %v", err)
serviceList = &unstructured.UnstructuredList{Items: []unstructured.Unstructured{}}
}
return iia.read(ingress, endpointList, serviceList)
}
func (iia *ingressInitAwaiter) read(ingress *unstructured.Unstructured, endpoints *unstructured.UnstructuredList,
services *unstructured.UnstructuredList) error {
iia.processIngressEvent(watchAddedEvent(ingress))
err := services.EachListItem(func(service runtime.Object) error {
iia.processServiceEvent(watchAddedEvent(service.(*unstructured.Unstructured)))
return nil
})
if err != nil {
logger.V(3).Infof("Error iterating over endpoint list for service %q: %v", ingress.GetName(), err)
}
settled := make(chan struct{})
logger.V(3).Infof("Processing endpoint list: %#v", endpoints)
err = endpoints.EachListItem(func(endpoint runtime.Object) error {
iia.processEndpointEvent(watchAddedEvent(endpoint.(*unstructured.Unstructured)), settled)
return nil
})
if err != nil {
logger.V(3).Infof("Error iterating over endpoint list for ingress %q: %v", ingress.GetName(), err)
}
iia.endpointsSettled = true
if iia.checkAndLogStatus() {
return nil
}
return &initializationError{
subErrors: iia.errorMessages(),
object: ingress,
}
}
// await is a helper companion to `Await` designed to make it easy to test this module.
func (iia *ingressInitAwaiter) await(
ingressEvents, serviceEvents, endpointsEvents <-chan watch.Event,
settled chan struct{},
settlementGracePeriodExpired <-chan time.Time,
timeout <-chan time.Time,
) error {
iia.config.logStatus(diag.Info, "[1/3] Finding a matching service for each Ingress path")
for {
// Check whether we've succeeded.
if iia.checkAndLogStatus() {
return nil
}
// Else, wait for updates.
select {
case <-iia.config.ctx.Done():
// On cancel, check one last time if the ingress is ready.
if _, ready := iia.checkIfEndpointsReady(); ready && iia.ingressReady {
return nil
}
return &cancellationError{
object: iia.ingress,
subErrors: iia.errorMessages(),
}
case <-timeout:
// On timeout, check one last time if the ingress is ready.
if _, ready := iia.checkIfEndpointsReady(); ready && iia.ingressReady {
return nil
}
return &timeoutError{
object: iia.ingress,
subErrors: iia.errorMessages(),
}
case <-settlementGracePeriodExpired:
// If we don't see any endpoint events in the designated time, assume endpoints have settled.
// This is to account for the distinct possibility of ingress using a resource reference or non-existent
// endpoints - in which case we will never see corresponding endpoint events.
if iia.endpointEventsCount == 0 {
iia.endpointsSettled = true
}
case <-settled:
iia.endpointsSettled = true
case event := <-ingressEvents:
iia.processIngressEvent(event)
case event := <-endpointsEvents:
iia.processEndpointEvent(event, settled)
case event := <-serviceEvents:
iia.processServiceEvent(event)
}
}
}
func (iia *ingressInitAwaiter) processServiceEvent(event watch.Event) {
service, isUnstructured := event.Object.(*unstructured.Unstructured)
if !isUnstructured {
logger.V(3).Infof("Service watch received unknown object type %q",
reflect.TypeOf(service))
return
}
name := service.GetName()
if event.Type == watch.Deleted {
iia.knownExternalNameServices.Delete(name)
return
}
t, ok := openapi.Pluck(service.Object, "spec", "type")
if ok && t.(string) == "ExternalName" {
iia.knownExternalNameServices.Insert(name)
}
}
func (iia *ingressInitAwaiter) processIngressEvent(event watch.Event) {
inputIngressName := iia.config.currentInputs.GetName()
ingress, isUnstructured := event.Object.(*unstructured.Unstructured)
if !isUnstructured {
logger.V(3).Infof("Ingress watch received unknown object type %q",
reflect.TypeOf(ingress))
return
}
// Do nothing if this is not the ingress we're waiting for.
if ingress.GetName() != inputIngressName {
return
}
// Start with a blank slate.
iia.ingressReady = false
// Mark the ingress as not ready if it's deleted.
if event.Type == watch.Deleted {
return
}
iia.ingress = ingress
// To the best of my knowledge, this works across all known ingress api version variations.
ingressesRaw, ok := openapi.Pluck(ingress.Object, "status", "loadBalancer", "ingress")
if !ok {
logger.V(3).Infof("Unable to decode Ingress object from unstructured: %#v", ingress)
return
}
ingresses, ok := ingressesRaw.([]any)
if !ok {
logger.V(3).Infof("Unexpected ingress object structure from unstructured: %#v", ingress)
return
}
// Update status of ingress object so that we can check success.
iia.ingressReady = len(ingresses) > 0
logger.V(3).Infof("Waiting for ingress %q to update .status.loadBalancer with hostname/IP",
inputIngressName)
}
func decodeIngress(u *unstructured.Unstructured, to any) error {
b, err := u.MarshalJSON()
if err != nil {
return err
}
err = json.Unmarshal(b, to)
if err != nil {
return err
}
return nil
}
func (iia *ingressInitAwaiter) checkIfEndpointsReady() (string, bool) {
apiVersion := iia.ingress.GetAPIVersion()
switch apiVersion {
case "extensions/v1beta1", "networking.k8s.io/v1beta1":
var obj networkingv1beta1.Ingress
if err := decodeIngress(iia.ingress, &obj); err != nil {
logger.V(3).Infof("Unable to decode Ingress object from unstructured: %#v", iia.ingress)
return apiVersion, false
}
for _, rule := range obj.Spec.Rules {
var httpIngressPaths []networkingv1beta1.HTTPIngressPath
if rule.HTTP != nil {
httpIngressPaths = rule.HTTP.Paths
}
for _, path := range httpIngressPaths {
// Ignore ExternalName services
if path.Backend.ServiceName != "" && iia.knownExternalNameServices.Has(path.Backend.ServiceName) {
continue
}
if path.Backend.ServiceName != "" && !iia.knownEndpointObjects.Has(path.Backend.ServiceName) {
if iia.endpointsSettled {
// We haven't seen the target endpoint emit any events within the settlement period
// and there is a chance it may never exist.
iia.config.logStatus(diag.Warning, fmt.Sprintf("No matching service found for ingress rule: %s",
expectedIngressPath(rule.Host, path.Path, path.Backend.ServiceName)))
} else {
// We may get more endpoint events, lets wait and retry.
return apiVersion, false
}
}
}
}
case "networking.k8s.io/v1":
var obj networkingv1.Ingress
if err := decodeIngress(iia.ingress, &obj); err != nil {
logger.V(3).Infof("Unable to decode Ingress object from unstructured: %#v", iia.ingress)
return apiVersion, false
}
for _, rule := range obj.Spec.Rules {
var httpIngressPaths []networkingv1.HTTPIngressPath
if rule.HTTP != nil {
httpIngressPaths = rule.HTTP.Paths
}
for _, path := range httpIngressPaths {
// TODO: Should we worry about "resource" backends?
if path.Backend.Service == nil {
continue
}
// Ignore ExternalName services
if path.Backend.Service.Name != "" && iia.knownExternalNameServices.Has(path.Backend.Service.Name) {
continue
}
if path.Backend.Service.Name != "" && !iia.knownEndpointObjects.Has(path.Backend.Service.Name) {
if iia.endpointsSettled {
// We haven't seen the target endpoint emit any events within the settlement period
// and there is a chance it may never exist
// (https://github.com/pulumi/pulumi-kubernetes/issues/1810)
iia.config.logStatus(diag.Warning, fmt.Sprintf("No matching service found for ingress rule: %s",
expectedIngressPath(rule.Host, path.Path, path.Backend.Service.Name)))
} else {
// We may get more endpoint events, lets wait and retry.
return apiVersion, false
}
}
}
}
}
return apiVersion, true
}
// expectedIngressPath is a helper to print a useful error message.
func expectedIngressPath(host, path, serviceName string) string {
rulePath := path
if host != "" {
rulePath = host + path
}
// It is valid for a user not to specify either a host or path [1]. In this case, any traffic not
// matching another rule is routed to the specified Service for this rule. Print
// `"" (default path)` to make this expectation clear to users.
//
// [1] https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.13/#httpingresspath-v1beta1-extensions
if rulePath == "" {
rulePath = `"" (default path)`
} else {
rulePath = fmt.Sprintf("%q", rulePath)
}
// [host][path] -> serviceName
return fmt.Sprintf("%s -> %q", rulePath, serviceName)
}
func (iia *ingressInitAwaiter) processEndpointEvent(event watch.Event, settledCh chan<- struct{}) {
// Get endpoint object.
endpoint, isUnstructured := event.Object.(*unstructured.Unstructured)
if !isUnstructured {
logger.V(3).Infof("Endpoint watch received unknown object type %q",
reflect.TypeOf(endpoint))
return
}
iia.endpointEventsCount++
name := endpoint.GetName()
switch event.Type {
case watch.Added, watch.Modified:
iia.knownEndpointObjects.Insert(name)
case watch.Deleted:
iia.knownEndpointObjects.Delete(name)
// NOTE: Unlike `processServiceEvent` don't return; we still want to set
// `iia.endpointsSettled` to `false`.
}
// Every time we get an update to one of our endpoints objects, give it a few seconds
// for them to settle.
iia.endpointsSettled = false
go func() {
time.Sleep(10 * time.Second)
settledCh <- struct{}{}
}()
}
func (iia *ingressInitAwaiter) errorMessages() []string {
messages := make([]string, 0)
if _, ready := iia.checkIfEndpointsReady(); !ready {
messages = append(messages, "Ingress has at least one rule with an unavailable target endpoint.")
}
if !iia.ingressReady {
messages = append(messages,
"Ingress .status.loadBalancer field was not updated with a hostname/IP address. "+
"\n for more information about this error, see https://pulumi.io/xdv72s")
}
return messages
}
func (iia *ingressInitAwaiter) checkAndLogStatus() bool {
_, ready := iia.checkIfEndpointsReady()
success := iia.ingressReady && ready
if success {
iia.config.logStatus(diag.Info,
fmt.Sprintf("%sIngress initialization complete", cmdutil.EmojiOr("✅ ", "")))
} else if ready {
iia.config.logStatus(diag.Info, "[2/3] Waiting for update of .status.loadBalancer with hostname/IP")
}
return success
}
func (iia *ingressInitAwaiter) makeClients() (
ingressClient, endpointsClient, servicesClient dynamic.ResourceInterface, err error,
) {
ingressClient, err = clients.ResourceClient(
kinds.Ingress, iia.config.currentInputs.GetNamespace(), iia.config.clientSet)
if err != nil {
return nil, nil, nil, errors.Wrapf(err,
"Could not make client to watch Ingress %q",
iia.config.currentInputs.GetName())
}
endpointsClient, err = clients.ResourceClient(
kinds.Endpoints, iia.config.currentInputs.GetNamespace(), iia.config.clientSet)
if err != nil {
return nil, nil, nil, errors.Wrapf(err,
"Could not make client to watch Endpoints associated with Ingress %q",
iia.config.currentInputs.GetName())
}
servicesClient, err = clients.ResourceClient(
kinds.Service, iia.config.currentInputs.GetNamespace(), iia.config.clientSet)
if err != nil {
return nil, nil, nil, errors.Wrapf(err,
"Could not make client to watch Services associated with Ingress %q",
iia.config.currentInputs.GetName())
}
return
} |
const (
DefaultIngressTimeoutMins = 10
)
| random_line_split |
test.py | #!/usr/bin/env python
# **************************************************************************
# *
# * Authors: David Maluenda (dmaluenda@cnb.csic.es)
# * based on J.M. De la Rosa Trevin (jmdelarosa@cnb.csic.es)
# *
# * Unidad de Bioinformatica of Centro Nacional de Biotecnologia , CSIC
# *
# * This program is free software; you can redistribute it and/or modify
# * it under the terms of the GNU General Public License as published by
# * the Free Software Foundation; either version 2 of the License, or
# * (at your option) any later version.
# *
# * This program is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU General Public License for more details.
# *
# * You should have received a copy of the GNU General Public License
# * along with this program; if not, write to the Free Software
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
# * 02111-1307 USA
# *
# * All comments concerning this program package may be sent to the
# * e-mail address 'scipion@cnb.csic.es'
# *
# **************************************************************************
import os
import time
import unittest
import threading
import subprocess
import sys
import shutil
from traceback import format_exception
import xmippLib
VAHID = "vahid"
RM = 'rmarabini'
COSS = 'coss'
JMRT = 'delarosatrevin'
JOTON = 'joton'
DISCONTINUED = 'nobody'
JMOTA = 'javimota'
class Command(object):
def __init__(self, cmd, env=None):
self.cmd = cmd
self.process = None
self.env = env
def run(self, timeout):
# type: (object) -> object
def target():
self.process = subprocess.Popen(self.cmd, shell=True, env=self.env)
self.process.communicate()
thread = threading.Thread(target=target)
thread.start()
thread.join(timeout)
if thread.is_alive():
print(red('ERROR: timeout reached for this process'))
self.process.terminate()
thread.join()
self.process = None
def terminate(self):
if self.process != None:
self.process.terminate()
print('Ctrl-c pressed, aborting this test')
class ProgramTest(unittest.TestCase):
_testDir = None
_environ = None
_timeout = 300
# _labels = [WEEKLY]
@classmethod
def setTestDir(cls, newTestDir):
cls._testDir = newTestDir
@classmethod
def setEnviron(cls, newEnviron):
cls._environ = newEnviron
@classmethod
def setTimeOut(cls, newTimeOut):
cls._timeout = newTimeOut
def _parseArgs(self, args):
''' Expand our tags %o, %p and %d with corresponding values '''
args = args.replace("%o", self.outputDir)
args = args.replace("%p", self.program)
#args = args.replace("%d", self.fnDir)
return args
def _runCommands(self, cmdList, cmdType):
""" Run several commands.
Params:
cmdList: the list of commands to execute.
cmdType: either 'preruns' or 'postruns'
"""
pipe = '>'
outDir = self.outputDir
for cmd in cmdList:
if cmd:
cmd = self._parseArgs(cmd)
cmd = " %(cmd)s %(pipe)s %(outDir)s/%(cmdType)s_stdout.txt 2%(pipe)s %(outDir)s/%(cmdType)s_stderr.txt" % locals()
print " Running %s: %s" % (cmdType, blue(cmd))
command = Command(cmd, env=os.environ)
command.run(timeout=self._timeout)
pipe = ">>"
sys.stdout.flush()
def runCase(self, args, mpi=0, changeDir=False,
preruns=None, postruns=None, validate=None,
outputs=None, random=False, errorthreshold=0.001):
# Retrieve the correct case number from the test name id
# We asumme here that 'test_caseXXX' should be in the name
caseId = unittest.TestCase.id(self)
if not 'test_case' in caseId:
raise Exception("'test_case' string should be in the test function name followed by a number")
_counter = int(caseId.split('test_case')[1])
self._testDir = os.environ.get("XMIPP_TEST_DATA")
self.outputDir = os.path.join('tmpLink', '%s_%02d' % (self.program, _counter))
self.outputDirAbs = os.path.join(self._testDir, self.outputDir)
self.goldDir = os.path.join(self._testDir, 'gold', '%s_%02d' % (self.program, _counter))
# Change to tests root folder (self._testDir)
cwd = os.getcwd()
os.chdir(self._testDir)
# Clean and create the program output folder if not exists
createDir(self.outputDir, clean=True)
if preruns:
self._runCommands(preruns, 'preruns')
if mpi:
cmd = "mpirun -np %d `which %s`" % (mpi, self.program)
else:
cmd = self.program
args = self._parseArgs(args)
if changeDir:
cmd = "cd %s ; %s %s > stdout.txt 2> stderr.txt" % (self.outputDir, cmd, args)
else:
cmd = "%s %s > %s/stdout.txt 2> %s/stderr.txt" % (cmd, args, self.outputDir, self.outputDir)
print " Command: "
print " ", blue(cmd)
sys.stdout.flush()
#run the test itself
command = Command(cmd, env=os.environ)
self._command = command
try:
command.run(timeout=self._timeout)
except KeyboardInterrupt:
command.terminate()
stderrFn = "%s/stderr.txt" % self.outputDir
if os.path.exists(stderrFn):
errFile = open(stderrFn, 'r')
errStr = errFile.read()
errFile.close()
if 'XMIPP_ERROR' in errStr:
print errStr
if postruns:
self._runCommands(postruns, 'postruns')
if outputs:
self._checkOutputs(outputs, random, errorthreshold=errorthreshold)
if validate:
validate()
os.chdir(cwd)
def _checkOutputs(self, outputs, random=False, errorthreshold=0.001):
""" Check that all output files are produced
and are equivalent to the ones in goldStandard folder.
"""
for out in outputs:
outFile = os.path.join(self._testDir, self.outputDir, out)
fileGoldStd = os.path.join(self.goldDir, out)
# Check the expect output file was produced
msg = "Missing expected output file:\n output: %s" % outFile
self.assertTrue(os.path.exists(outFile), red(msg))
if random:
print(yellow("WARNING: %s was created using a random seed, check skipped..." % outFile))
else:
fnGoldStd = xmippLib.FileName(fileGoldStd)
if fnGoldStd.isImage():
im1 = xmippLib.Image(fileGoldStd)
im2 = xmippLib.Image(outFile)
msg = "Images are not equal (+-%f):\n output: %s\n gold: %s" % \
(errorthreshold, outFile, fileGoldStd)
self.assertTrue(im1.equal(im2, errorthreshold), red(msg))
elif fnGoldStd.isMetaData():
msg = "MetaDatas are not equal:\n output: %s\n gold: %s" % (outFile, fileGoldStd)
self.assertTrue(xmippLib.compareTwoMetadataFiles(outFile, fileGoldStd), red(msg))
else:
msg = "Files are not equal:\n output: %s\n gold: %s" % (outFile, fileGoldStd)
self.assertTrue(xmippLib.compareTwoFiles(outFile, fileGoldStd, 0), red(msg))
class GTestResult(unittest.TestResult):
""" Subclass TestResult to output tests results with colors (green for success and red for failure)
and write a report on an .xml file.
"""
xml = None
testFailed = 0
numberTests = 0
def __init__(self):
unittest.TestResult.__init__(self)
self.startTimeAll = time.time()
def openXmlReport(self, classname, filename):
# self.xml = open(filename, 'w')
# self.xml.write('<testsuite name="%s">\n' % classname)
pass
def doReport(self):
secs = time.time() - self.startTimeAll
sys.stderr.write("%s run %d tests (%0.3f secs)\n" %
(green("[==========]"), self.numberTests, secs))
if self.testFailed:
print >> sys.stderr, red("[ FAILED ]") + " %d tests" % self.testFailed
print >> sys.stdout, green("[ PASSED ]") + " %d tests" % (self.numberTests - self.testFailed)
sys.stdout.flush()
# self.xml.write('</testsuite>\n')
# self.xml.close()
def tic(self):
self.startTime = time.time()
def toc(self):
return time.time() - self.startTime
def startTest(self, test):
self.tic()
self.numberTests += 1
def getTestName(self, test):
parts = str(test).split()
name = parts[0]
parts = parts[1].split('.')
classname = parts[-1].replace(")", "")
return "%s.%s" % (classname, name)
def addSuccess(self, test):
secs = self.toc()
sys.stderr.write("%s %s (%0.3f secs)\n\n" % (green('[ RUN OK ]'), self.getTestName(test), secs))
def reportError(self, test, err):
sys.stderr.write("\n%s" % ("".join(format_exception(*err))))
sys.stderr.write("%s %s\n\n" % (red('[ FAILED ]'),
self.getTestName(test)))
self.testFailed += 1
def addError(self, test, err):
self.reportError(test, err)
def addFailure(self, test, err):
self.reportError(test, err)
def green(text):
return "\033[92m "+text+"\033[0m"
def red(text):
return "\033[91m "+text+"\033[0m"
def blue(text):
return "\033[34m "+text+"\033[0m"
def yellow(text):
return "\033[93m "+text+"\033[0m"
def createDir(dirname, clean=False):
if clean and os.path.exists(dirname):
shutil.rmtree(dirname)
os.makedirs(dirname)
def visitTests(tests, grepStr=''): |
# First flatten the list of tests.
testsFlat = []
toCheck = [t for t in tests]
while toCheck:
test = toCheck.pop()
if isinstance(test, unittest.TestSuite):
toCheck += [t for t in test]
else:
if grepStr in str(type(test)):
testsFlat.append(test)
testsFlat.sort()
# Follow the flattened list of tests and show the module, class
# and name, in a nice way.
lastClass = None
lastModule = None
grepPrint = '' if grepStr is '' else red(' (grep: %s)'%grepStr)
for t in testsFlat:
moduleName, className, testName = t.id().rsplit('.', 2)
# If there is a failure loading the test, show it
if moduleName.startswith('unittest.loader.ModuleImportFailure'):
print red(moduleName), " test:", t.id()
continue
if moduleName != lastModule:
lastModule = moduleName
print(" - From %s.py (to run all use --allPrograms)"
% '/'.join(moduleName.split('.')) + grepPrint)
if className != lastClass:
lastClass = className
print(" ./xmipp test %s" % className)
if __name__ == "__main__":
testNames = sys.argv[1:]
cTests = subprocess.check_output('compgen -ac | grep xmipp_test_', shell=True,
executable='/bin/bash').splitlines()
tests = unittest.TestSuite()
if '--show' in testNames or '--allPrograms' in testNames:
# tests.addTests(unittest.defaultTestLoader.discover(os.environ.get("XMIPP_TEST_DATA")+'/..',
# pattern='test*.py'))#,top_level_dir=os.environ.get("XMIPP_TEST_DATA")+'/..'))
listDir = os.listdir(os.environ.get("XMIPP_TEST_DATA")+'/..')
# print listDir
for path in listDir:
if path.startswith('test_') and path.endswith('.py'):
tests.addTests(unittest.defaultTestLoader.loadTestsFromName('tests.' + path[:-3]))
if '--show' in testNames:
print(blue("\n > > You can run any of the following tests by:\n"))
grepStr = '' if len(testNames)<2 else testNames[1]
visitTests(tests, grepStr)
print("\n - From applications/function_tests (to run all use --allFuncs):")
for test in cTests:
print(" %s" % test)
elif '--allPrograms' in testNames:
result = GTestResult()
tests.run(result)
result.doReport()
elif '--allFuncs' in testNames:
xmippBinDir = os.path.join(os.environ.get("XMIPP_SRC"), 'xmipp', 'bin')
errors = []
startTimeAll = time.time()
for test in cTests:
sys.stdout.write(blue("\n\n>> Running %s:\n" % test))
sys.stdout.flush()
result = os.system(test)
sys.stdout.flush()
if result != 0:
errors.append(test)
secs = time.time() - startTimeAll
sys.stderr.write(blue("\n -- End of all function tests -- \n\n"))
sys.stderr.write("%s run %d tests (%0.3f secs)\n" %
(green("[==========]"), len(cTests), secs))
print >> sys.stdout, green("[ PASSED ]") + " %d tests" % (len(cTests) - len(errors))
sys.stdout.flush()
if errors:
print >> sys.stderr, red("[ FAILED ]") + " %d tests:" % len(errors)
for fail in errors:
print(red(" - %s" % fail))
sys.stdout.flush()
else:
for test in testNames:
test = 'tests.test_programs_xmipp.' + test
try:
tests.addTests(unittest.defaultTestLoader.loadTestsFromName(test))
except Exception as e:
print(red('Cannot find test %s -- skipping') % test)
print('error: %s' % e)
result = GTestResult()
tests.run(result)
result.doReport() | """ Show the list of tests available """ | random_line_split |
test.py | #!/usr/bin/env python
# **************************************************************************
# *
# * Authors: David Maluenda (dmaluenda@cnb.csic.es)
# * based on J.M. De la Rosa Trevin (jmdelarosa@cnb.csic.es)
# *
# * Unidad de Bioinformatica of Centro Nacional de Biotecnologia , CSIC
# *
# * This program is free software; you can redistribute it and/or modify
# * it under the terms of the GNU General Public License as published by
# * the Free Software Foundation; either version 2 of the License, or
# * (at your option) any later version.
# *
# * This program is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU General Public License for more details.
# *
# * You should have received a copy of the GNU General Public License
# * along with this program; if not, write to the Free Software
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
# * 02111-1307 USA
# *
# * All comments concerning this program package may be sent to the
# * e-mail address 'scipion@cnb.csic.es'
# *
# **************************************************************************
import os
import time
import unittest
import threading
import subprocess
import sys
import shutil
from traceback import format_exception
import xmippLib
VAHID = "vahid"
RM = 'rmarabini'
COSS = 'coss'
JMRT = 'delarosatrevin'
JOTON = 'joton'
DISCONTINUED = 'nobody'
JMOTA = 'javimota'
class Command(object):
def __init__(self, cmd, env=None):
self.cmd = cmd
self.process = None
self.env = env
def run(self, timeout):
# type: (object) -> object
def target():
self.process = subprocess.Popen(self.cmd, shell=True, env=self.env)
self.process.communicate()
thread = threading.Thread(target=target)
thread.start()
thread.join(timeout)
if thread.is_alive():
print(red('ERROR: timeout reached for this process'))
self.process.terminate()
thread.join()
self.process = None
def terminate(self):
if self.process != None:
self.process.terminate()
print('Ctrl-c pressed, aborting this test')
class ProgramTest(unittest.TestCase):
_testDir = None
_environ = None
_timeout = 300
# _labels = [WEEKLY]
@classmethod
def setTestDir(cls, newTestDir):
cls._testDir = newTestDir
@classmethod
def setEnviron(cls, newEnviron):
cls._environ = newEnviron
@classmethod
def setTimeOut(cls, newTimeOut):
cls._timeout = newTimeOut
def _parseArgs(self, args):
''' Expand our tags %o, %p and %d with corresponding values '''
args = args.replace("%o", self.outputDir)
args = args.replace("%p", self.program)
#args = args.replace("%d", self.fnDir)
return args
def _runCommands(self, cmdList, cmdType):
|
def runCase(self, args, mpi=0, changeDir=False,
preruns=None, postruns=None, validate=None,
outputs=None, random=False, errorthreshold=0.001):
# Retrieve the correct case number from the test name id
# We asumme here that 'test_caseXXX' should be in the name
caseId = unittest.TestCase.id(self)
if not 'test_case' in caseId:
raise Exception("'test_case' string should be in the test function name followed by a number")
_counter = int(caseId.split('test_case')[1])
self._testDir = os.environ.get("XMIPP_TEST_DATA")
self.outputDir = os.path.join('tmpLink', '%s_%02d' % (self.program, _counter))
self.outputDirAbs = os.path.join(self._testDir, self.outputDir)
self.goldDir = os.path.join(self._testDir, 'gold', '%s_%02d' % (self.program, _counter))
# Change to tests root folder (self._testDir)
cwd = os.getcwd()
os.chdir(self._testDir)
# Clean and create the program output folder if not exists
createDir(self.outputDir, clean=True)
if preruns:
self._runCommands(preruns, 'preruns')
if mpi:
cmd = "mpirun -np %d `which %s`" % (mpi, self.program)
else:
cmd = self.program
args = self._parseArgs(args)
if changeDir:
cmd = "cd %s ; %s %s > stdout.txt 2> stderr.txt" % (self.outputDir, cmd, args)
else:
cmd = "%s %s > %s/stdout.txt 2> %s/stderr.txt" % (cmd, args, self.outputDir, self.outputDir)
print " Command: "
print " ", blue(cmd)
sys.stdout.flush()
#run the test itself
command = Command(cmd, env=os.environ)
self._command = command
try:
command.run(timeout=self._timeout)
except KeyboardInterrupt:
command.terminate()
stderrFn = "%s/stderr.txt" % self.outputDir
if os.path.exists(stderrFn):
errFile = open(stderrFn, 'r')
errStr = errFile.read()
errFile.close()
if 'XMIPP_ERROR' in errStr:
print errStr
if postruns:
self._runCommands(postruns, 'postruns')
if outputs:
self._checkOutputs(outputs, random, errorthreshold=errorthreshold)
if validate:
validate()
os.chdir(cwd)
def _checkOutputs(self, outputs, random=False, errorthreshold=0.001):
""" Check that all output files are produced
and are equivalent to the ones in goldStandard folder.
"""
for out in outputs:
outFile = os.path.join(self._testDir, self.outputDir, out)
fileGoldStd = os.path.join(self.goldDir, out)
# Check the expect output file was produced
msg = "Missing expected output file:\n output: %s" % outFile
self.assertTrue(os.path.exists(outFile), red(msg))
if random:
print(yellow("WARNING: %s was created using a random seed, check skipped..." % outFile))
else:
fnGoldStd = xmippLib.FileName(fileGoldStd)
if fnGoldStd.isImage():
im1 = xmippLib.Image(fileGoldStd)
im2 = xmippLib.Image(outFile)
msg = "Images are not equal (+-%f):\n output: %s\n gold: %s" % \
(errorthreshold, outFile, fileGoldStd)
self.assertTrue(im1.equal(im2, errorthreshold), red(msg))
elif fnGoldStd.isMetaData():
msg = "MetaDatas are not equal:\n output: %s\n gold: %s" % (outFile, fileGoldStd)
self.assertTrue(xmippLib.compareTwoMetadataFiles(outFile, fileGoldStd), red(msg))
else:
msg = "Files are not equal:\n output: %s\n gold: %s" % (outFile, fileGoldStd)
self.assertTrue(xmippLib.compareTwoFiles(outFile, fileGoldStd, 0), red(msg))
class GTestResult(unittest.TestResult):
""" Subclass TestResult to output tests results with colors (green for success and red for failure)
and write a report on an .xml file.
"""
xml = None
testFailed = 0
numberTests = 0
def __init__(self):
unittest.TestResult.__init__(self)
self.startTimeAll = time.time()
def openXmlReport(self, classname, filename):
# self.xml = open(filename, 'w')
# self.xml.write('<testsuite name="%s">\n' % classname)
pass
def doReport(self):
secs = time.time() - self.startTimeAll
sys.stderr.write("%s run %d tests (%0.3f secs)\n" %
(green("[==========]"), self.numberTests, secs))
if self.testFailed:
print >> sys.stderr, red("[ FAILED ]") + " %d tests" % self.testFailed
print >> sys.stdout, green("[ PASSED ]") + " %d tests" % (self.numberTests - self.testFailed)
sys.stdout.flush()
# self.xml.write('</testsuite>\n')
# self.xml.close()
def tic(self):
self.startTime = time.time()
def toc(self):
return time.time() - self.startTime
def startTest(self, test):
self.tic()
self.numberTests += 1
def getTestName(self, test):
parts = str(test).split()
name = parts[0]
parts = parts[1].split('.')
classname = parts[-1].replace(")", "")
return "%s.%s" % (classname, name)
def addSuccess(self, test):
secs = self.toc()
sys.stderr.write("%s %s (%0.3f secs)\n\n" % (green('[ RUN OK ]'), self.getTestName(test), secs))
def reportError(self, test, err):
sys.stderr.write("\n%s" % ("".join(format_exception(*err))))
sys.stderr.write("%s %s\n\n" % (red('[ FAILED ]'),
self.getTestName(test)))
self.testFailed += 1
def addError(self, test, err):
self.reportError(test, err)
def addFailure(self, test, err):
self.reportError(test, err)
def green(text):
return "\033[92m "+text+"\033[0m"
def red(text):
return "\033[91m "+text+"\033[0m"
def blue(text):
return "\033[34m "+text+"\033[0m"
def yellow(text):
return "\033[93m "+text+"\033[0m"
def createDir(dirname, clean=False):
if clean and os.path.exists(dirname):
shutil.rmtree(dirname)
os.makedirs(dirname)
def visitTests(tests, grepStr=''):
""" Show the list of tests available """
# First flatten the list of tests.
testsFlat = []
toCheck = [t for t in tests]
while toCheck:
test = toCheck.pop()
if isinstance(test, unittest.TestSuite):
toCheck += [t for t in test]
else:
if grepStr in str(type(test)):
testsFlat.append(test)
testsFlat.sort()
# Follow the flattened list of tests and show the module, class
# and name, in a nice way.
lastClass = None
lastModule = None
grepPrint = '' if grepStr is '' else red(' (grep: %s)'%grepStr)
for t in testsFlat:
moduleName, className, testName = t.id().rsplit('.', 2)
# If there is a failure loading the test, show it
if moduleName.startswith('unittest.loader.ModuleImportFailure'):
print red(moduleName), " test:", t.id()
continue
if moduleName != lastModule:
lastModule = moduleName
print(" - From %s.py (to run all use --allPrograms)"
% '/'.join(moduleName.split('.')) + grepPrint)
if className != lastClass:
lastClass = className
print(" ./xmipp test %s" % className)
if __name__ == "__main__":
testNames = sys.argv[1:]
cTests = subprocess.check_output('compgen -ac | grep xmipp_test_', shell=True,
executable='/bin/bash').splitlines()
tests = unittest.TestSuite()
if '--show' in testNames or '--allPrograms' in testNames:
# tests.addTests(unittest.defaultTestLoader.discover(os.environ.get("XMIPP_TEST_DATA")+'/..',
# pattern='test*.py'))#,top_level_dir=os.environ.get("XMIPP_TEST_DATA")+'/..'))
listDir = os.listdir(os.environ.get("XMIPP_TEST_DATA")+'/..')
# print listDir
for path in listDir:
if path.startswith('test_') and path.endswith('.py'):
tests.addTests(unittest.defaultTestLoader.loadTestsFromName('tests.' + path[:-3]))
if '--show' in testNames:
print(blue("\n > > You can run any of the following tests by:\n"))
grepStr = '' if len(testNames)<2 else testNames[1]
visitTests(tests, grepStr)
print("\n - From applications/function_tests (to run all use --allFuncs):")
for test in cTests:
print(" %s" % test)
elif '--allPrograms' in testNames:
result = GTestResult()
tests.run(result)
result.doReport()
elif '--allFuncs' in testNames:
xmippBinDir = os.path.join(os.environ.get("XMIPP_SRC"), 'xmipp', 'bin')
errors = []
startTimeAll = time.time()
for test in cTests:
sys.stdout.write(blue("\n\n>> Running %s:\n" % test))
sys.stdout.flush()
result = os.system(test)
sys.stdout.flush()
if result != 0:
errors.append(test)
secs = time.time() - startTimeAll
sys.stderr.write(blue("\n -- End of all function tests -- \n\n"))
sys.stderr.write("%s run %d tests (%0.3f secs)\n" %
(green("[==========]"), len(cTests), secs))
print >> sys.stdout, green("[ PASSED ]") + " %d tests" % (len(cTests) - len(errors))
sys.stdout.flush()
if errors:
print >> sys.stderr, red("[ FAILED ]") + " %d tests:" % len(errors)
for fail in errors:
print(red(" - %s" % fail))
sys.stdout.flush()
else:
for test in testNames:
test = 'tests.test_programs_xmipp.' + test
try:
tests.addTests(unittest.defaultTestLoader.loadTestsFromName(test))
except Exception as e:
print(red('Cannot find test %s -- skipping') % test)
print('error: %s' % e)
result = GTestResult()
tests.run(result)
result.doReport() | """ Run several commands.
Params:
cmdList: the list of commands to execute.
cmdType: either 'preruns' or 'postruns'
"""
pipe = '>'
outDir = self.outputDir
for cmd in cmdList:
if cmd:
cmd = self._parseArgs(cmd)
cmd = " %(cmd)s %(pipe)s %(outDir)s/%(cmdType)s_stdout.txt 2%(pipe)s %(outDir)s/%(cmdType)s_stderr.txt" % locals()
print " Running %s: %s" % (cmdType, blue(cmd))
command = Command(cmd, env=os.environ)
command.run(timeout=self._timeout)
pipe = ">>"
sys.stdout.flush() | identifier_body |
test.py | #!/usr/bin/env python
# **************************************************************************
# *
# * Authors: David Maluenda (dmaluenda@cnb.csic.es)
# * based on J.M. De la Rosa Trevin (jmdelarosa@cnb.csic.es)
# *
# * Unidad de Bioinformatica of Centro Nacional de Biotecnologia , CSIC
# *
# * This program is free software; you can redistribute it and/or modify
# * it under the terms of the GNU General Public License as published by
# * the Free Software Foundation; either version 2 of the License, or
# * (at your option) any later version.
# *
# * This program is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU General Public License for more details.
# *
# * You should have received a copy of the GNU General Public License
# * along with this program; if not, write to the Free Software
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
# * 02111-1307 USA
# *
# * All comments concerning this program package may be sent to the
# * e-mail address 'scipion@cnb.csic.es'
# *
# **************************************************************************
import os
import time
import unittest
import threading
import subprocess
import sys
import shutil
from traceback import format_exception
import xmippLib
VAHID = "vahid"
RM = 'rmarabini'
COSS = 'coss'
JMRT = 'delarosatrevin'
JOTON = 'joton'
DISCONTINUED = 'nobody'
JMOTA = 'javimota'
class Command(object):
def __init__(self, cmd, env=None):
self.cmd = cmd
self.process = None
self.env = env
def run(self, timeout):
# type: (object) -> object
def target():
self.process = subprocess.Popen(self.cmd, shell=True, env=self.env)
self.process.communicate()
thread = threading.Thread(target=target)
thread.start()
thread.join(timeout)
if thread.is_alive():
print(red('ERROR: timeout reached for this process'))
self.process.terminate()
thread.join()
self.process = None
def terminate(self):
if self.process != None:
self.process.terminate()
print('Ctrl-c pressed, aborting this test')
class ProgramTest(unittest.TestCase):
_testDir = None
_environ = None
_timeout = 300
# _labels = [WEEKLY]
@classmethod
def setTestDir(cls, newTestDir):
cls._testDir = newTestDir
@classmethod
def setEnviron(cls, newEnviron):
cls._environ = newEnviron
@classmethod
def setTimeOut(cls, newTimeOut):
cls._timeout = newTimeOut
def _parseArgs(self, args):
''' Expand our tags %o, %p and %d with corresponding values '''
args = args.replace("%o", self.outputDir)
args = args.replace("%p", self.program)
#args = args.replace("%d", self.fnDir)
return args
def _runCommands(self, cmdList, cmdType):
""" Run several commands.
Params:
cmdList: the list of commands to execute.
cmdType: either 'preruns' or 'postruns'
"""
pipe = '>'
outDir = self.outputDir
for cmd in cmdList:
if cmd:
cmd = self._parseArgs(cmd)
cmd = " %(cmd)s %(pipe)s %(outDir)s/%(cmdType)s_stdout.txt 2%(pipe)s %(outDir)s/%(cmdType)s_stderr.txt" % locals()
print " Running %s: %s" % (cmdType, blue(cmd))
command = Command(cmd, env=os.environ)
command.run(timeout=self._timeout)
pipe = ">>"
sys.stdout.flush()
def runCase(self, args, mpi=0, changeDir=False,
preruns=None, postruns=None, validate=None,
outputs=None, random=False, errorthreshold=0.001):
# Retrieve the correct case number from the test name id
# We asumme here that 'test_caseXXX' should be in the name
caseId = unittest.TestCase.id(self)
if not 'test_case' in caseId:
raise Exception("'test_case' string should be in the test function name followed by a number")
_counter = int(caseId.split('test_case')[1])
self._testDir = os.environ.get("XMIPP_TEST_DATA")
self.outputDir = os.path.join('tmpLink', '%s_%02d' % (self.program, _counter))
self.outputDirAbs = os.path.join(self._testDir, self.outputDir)
self.goldDir = os.path.join(self._testDir, 'gold', '%s_%02d' % (self.program, _counter))
# Change to tests root folder (self._testDir)
cwd = os.getcwd()
os.chdir(self._testDir)
# Clean and create the program output folder if not exists
createDir(self.outputDir, clean=True)
if preruns:
self._runCommands(preruns, 'preruns')
if mpi:
cmd = "mpirun -np %d `which %s`" % (mpi, self.program)
else:
cmd = self.program
args = self._parseArgs(args)
if changeDir:
cmd = "cd %s ; %s %s > stdout.txt 2> stderr.txt" % (self.outputDir, cmd, args)
else:
cmd = "%s %s > %s/stdout.txt 2> %s/stderr.txt" % (cmd, args, self.outputDir, self.outputDir)
print " Command: "
print " ", blue(cmd)
sys.stdout.flush()
#run the test itself
command = Command(cmd, env=os.environ)
self._command = command
try:
command.run(timeout=self._timeout)
except KeyboardInterrupt:
command.terminate()
stderrFn = "%s/stderr.txt" % self.outputDir
if os.path.exists(stderrFn):
errFile = open(stderrFn, 'r')
errStr = errFile.read()
errFile.close()
if 'XMIPP_ERROR' in errStr:
print errStr
if postruns:
self._runCommands(postruns, 'postruns')
if outputs:
self._checkOutputs(outputs, random, errorthreshold=errorthreshold)
if validate:
validate()
os.chdir(cwd)
def _checkOutputs(self, outputs, random=False, errorthreshold=0.001):
""" Check that all output files are produced
and are equivalent to the ones in goldStandard folder.
"""
for out in outputs:
|
class GTestResult(unittest.TestResult):
""" Subclass TestResult to output tests results with colors (green for success and red for failure)
and write a report on an .xml file.
"""
xml = None
testFailed = 0
numberTests = 0
def __init__(self):
unittest.TestResult.__init__(self)
self.startTimeAll = time.time()
def openXmlReport(self, classname, filename):
# self.xml = open(filename, 'w')
# self.xml.write('<testsuite name="%s">\n' % classname)
pass
def doReport(self):
secs = time.time() - self.startTimeAll
sys.stderr.write("%s run %d tests (%0.3f secs)\n" %
(green("[==========]"), self.numberTests, secs))
if self.testFailed:
print >> sys.stderr, red("[ FAILED ]") + " %d tests" % self.testFailed
print >> sys.stdout, green("[ PASSED ]") + " %d tests" % (self.numberTests - self.testFailed)
sys.stdout.flush()
# self.xml.write('</testsuite>\n')
# self.xml.close()
def tic(self):
self.startTime = time.time()
def toc(self):
return time.time() - self.startTime
def startTest(self, test):
self.tic()
self.numberTests += 1
def getTestName(self, test):
parts = str(test).split()
name = parts[0]
parts = parts[1].split('.')
classname = parts[-1].replace(")", "")
return "%s.%s" % (classname, name)
def addSuccess(self, test):
secs = self.toc()
sys.stderr.write("%s %s (%0.3f secs)\n\n" % (green('[ RUN OK ]'), self.getTestName(test), secs))
def reportError(self, test, err):
sys.stderr.write("\n%s" % ("".join(format_exception(*err))))
sys.stderr.write("%s %s\n\n" % (red('[ FAILED ]'),
self.getTestName(test)))
self.testFailed += 1
def addError(self, test, err):
self.reportError(test, err)
def addFailure(self, test, err):
self.reportError(test, err)
def green(text):
return "\033[92m "+text+"\033[0m"
def red(text):
return "\033[91m "+text+"\033[0m"
def blue(text):
return "\033[34m "+text+"\033[0m"
def yellow(text):
return "\033[93m "+text+"\033[0m"
def createDir(dirname, clean=False):
if clean and os.path.exists(dirname):
shutil.rmtree(dirname)
os.makedirs(dirname)
def visitTests(tests, grepStr=''):
""" Show the list of tests available """
# First flatten the list of tests.
testsFlat = []
toCheck = [t for t in tests]
while toCheck:
test = toCheck.pop()
if isinstance(test, unittest.TestSuite):
toCheck += [t for t in test]
else:
if grepStr in str(type(test)):
testsFlat.append(test)
testsFlat.sort()
# Follow the flattened list of tests and show the module, class
# and name, in a nice way.
lastClass = None
lastModule = None
grepPrint = '' if grepStr is '' else red(' (grep: %s)'%grepStr)
for t in testsFlat:
moduleName, className, testName = t.id().rsplit('.', 2)
# If there is a failure loading the test, show it
if moduleName.startswith('unittest.loader.ModuleImportFailure'):
print red(moduleName), " test:", t.id()
continue
if moduleName != lastModule:
lastModule = moduleName
print(" - From %s.py (to run all use --allPrograms)"
% '/'.join(moduleName.split('.')) + grepPrint)
if className != lastClass:
lastClass = className
print(" ./xmipp test %s" % className)
if __name__ == "__main__":
testNames = sys.argv[1:]
cTests = subprocess.check_output('compgen -ac | grep xmipp_test_', shell=True,
executable='/bin/bash').splitlines()
tests = unittest.TestSuite()
if '--show' in testNames or '--allPrograms' in testNames:
# tests.addTests(unittest.defaultTestLoader.discover(os.environ.get("XMIPP_TEST_DATA")+'/..',
# pattern='test*.py'))#,top_level_dir=os.environ.get("XMIPP_TEST_DATA")+'/..'))
listDir = os.listdir(os.environ.get("XMIPP_TEST_DATA")+'/..')
# print listDir
for path in listDir:
if path.startswith('test_') and path.endswith('.py'):
tests.addTests(unittest.defaultTestLoader.loadTestsFromName('tests.' + path[:-3]))
if '--show' in testNames:
print(blue("\n > > You can run any of the following tests by:\n"))
grepStr = '' if len(testNames)<2 else testNames[1]
visitTests(tests, grepStr)
print("\n - From applications/function_tests (to run all use --allFuncs):")
for test in cTests:
print(" %s" % test)
elif '--allPrograms' in testNames:
result = GTestResult()
tests.run(result)
result.doReport()
elif '--allFuncs' in testNames:
xmippBinDir = os.path.join(os.environ.get("XMIPP_SRC"), 'xmipp', 'bin')
errors = []
startTimeAll = time.time()
for test in cTests:
sys.stdout.write(blue("\n\n>> Running %s:\n" % test))
sys.stdout.flush()
result = os.system(test)
sys.stdout.flush()
if result != 0:
errors.append(test)
secs = time.time() - startTimeAll
sys.stderr.write(blue("\n -- End of all function tests -- \n\n"))
sys.stderr.write("%s run %d tests (%0.3f secs)\n" %
(green("[==========]"), len(cTests), secs))
print >> sys.stdout, green("[ PASSED ]") + " %d tests" % (len(cTests) - len(errors))
sys.stdout.flush()
if errors:
print >> sys.stderr, red("[ FAILED ]") + " %d tests:" % len(errors)
for fail in errors:
print(red(" - %s" % fail))
sys.stdout.flush()
else:
for test in testNames:
test = 'tests.test_programs_xmipp.' + test
try:
tests.addTests(unittest.defaultTestLoader.loadTestsFromName(test))
except Exception as e:
print(red('Cannot find test %s -- skipping') % test)
print('error: %s' % e)
result = GTestResult()
tests.run(result)
result.doReport() | outFile = os.path.join(self._testDir, self.outputDir, out)
fileGoldStd = os.path.join(self.goldDir, out)
# Check the expect output file was produced
msg = "Missing expected output file:\n output: %s" % outFile
self.assertTrue(os.path.exists(outFile), red(msg))
if random:
print(yellow("WARNING: %s was created using a random seed, check skipped..." % outFile))
else:
fnGoldStd = xmippLib.FileName(fileGoldStd)
if fnGoldStd.isImage():
im1 = xmippLib.Image(fileGoldStd)
im2 = xmippLib.Image(outFile)
msg = "Images are not equal (+-%f):\n output: %s\n gold: %s" % \
(errorthreshold, outFile, fileGoldStd)
self.assertTrue(im1.equal(im2, errorthreshold), red(msg))
elif fnGoldStd.isMetaData():
msg = "MetaDatas are not equal:\n output: %s\n gold: %s" % (outFile, fileGoldStd)
self.assertTrue(xmippLib.compareTwoMetadataFiles(outFile, fileGoldStd), red(msg))
else:
msg = "Files are not equal:\n output: %s\n gold: %s" % (outFile, fileGoldStd)
self.assertTrue(xmippLib.compareTwoFiles(outFile, fileGoldStd, 0), red(msg)) | conditional_block |
test.py | #!/usr/bin/env python
# **************************************************************************
# *
# * Authors: David Maluenda (dmaluenda@cnb.csic.es)
# * based on J.M. De la Rosa Trevin (jmdelarosa@cnb.csic.es)
# *
# * Unidad de Bioinformatica of Centro Nacional de Biotecnologia , CSIC
# *
# * This program is free software; you can redistribute it and/or modify
# * it under the terms of the GNU General Public License as published by
# * the Free Software Foundation; either version 2 of the License, or
# * (at your option) any later version.
# *
# * This program is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU General Public License for more details.
# *
# * You should have received a copy of the GNU General Public License
# * along with this program; if not, write to the Free Software
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
# * 02111-1307 USA
# *
# * All comments concerning this program package may be sent to the
# * e-mail address 'scipion@cnb.csic.es'
# *
# **************************************************************************
import os
import time
import unittest
import threading
import subprocess
import sys
import shutil
from traceback import format_exception
import xmippLib
VAHID = "vahid"
RM = 'rmarabini'
COSS = 'coss'
JMRT = 'delarosatrevin'
JOTON = 'joton'
DISCONTINUED = 'nobody'
JMOTA = 'javimota'
class Command(object):
def __init__(self, cmd, env=None):
self.cmd = cmd
self.process = None
self.env = env
def run(self, timeout):
# type: (object) -> object
def target():
self.process = subprocess.Popen(self.cmd, shell=True, env=self.env)
self.process.communicate()
thread = threading.Thread(target=target)
thread.start()
thread.join(timeout)
if thread.is_alive():
print(red('ERROR: timeout reached for this process'))
self.process.terminate()
thread.join()
self.process = None
def terminate(self):
if self.process != None:
self.process.terminate()
print('Ctrl-c pressed, aborting this test')
class ProgramTest(unittest.TestCase):
_testDir = None
_environ = None
_timeout = 300
# _labels = [WEEKLY]
@classmethod
def setTestDir(cls, newTestDir):
cls._testDir = newTestDir
@classmethod
def setEnviron(cls, newEnviron):
cls._environ = newEnviron
@classmethod
def setTimeOut(cls, newTimeOut):
cls._timeout = newTimeOut
def _parseArgs(self, args):
''' Expand our tags %o, %p and %d with corresponding values '''
args = args.replace("%o", self.outputDir)
args = args.replace("%p", self.program)
#args = args.replace("%d", self.fnDir)
return args
def _runCommands(self, cmdList, cmdType):
""" Run several commands.
Params:
cmdList: the list of commands to execute.
cmdType: either 'preruns' or 'postruns'
"""
pipe = '>'
outDir = self.outputDir
for cmd in cmdList:
if cmd:
cmd = self._parseArgs(cmd)
cmd = " %(cmd)s %(pipe)s %(outDir)s/%(cmdType)s_stdout.txt 2%(pipe)s %(outDir)s/%(cmdType)s_stderr.txt" % locals()
print " Running %s: %s" % (cmdType, blue(cmd))
command = Command(cmd, env=os.environ)
command.run(timeout=self._timeout)
pipe = ">>"
sys.stdout.flush()
def runCase(self, args, mpi=0, changeDir=False,
preruns=None, postruns=None, validate=None,
outputs=None, random=False, errorthreshold=0.001):
# Retrieve the correct case number from the test name id
# We asumme here that 'test_caseXXX' should be in the name
caseId = unittest.TestCase.id(self)
if not 'test_case' in caseId:
raise Exception("'test_case' string should be in the test function name followed by a number")
_counter = int(caseId.split('test_case')[1])
self._testDir = os.environ.get("XMIPP_TEST_DATA")
self.outputDir = os.path.join('tmpLink', '%s_%02d' % (self.program, _counter))
self.outputDirAbs = os.path.join(self._testDir, self.outputDir)
self.goldDir = os.path.join(self._testDir, 'gold', '%s_%02d' % (self.program, _counter))
# Change to tests root folder (self._testDir)
cwd = os.getcwd()
os.chdir(self._testDir)
# Clean and create the program output folder if not exists
createDir(self.outputDir, clean=True)
if preruns:
self._runCommands(preruns, 'preruns')
if mpi:
cmd = "mpirun -np %d `which %s`" % (mpi, self.program)
else:
cmd = self.program
args = self._parseArgs(args)
if changeDir:
cmd = "cd %s ; %s %s > stdout.txt 2> stderr.txt" % (self.outputDir, cmd, args)
else:
cmd = "%s %s > %s/stdout.txt 2> %s/stderr.txt" % (cmd, args, self.outputDir, self.outputDir)
print " Command: "
print " ", blue(cmd)
sys.stdout.flush()
#run the test itself
command = Command(cmd, env=os.environ)
self._command = command
try:
command.run(timeout=self._timeout)
except KeyboardInterrupt:
command.terminate()
stderrFn = "%s/stderr.txt" % self.outputDir
if os.path.exists(stderrFn):
errFile = open(stderrFn, 'r')
errStr = errFile.read()
errFile.close()
if 'XMIPP_ERROR' in errStr:
print errStr
if postruns:
self._runCommands(postruns, 'postruns')
if outputs:
self._checkOutputs(outputs, random, errorthreshold=errorthreshold)
if validate:
validate()
os.chdir(cwd)
def _checkOutputs(self, outputs, random=False, errorthreshold=0.001):
""" Check that all output files are produced
and are equivalent to the ones in goldStandard folder.
"""
for out in outputs:
outFile = os.path.join(self._testDir, self.outputDir, out)
fileGoldStd = os.path.join(self.goldDir, out)
# Check the expect output file was produced
msg = "Missing expected output file:\n output: %s" % outFile
self.assertTrue(os.path.exists(outFile), red(msg))
if random:
print(yellow("WARNING: %s was created using a random seed, check skipped..." % outFile))
else:
fnGoldStd = xmippLib.FileName(fileGoldStd)
if fnGoldStd.isImage():
im1 = xmippLib.Image(fileGoldStd)
im2 = xmippLib.Image(outFile)
msg = "Images are not equal (+-%f):\n output: %s\n gold: %s" % \
(errorthreshold, outFile, fileGoldStd)
self.assertTrue(im1.equal(im2, errorthreshold), red(msg))
elif fnGoldStd.isMetaData():
msg = "MetaDatas are not equal:\n output: %s\n gold: %s" % (outFile, fileGoldStd)
self.assertTrue(xmippLib.compareTwoMetadataFiles(outFile, fileGoldStd), red(msg))
else:
msg = "Files are not equal:\n output: %s\n gold: %s" % (outFile, fileGoldStd)
self.assertTrue(xmippLib.compareTwoFiles(outFile, fileGoldStd, 0), red(msg))
class GTestResult(unittest.TestResult):
""" Subclass TestResult to output tests results with colors (green for success and red for failure)
and write a report on an .xml file.
"""
xml = None
testFailed = 0
numberTests = 0
def __init__(self):
unittest.TestResult.__init__(self)
self.startTimeAll = time.time()
def openXmlReport(self, classname, filename):
# self.xml = open(filename, 'w')
# self.xml.write('<testsuite name="%s">\n' % classname)
pass
def | (self):
secs = time.time() - self.startTimeAll
sys.stderr.write("%s run %d tests (%0.3f secs)\n" %
(green("[==========]"), self.numberTests, secs))
if self.testFailed:
print >> sys.stderr, red("[ FAILED ]") + " %d tests" % self.testFailed
print >> sys.stdout, green("[ PASSED ]") + " %d tests" % (self.numberTests - self.testFailed)
sys.stdout.flush()
# self.xml.write('</testsuite>\n')
# self.xml.close()
def tic(self):
self.startTime = time.time()
def toc(self):
return time.time() - self.startTime
def startTest(self, test):
self.tic()
self.numberTests += 1
def getTestName(self, test):
parts = str(test).split()
name = parts[0]
parts = parts[1].split('.')
classname = parts[-1].replace(")", "")
return "%s.%s" % (classname, name)
def addSuccess(self, test):
secs = self.toc()
sys.stderr.write("%s %s (%0.3f secs)\n\n" % (green('[ RUN OK ]'), self.getTestName(test), secs))
def reportError(self, test, err):
sys.stderr.write("\n%s" % ("".join(format_exception(*err))))
sys.stderr.write("%s %s\n\n" % (red('[ FAILED ]'),
self.getTestName(test)))
self.testFailed += 1
def addError(self, test, err):
self.reportError(test, err)
def addFailure(self, test, err):
self.reportError(test, err)
def green(text):
return "\033[92m "+text+"\033[0m"
def red(text):
return "\033[91m "+text+"\033[0m"
def blue(text):
return "\033[34m "+text+"\033[0m"
def yellow(text):
return "\033[93m "+text+"\033[0m"
def createDir(dirname, clean=False):
if clean and os.path.exists(dirname):
shutil.rmtree(dirname)
os.makedirs(dirname)
def visitTests(tests, grepStr=''):
""" Show the list of tests available """
# First flatten the list of tests.
testsFlat = []
toCheck = [t for t in tests]
while toCheck:
test = toCheck.pop()
if isinstance(test, unittest.TestSuite):
toCheck += [t for t in test]
else:
if grepStr in str(type(test)):
testsFlat.append(test)
testsFlat.sort()
# Follow the flattened list of tests and show the module, class
# and name, in a nice way.
lastClass = None
lastModule = None
grepPrint = '' if grepStr is '' else red(' (grep: %s)'%grepStr)
for t in testsFlat:
moduleName, className, testName = t.id().rsplit('.', 2)
# If there is a failure loading the test, show it
if moduleName.startswith('unittest.loader.ModuleImportFailure'):
print red(moduleName), " test:", t.id()
continue
if moduleName != lastModule:
lastModule = moduleName
print(" - From %s.py (to run all use --allPrograms)"
% '/'.join(moduleName.split('.')) + grepPrint)
if className != lastClass:
lastClass = className
print(" ./xmipp test %s" % className)
if __name__ == "__main__":
testNames = sys.argv[1:]
cTests = subprocess.check_output('compgen -ac | grep xmipp_test_', shell=True,
executable='/bin/bash').splitlines()
tests = unittest.TestSuite()
if '--show' in testNames or '--allPrograms' in testNames:
# tests.addTests(unittest.defaultTestLoader.discover(os.environ.get("XMIPP_TEST_DATA")+'/..',
# pattern='test*.py'))#,top_level_dir=os.environ.get("XMIPP_TEST_DATA")+'/..'))
listDir = os.listdir(os.environ.get("XMIPP_TEST_DATA")+'/..')
# print listDir
for path in listDir:
if path.startswith('test_') and path.endswith('.py'):
tests.addTests(unittest.defaultTestLoader.loadTestsFromName('tests.' + path[:-3]))
if '--show' in testNames:
print(blue("\n > > You can run any of the following tests by:\n"))
grepStr = '' if len(testNames)<2 else testNames[1]
visitTests(tests, grepStr)
print("\n - From applications/function_tests (to run all use --allFuncs):")
for test in cTests:
print(" %s" % test)
elif '--allPrograms' in testNames:
result = GTestResult()
tests.run(result)
result.doReport()
elif '--allFuncs' in testNames:
xmippBinDir = os.path.join(os.environ.get("XMIPP_SRC"), 'xmipp', 'bin')
errors = []
startTimeAll = time.time()
for test in cTests:
sys.stdout.write(blue("\n\n>> Running %s:\n" % test))
sys.stdout.flush()
result = os.system(test)
sys.stdout.flush()
if result != 0:
errors.append(test)
secs = time.time() - startTimeAll
sys.stderr.write(blue("\n -- End of all function tests -- \n\n"))
sys.stderr.write("%s run %d tests (%0.3f secs)\n" %
(green("[==========]"), len(cTests), secs))
print >> sys.stdout, green("[ PASSED ]") + " %d tests" % (len(cTests) - len(errors))
sys.stdout.flush()
if errors:
print >> sys.stderr, red("[ FAILED ]") + " %d tests:" % len(errors)
for fail in errors:
print(red(" - %s" % fail))
sys.stdout.flush()
else:
for test in testNames:
test = 'tests.test_programs_xmipp.' + test
try:
tests.addTests(unittest.defaultTestLoader.loadTestsFromName(test))
except Exception as e:
print(red('Cannot find test %s -- skipping') % test)
print('error: %s' % e)
result = GTestResult()
tests.run(result)
result.doReport() | doReport | identifier_name |
DLProject_DNN.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Dec 9 19:12:51 2018
@author: ayoujiljad
"""
# -*- coding: utf-8 -*-
"""
Éditeur de Spyder
Ceci est un script temporaire.
"""
import tensorflow as tf
import time
import os
import numpy as np
import pandas as pd
import numpy as np
import math
import random
from numpy import linalg as LA
from tensorflow.examples.tutorials.mnist import input_data
from sklearn.model_selection import train_test_split
## Changing working directory
cwd = os.getcwd()
Path = input("What path are we going to use : ")
if (Path == "VM") :
path = '/home/ja3291/'
file_path = 'ESZ2018.csv'
elif (Path == ""):
path = '/Users/ayoujiljad/Documents/Python/2018212-12-06-2018_3_12_22/'
file_path = 'ESZ2018.csv'
else:
f |
os.chdir(path)
cwd = os.getcwd()
# 256 neurons in each hidden layers
n_hidden_1 = 400
n_hidden_2 = 300
n_hidden_3 = 200
n_hidden_4 = 100
n_hidden_5 = 50
# There are 10 levels, and we consider 40 timestamps and the mid_price
# output size
input_size = 400
output_size = 3
# Parameters
learning_rate = 0.001
training_epochs = 2500
batch_size = 32
display_step = 1
test_size=0.2
start_time = time.time()
# number of previous timestamps used for prediction
n_times = 40
# number of quote levels used for prediction
n_levels = 10
split=0.7
#Labels to extract from data
L_labels = ['Date', 'Time', 'L1-BidPrice', 'L1-BidSize', 'L1-BuyNo', 'L1-AskPrice',
'L1-AskSize', 'L1-SellNo', 'L2-BidPrice', 'L2-BidSize', 'L2-BuyNo',
'L2-AskPrice', 'L2-AskSize', 'L2-SellNo', 'L3-BidPrice', 'L3-BidSize',
'L3-BuyNo', 'L3-AskPrice', 'L3-AskSize', 'L3-SellNo', 'L4-BidPrice',
'L4-BidSize', 'L4-BuyNo', 'L4-AskPrice', 'L4-AskSize', 'L4-SellNo',
'L5-BidPrice', 'L5-BidSize', 'L5-BuyNo', 'L5-AskPrice', 'L5-AskSize',
'L5-SellNo', 'L6-BidPrice', 'L6-BidSize', 'L6-BuyNo', 'L6-AskPrice',
'L6-AskSize', 'L6-SellNo', 'L7-BidPrice', 'L7-BidSize', 'L7-BuyNo',
'L7-AskPrice', 'L7-AskSize', 'L7-SellNo', 'L8-BidPrice', 'L8-BidSize',
'L8-BuyNo', 'L8-AskPrice', 'L8-AskSize', 'L8-SellNo', 'L9-BidPrice',
'L9-BidSize', 'L9-BuyNo', 'L9-AskPrice', 'L9-AskSize', 'L9-SellNo',
'L10-BidPrice', 'L10-BidSize', 'L10-BuyNo', 'L10-AskPrice', 'L10-AskSize',
'L10-SellNo']
# import tick data from the given path
print("Importing the data...")
Nrow = input("How many rows in the dataset do you want to consider ?")
if (Nrow == ''):
df_import = pd.read_csv(file_path)
else :
df_import = pd.read_csv(file_path, nrows=int(Nrow))
df = df_import[L_labels]
print("Done.")
df2 = pd.DataFrame()
print()
print("Rearraging the data to compute the mid prices..")
print(" First step..")
count = 0
for i in range(n_levels):
if (count/10 < np.floor(10*i/n_levels)):
count = round(100*i/n_levels)
print (" "+str(count)+" %"),
i = i+1
df2['relative depth'+str(i)]=df['L'+str(i)+'-BidSize']/(df['L'+str(i)+'-BidSize']+df['L'+str(i)+'-AskSize'])
df2['mid price']=(df['L1-AskPrice']+df['L1-BidPrice'])/2
df2['mid_price_change'] = 0
print(" Done.. ")
print(" Second step..")
count = 0
for i in range(1,len(df2)):
if (count/10 < np.floor(10*i/len(df2))):
count = round(100*i/len(df2))
print(" "+str(count)+" %")
df2.loc[i,'mid_price_change'] = df2.loc[i,'mid price']- df2.loc[i-1,'mid price']
print(" Done.. ")
print(" Third step..")
df3 = pd.DataFrame()
count = 0
for i in range(len(df2)-n_times):
if (count/10 < np.floor(10*i/(len(df2)-n_times))):
count = round(100*i/(len(df2)-n_times))
print(" "+str(count)+" %")
my_list = []
dataf = df2.iloc[i:(i+n_times),0:10]
datf = dataf.values.reshape(dataf.shape[0]*dataf.shape[1])
df_temp = pd.DataFrame([list(datf)])
df_temp['mid_price_change']=df2.loc[i+n_times,'mid_price_change']
df3 = pd.concat([df3,df_temp],ignore_index=True)
print(" Done.. ")
df3['classification'] = 0
df3['classification'] = df3['mid_price_change'].apply(lambda x: 0 if x == 0 else 1 if x > 0 else -1)
df3 = df3.drop(['mid_price_change'],axis=1)
print("Done.")
print()
# undersampling with temporal bias
print("Random undersampling...")
# set up a remaining rate
rr = 0.5
# construct bins of the majority class
my_list = []
temp_list = []
remain_index = []
for i in range(len(df3)):
if df3.loc[i,'classification']==0:
temp_list.append(i)
else:
remain_index.append(i)
if len(temp_list)>0:
my_list.append(temp_list)
temp_list = []
# in each bin, drop the data with a relative probability according to its position
for bins in my_list:
length = len(bins)
temp_list = np.arange(length)+1
pp = temp_list/sum(temp_list)
num = math.ceil(length*rr)
temp_index = np.random.choice(temp_list,num,p=pp,replace=False)
for i in temp_index:
remain_index.append(bins[i-1])
df3 = df3.loc[remain_index]
df3 = df3.sort_index()
df3 = df3.reset_index(drop=True)
print("Done.")
print()
# Oversampling using Smote algorithm
# setup the oversampling/enlarging rate er and the number of nearest neighbors k
k = 2
print("Smote Algorithm Oversampling...")
# calculate difference between 2 feature vectors
def diff(x,y,df,my_dict):
x_index = my_dict[x]
y_index = my_dict[y]
vec1 = np.array(df3.iloc[x_index,:df.shape[1]-1])
vec2 = np.array(df3.iloc[y_index,:df.shape[1]-1])
vec_diff = vec2-vec1
return vec_diff
# find index of nearest k neighbors from j's row of data
def find_neighbors(j,k,my_dict,my_dict2,df):
i = my_dict[j]
global dist_matrix
for l in range(len(dist_matrix)):
if dist_matrix[i][l]!=0 or i == l:
continue
dist_matrix[i][l]=dist_matrix[l][i]=LA.norm(diff(i,l,df,my_dict2))
a = dist_matrix[i]
neighbors = sorted(range(len(a)), key=lambda i: a[i])[:k+1]
neighbors.remove(i)
res = []
for l in neighbors:
res.append(my_dict2[l])
return res
temp_list = []
temp_list_neg = []
for i in range(len(df3)):
if df3.loc[i,'classification']==1:
temp_list.append(i)
if df3.loc[i,'classification']==-1:
temp_list_neg.append(i)
temp_list = np.asarray(temp_list)
length = len(temp_list)
temp_list_neg = np.asarray(temp_list_neg)
length_neg = len(temp_list_neg)
num_pos = max(1,len(df3)-length)
num_neg = max(1,len(df3)-length_neg)
# oversample the data with positive mid_price move
# initialize a matrix to save the feature difference between 2 feature vectors
dist_matrix = np.zeros((length,length))
# 2 dictionary to transform between matrix index and dataframe row index
my_dict = {}
my_dict2 = {}
for i in range(length):
my_dict[temp_list[i]]=i
my_dict2[i] = temp_list[i]
# main function
case_index = np.random.choice(temp_list,num_pos,replace=True)
count = 1
for j in case_index:
top_indexs = find_neighbors(j,k,my_dict,my_dict2,df3)
top_indexs = np.asarray(top_indexs)
my_index = np.random.choice(top_indexs)
temp_array = np.array(df3.iloc[j,:df3.shape[1]-1])
difference = diff(my_dict[j],my_dict[my_index],df3,my_dict2)
ran_num = random.uniform(0, 1)
temp_array = temp_array+difference*ran_num
temp_array = temp_array.tolist()
temp_array.append(1)
df3.loc[j+count/(num+1)] = temp_array
count += 1
df3 = df3.sort_index()
df3 = df3.reset_index(drop=True)
# oversample the data with negative mid_price move
temp_list = temp_list_neg
length = length_neg
# initialize a matrix to save the feature difference between 2 feature vectors
dist_matrix = np.zeros((length,length))
# 2 dictionary to transform between matrix index and dataframe row index
my_dict = {}
my_dict2 = {}
for i in range(length):
my_dict[temp_list[i]]=i
my_dict2[i] = temp_list[i]
# main function
case_index = np.random.choice(temp_list,num_neg,replace=True)
count = 1
for j in case_index:
top_indexs = find_neighbors(j,k,my_dict,my_dict2,df3)
top_indexs = np.asarray(top_indexs)
my_index = np.random.choice(top_indexs)
temp_array = np.array(df3.iloc[j,:df3.shape[1]-1])
difference = diff(my_dict[j],my_dict[my_index],df3,my_dict2)
ran_num = random.uniform(0, 1)
temp_array = temp_array+difference*ran_num
temp_array = temp_array.tolist()
temp_array.append(-1)
df3.loc[j+count/(num+1)] = temp_array
count += 1
df3 = df3.sort_index()
df3 = df3.reset_index(drop=True)
print("Done.")
print()
df3['classification'] = df3['classification'].apply(lambda x: [0,1,0] if x == 0 else [1,0,0] if x > 0 else [0,0,1])
#df3['classification']=df3['classification'].reshape
#Splitting the data into a training and test data:
y = df3['classification'].values
y=np.stack(y)
X = df3.drop('classification', axis=1).values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = test_size, random_state=42, stratify=y)
#y_test=y_test.reshape(X_test.shape[0],3)
def layer1(x, weight_shape, bias_shape):
"""
Defines the network layers
input:
- x: input vector of the layer
- weight_shape: shape the the weight maxtrix
- bias_shape: shape of the bias vector
output:
- output vector of the layer after the matrix multiplication and transformation
"""
weight_init = tf.random_normal_initializer(stddev=(2.0/weight_shape[0])**0.5)
W = tf.get_variable("W", weight_shape, initializer=weight_init)
bias_init = tf.constant_initializer(value=0)
b = tf.get_variable("b", bias_shape, initializer=bias_init)
return tf.nn.softmax(tf.matmul(x, W) + b)
def layer2(x, weight_shape, bias_shape):
"""
Defines the network layers
input:
- x: input vector of the layer
- weight_shape: shape the the weight maxtrix
- bias_shape: shape of the bias vector
output:
- output vector of the layer after the matrix multiplication and transformation
"""
weight_init = tf.random_normal_initializer(stddev=(2.0/weight_shape[0])**0.5)
W = tf.get_variable("W", weight_shape, initializer=weight_init)
bias_init = tf.constant_initializer(value=0)
b = tf.get_variable("b", bias_shape, initializer=bias_init)
return tf.nn.relu(tf.matmul(x, W) + b)
def inference(x):
"""
define the whole network (5 hidden layers + output layers)
input:
- a batch of pictures
(input shape = (batch_size*image_size))
output:
- a batch vector corresponding to the logits predicted by the network
(output shape = (batch_size*output_size))
"""
print(type(x))
print(np.shape(x))
print(x)
with tf.variable_scope("hidden_layer_1"):
hidden_1 = layer2(x, [input_size, n_hidden_1], [n_hidden_1])
#print([input_size, n_hidden_1])
with tf.variable_scope("hidden_layer_2"):
hidden_2 = layer2(hidden_1, [n_hidden_1, n_hidden_2], [n_hidden_2])
#print([n_hidden_1, n_hidden_2])
with tf.variable_scope("hidden_layer_3"):
hidden_3 = layer2(hidden_2, [n_hidden_2, n_hidden_3], [n_hidden_3])
#print([n_hidden_2, n_hidden_3])
with tf.variable_scope("hidden_layer_4"):
hidden_4 = layer2(hidden_3, [n_hidden_3, n_hidden_4], [n_hidden_4])
#print([n_hidden_3, n_hidden_4])
with tf.variable_scope("hidden_layer_5"):
hidden_5 = layer2(hidden_4, [n_hidden_4, n_hidden_5], [n_hidden_5])
#print([n_hidden_4, n_hidden_5])
with tf.variable_scope("output"):
output = layer1(hidden_5, [n_hidden_5, output_size], [output_size])
#print([n_hidden_5, output_size])
return output
def loss(output, y):
"""
Computes softmax cross entropy between logits and labels and then the loss
intput:
- output: the output of the inference function
- y: true value of the sample batch
the two have the same shape (batch_size * num_of_classes)
output:
- loss: loss of the corresponding batch (scalar tensor)
"""
#Computes softmax cross entropy between logits and labels.
xentropy = tf.nn.softmax_cross_entropy_with_logits(logits=output, labels=y)
loss = tf.reduce_mean(xentropy)
return loss
def training(cost, global_step):
"""
defines the necessary elements to train the network
intput:
- cost: the cost is the loss of the corresponding batch
- global_step: number of batch seen so far, it is incremented by one each time the .minimize() function is called
"""
tf.summary.scalar("cost", cost)
# using Adam Optimizer
optimizer = tf.train.AdamOptimizer(learning_rate)
train_op = optimizer.minimize(cost, global_step=global_step)
return train_op
def evaluate(output, y):
"""
evaluates the accuracy on the validation set
input:
-output: prediction vector of the network for the validation set
-y: true value for the validation set
output:
- accuracy: accuracy on the validation set (scalar between 0 and 1)
"""
#correct prediction is a binary vector which equals one when the output and y match
#otherwise the vector equals 0
#tf.cast: change the type of a tensor into another one
#then, by taking the mean of the tensor, we directly have the average score, so the accuracy
correct_prediction = tf.equal(tf.argmax(output, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
tf.summary.scalar("validation_error", (1.0 - accuracy))
return accuracy
epochs_completed = 0
index_in_epoch = 0
num_examples = X_train.shape[0]
# for splitting out batches of data
def next_batch(batch_size):
global X_train
global y_train
global index_in_epoch
global epochs_completed
start = index_in_epoch
index_in_epoch += batch_size
# when all trainig data have been already used, it is reorder randomly
if index_in_epoch > num_examples:
# finished epoch
epochs_completed += 1
# shuffle the data
perm = np.arange(num_examples)
np.random.shuffle(perm)
X_train = X_train[perm]
y_train = y_train[perm]
# start next epoch
start = 0
index_in_epoch = batch_size
assert batch_size <= num_examples
end = index_in_epoch
return X_train[start:end], y_train[start:end]
if __name__ == '__main__':
with tf.Graph().as_default():
with tf.variable_scope("MNIST_convoultional_model"):
x = tf.placeholder("float", [None, 400])
y = tf.placeholder("float", [None,3])
output = inference(x)
cost = loss(output, y)
global_step = tf.Variable(0, name='global_step', trainable=False)
train_op = training(cost, global_step)
eval_op = evaluate(output, y)
summary_op = tf.summary.merge_all()
sess = tf.Session()
saver = tf.train.Saver()
summary_writer = tf.summary.FileWriter(file_path, sess.graph)
init_op = tf.global_variables_initializer()
sess.run(init_op)
# Training cycle
for epoch in range(training_epochs):
avg_cost = 0.0
total_batch = int(len(df3)/batch_size)
# Loop over all batches
for i in range(total_batch):
minibatch_x, minibatch_y = next_batch(batch_size)
#minibatch_y=minibatch_y.reshape(batch_size ,1)
# Fit training using batch data
sess.run(train_op, feed_dict={x: minibatch_x, y: minibatch_y})
# Compute average loss
avg_cost += sess.run(cost, feed_dict={x: minibatch_x, y: minibatch_y})/total_batch
# Display logs per epoch step
if epoch % display_step == 0:
print("Epoch:", '%04d' % (epoch+1), "cost =", "{:0.9f}".format(avg_cost))
accuracy = sess.run(eval_op, feed_dict={x: X_test, y: y_test})
print("Validation Error:", (1 - accuracy))
summary_str = sess.run(summary_op, feed_dict={x: minibatch_x, y: minibatch_y})
summary_writer.add_summary(summary_str, sess.run(global_step))
saver.save(sess, '/Users/ayoujiljad/Documents/Python/2018212-12-06-2018_3_12_22/test_model')
print("Optimization Done")
accuracy = sess.run(eval_op, feed_dict={x: X_test, y: y_test})
print("Test Accuracy:", accuracy)
elapsed_time = time.time() - start_time
print('Execution time was %0.3f' % elapsed_time) | ile_path = Path | conditional_block |
DLProject_DNN.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Dec 9 19:12:51 2018
@author: ayoujiljad
"""
# -*- coding: utf-8 -*-
"""
Éditeur de Spyder
Ceci est un script temporaire.
"""
import tensorflow as tf
import time
import os
import numpy as np
import pandas as pd
import numpy as np
import math
import random
from numpy import linalg as LA
from tensorflow.examples.tutorials.mnist import input_data
from sklearn.model_selection import train_test_split
## Changing working directory
cwd = os.getcwd()
Path = input("What path are we going to use : ")
if (Path == "VM") :
path = '/home/ja3291/'
file_path = 'ESZ2018.csv'
elif (Path == ""):
path = '/Users/ayoujiljad/Documents/Python/2018212-12-06-2018_3_12_22/'
file_path = 'ESZ2018.csv'
else:
file_path = Path
os.chdir(path)
cwd = os.getcwd()
# 256 neurons in each hidden layers
n_hidden_1 = 400
n_hidden_2 = 300
n_hidden_3 = 200
n_hidden_4 = 100
n_hidden_5 = 50
# There are 10 levels, and we consider 40 timestamps and the mid_price
# output size
input_size = 400
output_size = 3
# Parameters
learning_rate = 0.001
training_epochs = 2500
batch_size = 32
display_step = 1
test_size=0.2
start_time = time.time()
# number of previous timestamps used for prediction
n_times = 40
# number of quote levels used for prediction
n_levels = 10
split=0.7
#Labels to extract from data
L_labels = ['Date', 'Time', 'L1-BidPrice', 'L1-BidSize', 'L1-BuyNo', 'L1-AskPrice',
'L1-AskSize', 'L1-SellNo', 'L2-BidPrice', 'L2-BidSize', 'L2-BuyNo',
'L2-AskPrice', 'L2-AskSize', 'L2-SellNo', 'L3-BidPrice', 'L3-BidSize',
'L3-BuyNo', 'L3-AskPrice', 'L3-AskSize', 'L3-SellNo', 'L4-BidPrice',
'L4-BidSize', 'L4-BuyNo', 'L4-AskPrice', 'L4-AskSize', 'L4-SellNo',
'L5-BidPrice', 'L5-BidSize', 'L5-BuyNo', 'L5-AskPrice', 'L5-AskSize',
'L5-SellNo', 'L6-BidPrice', 'L6-BidSize', 'L6-BuyNo', 'L6-AskPrice',
'L6-AskSize', 'L6-SellNo', 'L7-BidPrice', 'L7-BidSize', 'L7-BuyNo',
'L7-AskPrice', 'L7-AskSize', 'L7-SellNo', 'L8-BidPrice', 'L8-BidSize',
'L8-BuyNo', 'L8-AskPrice', 'L8-AskSize', 'L8-SellNo', 'L9-BidPrice',
'L9-BidSize', 'L9-BuyNo', 'L9-AskPrice', 'L9-AskSize', 'L9-SellNo',
'L10-BidPrice', 'L10-BidSize', 'L10-BuyNo', 'L10-AskPrice', 'L10-AskSize',
'L10-SellNo']
# import tick data from the given path
print("Importing the data...")
Nrow = input("How many rows in the dataset do you want to consider ?")
if (Nrow == ''):
df_import = pd.read_csv(file_path)
else :
df_import = pd.read_csv(file_path, nrows=int(Nrow))
df = df_import[L_labels]
print("Done.")
df2 = pd.DataFrame()
print()
print("Rearraging the data to compute the mid prices..")
print(" First step..")
count = 0
for i in range(n_levels):
if (count/10 < np.floor(10*i/n_levels)):
count = round(100*i/n_levels)
print (" "+str(count)+" %"),
i = i+1
df2['relative depth'+str(i)]=df['L'+str(i)+'-BidSize']/(df['L'+str(i)+'-BidSize']+df['L'+str(i)+'-AskSize'])
df2['mid price']=(df['L1-AskPrice']+df['L1-BidPrice'])/2
df2['mid_price_change'] = 0
print(" Done.. ")
print(" Second step..")
count = 0
for i in range(1,len(df2)):
if (count/10 < np.floor(10*i/len(df2))):
count = round(100*i/len(df2))
print(" "+str(count)+" %")
df2.loc[i,'mid_price_change'] = df2.loc[i,'mid price']- df2.loc[i-1,'mid price']
print(" Done.. ")
print(" Third step..")
df3 = pd.DataFrame()
count = 0
for i in range(len(df2)-n_times):
if (count/10 < np.floor(10*i/(len(df2)-n_times))):
count = round(100*i/(len(df2)-n_times))
print(" "+str(count)+" %")
my_list = []
dataf = df2.iloc[i:(i+n_times),0:10]
datf = dataf.values.reshape(dataf.shape[0]*dataf.shape[1])
df_temp = pd.DataFrame([list(datf)])
df_temp['mid_price_change']=df2.loc[i+n_times,'mid_price_change']
df3 = pd.concat([df3,df_temp],ignore_index=True)
print(" Done.. ")
df3['classification'] = 0
df3['classification'] = df3['mid_price_change'].apply(lambda x: 0 if x == 0 else 1 if x > 0 else -1)
df3 = df3.drop(['mid_price_change'],axis=1)
print("Done.")
print()
# undersampling with temporal bias
print("Random undersampling...")
# set up a remaining rate
rr = 0.5
# construct bins of the majority class
my_list = []
temp_list = []
remain_index = []
for i in range(len(df3)):
if df3.loc[i,'classification']==0:
temp_list.append(i)
else:
remain_index.append(i)
if len(temp_list)>0:
my_list.append(temp_list)
temp_list = []
# in each bin, drop the data with a relative probability according to its position
for bins in my_list:
length = len(bins)
temp_list = np.arange(length)+1
pp = temp_list/sum(temp_list)
num = math.ceil(length*rr)
temp_index = np.random.choice(temp_list,num,p=pp,replace=False)
for i in temp_index:
remain_index.append(bins[i-1])
df3 = df3.loc[remain_index]
df3 = df3.sort_index()
df3 = df3.reset_index(drop=True)
print("Done.")
print()
# Oversampling using Smote algorithm
# setup the oversampling/enlarging rate er and the number of nearest neighbors k
k = 2
print("Smote Algorithm Oversampling...")
# calculate difference between 2 feature vectors
def d | x,y,df,my_dict):
x_index = my_dict[x]
y_index = my_dict[y]
vec1 = np.array(df3.iloc[x_index,:df.shape[1]-1])
vec2 = np.array(df3.iloc[y_index,:df.shape[1]-1])
vec_diff = vec2-vec1
return vec_diff
# find index of nearest k neighbors from j's row of data
def find_neighbors(j,k,my_dict,my_dict2,df):
i = my_dict[j]
global dist_matrix
for l in range(len(dist_matrix)):
if dist_matrix[i][l]!=0 or i == l:
continue
dist_matrix[i][l]=dist_matrix[l][i]=LA.norm(diff(i,l,df,my_dict2))
a = dist_matrix[i]
neighbors = sorted(range(len(a)), key=lambda i: a[i])[:k+1]
neighbors.remove(i)
res = []
for l in neighbors:
res.append(my_dict2[l])
return res
temp_list = []
temp_list_neg = []
for i in range(len(df3)):
if df3.loc[i,'classification']==1:
temp_list.append(i)
if df3.loc[i,'classification']==-1:
temp_list_neg.append(i)
temp_list = np.asarray(temp_list)
length = len(temp_list)
temp_list_neg = np.asarray(temp_list_neg)
length_neg = len(temp_list_neg)
num_pos = max(1,len(df3)-length)
num_neg = max(1,len(df3)-length_neg)
# oversample the data with positive mid_price move
# initialize a matrix to save the feature difference between 2 feature vectors
dist_matrix = np.zeros((length,length))
# 2 dictionary to transform between matrix index and dataframe row index
my_dict = {}
my_dict2 = {}
for i in range(length):
my_dict[temp_list[i]]=i
my_dict2[i] = temp_list[i]
# main function
case_index = np.random.choice(temp_list,num_pos,replace=True)
count = 1
for j in case_index:
top_indexs = find_neighbors(j,k,my_dict,my_dict2,df3)
top_indexs = np.asarray(top_indexs)
my_index = np.random.choice(top_indexs)
temp_array = np.array(df3.iloc[j,:df3.shape[1]-1])
difference = diff(my_dict[j],my_dict[my_index],df3,my_dict2)
ran_num = random.uniform(0, 1)
temp_array = temp_array+difference*ran_num
temp_array = temp_array.tolist()
temp_array.append(1)
df3.loc[j+count/(num+1)] = temp_array
count += 1
df3 = df3.sort_index()
df3 = df3.reset_index(drop=True)
# oversample the data with negative mid_price move
temp_list = temp_list_neg
length = length_neg
# initialize a matrix to save the feature difference between 2 feature vectors
dist_matrix = np.zeros((length,length))
# 2 dictionary to transform between matrix index and dataframe row index
my_dict = {}
my_dict2 = {}
for i in range(length):
my_dict[temp_list[i]]=i
my_dict2[i] = temp_list[i]
# main function
case_index = np.random.choice(temp_list,num_neg,replace=True)
count = 1
for j in case_index:
top_indexs = find_neighbors(j,k,my_dict,my_dict2,df3)
top_indexs = np.asarray(top_indexs)
my_index = np.random.choice(top_indexs)
temp_array = np.array(df3.iloc[j,:df3.shape[1]-1])
difference = diff(my_dict[j],my_dict[my_index],df3,my_dict2)
ran_num = random.uniform(0, 1)
temp_array = temp_array+difference*ran_num
temp_array = temp_array.tolist()
temp_array.append(-1)
df3.loc[j+count/(num+1)] = temp_array
count += 1
df3 = df3.sort_index()
df3 = df3.reset_index(drop=True)
print("Done.")
print()
df3['classification'] = df3['classification'].apply(lambda x: [0,1,0] if x == 0 else [1,0,0] if x > 0 else [0,0,1])
#df3['classification']=df3['classification'].reshape
#Splitting the data into a training and test data:
y = df3['classification'].values
y=np.stack(y)
X = df3.drop('classification', axis=1).values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = test_size, random_state=42, stratify=y)
#y_test=y_test.reshape(X_test.shape[0],3)
def layer1(x, weight_shape, bias_shape):
"""
Defines the network layers
input:
- x: input vector of the layer
- weight_shape: shape the the weight maxtrix
- bias_shape: shape of the bias vector
output:
- output vector of the layer after the matrix multiplication and transformation
"""
weight_init = tf.random_normal_initializer(stddev=(2.0/weight_shape[0])**0.5)
W = tf.get_variable("W", weight_shape, initializer=weight_init)
bias_init = tf.constant_initializer(value=0)
b = tf.get_variable("b", bias_shape, initializer=bias_init)
return tf.nn.softmax(tf.matmul(x, W) + b)
def layer2(x, weight_shape, bias_shape):
"""
Defines the network layers
input:
- x: input vector of the layer
- weight_shape: shape the the weight maxtrix
- bias_shape: shape of the bias vector
output:
- output vector of the layer after the matrix multiplication and transformation
"""
weight_init = tf.random_normal_initializer(stddev=(2.0/weight_shape[0])**0.5)
W = tf.get_variable("W", weight_shape, initializer=weight_init)
bias_init = tf.constant_initializer(value=0)
b = tf.get_variable("b", bias_shape, initializer=bias_init)
return tf.nn.relu(tf.matmul(x, W) + b)
def inference(x):
"""
define the whole network (5 hidden layers + output layers)
input:
- a batch of pictures
(input shape = (batch_size*image_size))
output:
- a batch vector corresponding to the logits predicted by the network
(output shape = (batch_size*output_size))
"""
print(type(x))
print(np.shape(x))
print(x)
with tf.variable_scope("hidden_layer_1"):
hidden_1 = layer2(x, [input_size, n_hidden_1], [n_hidden_1])
#print([input_size, n_hidden_1])
with tf.variable_scope("hidden_layer_2"):
hidden_2 = layer2(hidden_1, [n_hidden_1, n_hidden_2], [n_hidden_2])
#print([n_hidden_1, n_hidden_2])
with tf.variable_scope("hidden_layer_3"):
hidden_3 = layer2(hidden_2, [n_hidden_2, n_hidden_3], [n_hidden_3])
#print([n_hidden_2, n_hidden_3])
with tf.variable_scope("hidden_layer_4"):
hidden_4 = layer2(hidden_3, [n_hidden_3, n_hidden_4], [n_hidden_4])
#print([n_hidden_3, n_hidden_4])
with tf.variable_scope("hidden_layer_5"):
hidden_5 = layer2(hidden_4, [n_hidden_4, n_hidden_5], [n_hidden_5])
#print([n_hidden_4, n_hidden_5])
with tf.variable_scope("output"):
output = layer1(hidden_5, [n_hidden_5, output_size], [output_size])
#print([n_hidden_5, output_size])
return output
def loss(output, y):
"""
Computes softmax cross entropy between logits and labels and then the loss
intput:
- output: the output of the inference function
- y: true value of the sample batch
the two have the same shape (batch_size * num_of_classes)
output:
- loss: loss of the corresponding batch (scalar tensor)
"""
#Computes softmax cross entropy between logits and labels.
xentropy = tf.nn.softmax_cross_entropy_with_logits(logits=output, labels=y)
loss = tf.reduce_mean(xentropy)
return loss
def training(cost, global_step):
"""
defines the necessary elements to train the network
intput:
- cost: the cost is the loss of the corresponding batch
- global_step: number of batch seen so far, it is incremented by one each time the .minimize() function is called
"""
tf.summary.scalar("cost", cost)
# using Adam Optimizer
optimizer = tf.train.AdamOptimizer(learning_rate)
train_op = optimizer.minimize(cost, global_step=global_step)
return train_op
def evaluate(output, y):
"""
evaluates the accuracy on the validation set
input:
-output: prediction vector of the network for the validation set
-y: true value for the validation set
output:
- accuracy: accuracy on the validation set (scalar between 0 and 1)
"""
#correct prediction is a binary vector which equals one when the output and y match
#otherwise the vector equals 0
#tf.cast: change the type of a tensor into another one
#then, by taking the mean of the tensor, we directly have the average score, so the accuracy
correct_prediction = tf.equal(tf.argmax(output, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
tf.summary.scalar("validation_error", (1.0 - accuracy))
return accuracy
epochs_completed = 0
index_in_epoch = 0
num_examples = X_train.shape[0]
# for splitting out batches of data
def next_batch(batch_size):
global X_train
global y_train
global index_in_epoch
global epochs_completed
start = index_in_epoch
index_in_epoch += batch_size
# when all trainig data have been already used, it is reorder randomly
if index_in_epoch > num_examples:
# finished epoch
epochs_completed += 1
# shuffle the data
perm = np.arange(num_examples)
np.random.shuffle(perm)
X_train = X_train[perm]
y_train = y_train[perm]
# start next epoch
start = 0
index_in_epoch = batch_size
assert batch_size <= num_examples
end = index_in_epoch
return X_train[start:end], y_train[start:end]
if __name__ == '__main__':
with tf.Graph().as_default():
with tf.variable_scope("MNIST_convoultional_model"):
x = tf.placeholder("float", [None, 400])
y = tf.placeholder("float", [None,3])
output = inference(x)
cost = loss(output, y)
global_step = tf.Variable(0, name='global_step', trainable=False)
train_op = training(cost, global_step)
eval_op = evaluate(output, y)
summary_op = tf.summary.merge_all()
sess = tf.Session()
saver = tf.train.Saver()
summary_writer = tf.summary.FileWriter(file_path, sess.graph)
init_op = tf.global_variables_initializer()
sess.run(init_op)
# Training cycle
for epoch in range(training_epochs):
avg_cost = 0.0
total_batch = int(len(df3)/batch_size)
# Loop over all batches
for i in range(total_batch):
minibatch_x, minibatch_y = next_batch(batch_size)
#minibatch_y=minibatch_y.reshape(batch_size ,1)
# Fit training using batch data
sess.run(train_op, feed_dict={x: minibatch_x, y: minibatch_y})
# Compute average loss
avg_cost += sess.run(cost, feed_dict={x: minibatch_x, y: minibatch_y})/total_batch
# Display logs per epoch step
if epoch % display_step == 0:
print("Epoch:", '%04d' % (epoch+1), "cost =", "{:0.9f}".format(avg_cost))
accuracy = sess.run(eval_op, feed_dict={x: X_test, y: y_test})
print("Validation Error:", (1 - accuracy))
summary_str = sess.run(summary_op, feed_dict={x: minibatch_x, y: minibatch_y})
summary_writer.add_summary(summary_str, sess.run(global_step))
saver.save(sess, '/Users/ayoujiljad/Documents/Python/2018212-12-06-2018_3_12_22/test_model')
print("Optimization Done")
accuracy = sess.run(eval_op, feed_dict={x: X_test, y: y_test})
print("Test Accuracy:", accuracy)
elapsed_time = time.time() - start_time
print('Execution time was %0.3f' % elapsed_time) | iff( | identifier_name |
DLProject_DNN.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Dec 9 19:12:51 2018
@author: ayoujiljad
"""
# -*- coding: utf-8 -*-
"""
Éditeur de Spyder
Ceci est un script temporaire.
"""
import tensorflow as tf
import time
import os
import numpy as np
import pandas as pd
import numpy as np
import math
import random
from numpy import linalg as LA
from tensorflow.examples.tutorials.mnist import input_data
from sklearn.model_selection import train_test_split
## Changing working directory
cwd = os.getcwd()
Path = input("What path are we going to use : ")
if (Path == "VM") :
path = '/home/ja3291/'
file_path = 'ESZ2018.csv'
elif (Path == ""):
path = '/Users/ayoujiljad/Documents/Python/2018212-12-06-2018_3_12_22/'
file_path = 'ESZ2018.csv'
else:
file_path = Path
os.chdir(path)
cwd = os.getcwd()
# 256 neurons in each hidden layers
n_hidden_1 = 400
n_hidden_2 = 300
n_hidden_3 = 200
n_hidden_4 = 100
n_hidden_5 = 50
# There are 10 levels, and we consider 40 timestamps and the mid_price
# output size
input_size = 400
output_size = 3
# Parameters
learning_rate = 0.001
training_epochs = 2500
batch_size = 32
display_step = 1
test_size=0.2
start_time = time.time()
# number of previous timestamps used for prediction
n_times = 40
# number of quote levels used for prediction
n_levels = 10
split=0.7
#Labels to extract from data
L_labels = ['Date', 'Time', 'L1-BidPrice', 'L1-BidSize', 'L1-BuyNo', 'L1-AskPrice',
'L1-AskSize', 'L1-SellNo', 'L2-BidPrice', 'L2-BidSize', 'L2-BuyNo',
'L2-AskPrice', 'L2-AskSize', 'L2-SellNo', 'L3-BidPrice', 'L3-BidSize',
'L3-BuyNo', 'L3-AskPrice', 'L3-AskSize', 'L3-SellNo', 'L4-BidPrice',
'L4-BidSize', 'L4-BuyNo', 'L4-AskPrice', 'L4-AskSize', 'L4-SellNo',
'L5-BidPrice', 'L5-BidSize', 'L5-BuyNo', 'L5-AskPrice', 'L5-AskSize',
'L5-SellNo', 'L6-BidPrice', 'L6-BidSize', 'L6-BuyNo', 'L6-AskPrice',
'L6-AskSize', 'L6-SellNo', 'L7-BidPrice', 'L7-BidSize', 'L7-BuyNo',
'L7-AskPrice', 'L7-AskSize', 'L7-SellNo', 'L8-BidPrice', 'L8-BidSize',
'L8-BuyNo', 'L8-AskPrice', 'L8-AskSize', 'L8-SellNo', 'L9-BidPrice',
'L9-BidSize', 'L9-BuyNo', 'L9-AskPrice', 'L9-AskSize', 'L9-SellNo',
'L10-BidPrice', 'L10-BidSize', 'L10-BuyNo', 'L10-AskPrice', 'L10-AskSize',
'L10-SellNo']
# import tick data from the given path
print("Importing the data...")
Nrow = input("How many rows in the dataset do you want to consider ?")
if (Nrow == ''):
df_import = pd.read_csv(file_path)
else :
df_import = pd.read_csv(file_path, nrows=int(Nrow))
df = df_import[L_labels]
print("Done.")
df2 = pd.DataFrame()
print()
print("Rearraging the data to compute the mid prices..")
print(" First step..")
count = 0
for i in range(n_levels):
if (count/10 < np.floor(10*i/n_levels)):
count = round(100*i/n_levels)
print (" "+str(count)+" %"),
i = i+1
df2['relative depth'+str(i)]=df['L'+str(i)+'-BidSize']/(df['L'+str(i)+'-BidSize']+df['L'+str(i)+'-AskSize'])
df2['mid price']=(df['L1-AskPrice']+df['L1-BidPrice'])/2
df2['mid_price_change'] = 0
print(" Done.. ")
print(" Second step..")
count = 0
for i in range(1,len(df2)):
if (count/10 < np.floor(10*i/len(df2))):
count = round(100*i/len(df2))
print(" "+str(count)+" %")
df2.loc[i,'mid_price_change'] = df2.loc[i,'mid price']- df2.loc[i-1,'mid price']
print(" Done.. ")
print(" Third step..")
df3 = pd.DataFrame()
count = 0
for i in range(len(df2)-n_times):
if (count/10 < np.floor(10*i/(len(df2)-n_times))):
count = round(100*i/(len(df2)-n_times))
print(" "+str(count)+" %")
my_list = []
dataf = df2.iloc[i:(i+n_times),0:10]
datf = dataf.values.reshape(dataf.shape[0]*dataf.shape[1])
df_temp = pd.DataFrame([list(datf)])
df_temp['mid_price_change']=df2.loc[i+n_times,'mid_price_change']
df3 = pd.concat([df3,df_temp],ignore_index=True)
print(" Done.. ")
df3['classification'] = 0
df3['classification'] = df3['mid_price_change'].apply(lambda x: 0 if x == 0 else 1 if x > 0 else -1)
df3 = df3.drop(['mid_price_change'],axis=1)
print("Done.")
print()
# undersampling with temporal bias
print("Random undersampling...")
# set up a remaining rate
rr = 0.5
# construct bins of the majority class
my_list = []
temp_list = []
remain_index = []
for i in range(len(df3)):
if df3.loc[i,'classification']==0:
temp_list.append(i)
else:
remain_index.append(i)
if len(temp_list)>0:
my_list.append(temp_list)
temp_list = []
# in each bin, drop the data with a relative probability according to its position
for bins in my_list:
length = len(bins)
temp_list = np.arange(length)+1
pp = temp_list/sum(temp_list)
num = math.ceil(length*rr)
temp_index = np.random.choice(temp_list,num,p=pp,replace=False)
for i in temp_index:
remain_index.append(bins[i-1])
df3 = df3.loc[remain_index]
df3 = df3.sort_index()
df3 = df3.reset_index(drop=True)
print("Done.")
print()
# Oversampling using Smote algorithm
# setup the oversampling/enlarging rate er and the number of nearest neighbors k
k = 2
print("Smote Algorithm Oversampling...")
# calculate difference between 2 feature vectors
def diff(x,y,df,my_dict):
x_index = my_dict[x]
y_index = my_dict[y]
vec1 = np.array(df3.iloc[x_index,:df.shape[1]-1])
vec2 = np.array(df3.iloc[y_index,:df.shape[1]-1])
vec_diff = vec2-vec1
return vec_diff
# find index of nearest k neighbors from j's row of data
def find_neighbors(j,k,my_dict,my_dict2,df):
i = my_dict[j]
global dist_matrix
for l in range(len(dist_matrix)):
if dist_matrix[i][l]!=0 or i == l:
continue
dist_matrix[i][l]=dist_matrix[l][i]=LA.norm(diff(i,l,df,my_dict2))
a = dist_matrix[i]
neighbors = sorted(range(len(a)), key=lambda i: a[i])[:k+1]
neighbors.remove(i)
res = []
for l in neighbors:
res.append(my_dict2[l])
return res
temp_list = []
temp_list_neg = []
for i in range(len(df3)):
if df3.loc[i,'classification']==1:
temp_list.append(i)
if df3.loc[i,'classification']==-1:
temp_list_neg.append(i)
temp_list = np.asarray(temp_list)
length = len(temp_list)
temp_list_neg = np.asarray(temp_list_neg)
length_neg = len(temp_list_neg)
num_pos = max(1,len(df3)-length)
num_neg = max(1,len(df3)-length_neg)
# oversample the data with positive mid_price move
# initialize a matrix to save the feature difference between 2 feature vectors
dist_matrix = np.zeros((length,length))
# 2 dictionary to transform between matrix index and dataframe row index
my_dict = {}
my_dict2 = {}
for i in range(length):
my_dict[temp_list[i]]=i
my_dict2[i] = temp_list[i]
# main function
case_index = np.random.choice(temp_list,num_pos,replace=True)
count = 1
for j in case_index:
top_indexs = find_neighbors(j,k,my_dict,my_dict2,df3)
top_indexs = np.asarray(top_indexs)
my_index = np.random.choice(top_indexs)
temp_array = np.array(df3.iloc[j,:df3.shape[1]-1])
difference = diff(my_dict[j],my_dict[my_index],df3,my_dict2)
ran_num = random.uniform(0, 1)
temp_array = temp_array+difference*ran_num
temp_array = temp_array.tolist()
temp_array.append(1)
df3.loc[j+count/(num+1)] = temp_array
count += 1
df3 = df3.sort_index()
df3 = df3.reset_index(drop=True)
# oversample the data with negative mid_price move
temp_list = temp_list_neg
length = length_neg
# initialize a matrix to save the feature difference between 2 feature vectors
dist_matrix = np.zeros((length,length))
# 2 dictionary to transform between matrix index and dataframe row index
my_dict = {}
my_dict2 = {}
for i in range(length):
my_dict[temp_list[i]]=i
my_dict2[i] = temp_list[i]
# main function
case_index = np.random.choice(temp_list,num_neg,replace=True)
count = 1
for j in case_index:
top_indexs = find_neighbors(j,k,my_dict,my_dict2,df3)
top_indexs = np.asarray(top_indexs)
my_index = np.random.choice(top_indexs)
temp_array = np.array(df3.iloc[j,:df3.shape[1]-1])
difference = diff(my_dict[j],my_dict[my_index],df3,my_dict2)
ran_num = random.uniform(0, 1)
temp_array = temp_array+difference*ran_num
temp_array = temp_array.tolist()
temp_array.append(-1)
df3.loc[j+count/(num+1)] = temp_array
count += 1
df3 = df3.sort_index()
df3 = df3.reset_index(drop=True)
print("Done.")
print()
df3['classification'] = df3['classification'].apply(lambda x: [0,1,0] if x == 0 else [1,0,0] if x > 0 else [0,0,1])
#df3['classification']=df3['classification'].reshape
#Splitting the data into a training and test data:
y = df3['classification'].values
y=np.stack(y)
X = df3.drop('classification', axis=1).values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = test_size, random_state=42, stratify=y)
#y_test=y_test.reshape(X_test.shape[0],3)
def layer1(x, weight_shape, bias_shape):
"""
Defines the network layers
input:
- x: input vector of the layer
- weight_shape: shape the the weight maxtrix
- bias_shape: shape of the bias vector
output:
- output vector of the layer after the matrix multiplication and transformation
"""
weight_init = tf.random_normal_initializer(stddev=(2.0/weight_shape[0])**0.5)
W = tf.get_variable("W", weight_shape, initializer=weight_init)
bias_init = tf.constant_initializer(value=0)
b = tf.get_variable("b", bias_shape, initializer=bias_init)
return tf.nn.softmax(tf.matmul(x, W) + b)
def layer2(x, weight_shape, bias_shape):
"""
Defines the network layers
input:
- x: input vector of the layer
- weight_shape: shape the the weight maxtrix
- bias_shape: shape of the bias vector
output:
- output vector of the layer after the matrix multiplication and transformation
"""
weight_init = tf.random_normal_initializer(stddev=(2.0/weight_shape[0])**0.5)
W = tf.get_variable("W", weight_shape, initializer=weight_init)
bias_init = tf.constant_initializer(value=0)
b = tf.get_variable("b", bias_shape, initializer=bias_init)
return tf.nn.relu(tf.matmul(x, W) + b)
def inference(x):
"""
define the whole network (5 hidden layers + output layers)
input:
- a batch of pictures
(input shape = (batch_size*image_size))
output:
- a batch vector corresponding to the logits predicted by the network
(output shape = (batch_size*output_size))
"""
print(type(x))
print(np.shape(x))
print(x)
with tf.variable_scope("hidden_layer_1"):
hidden_1 = layer2(x, [input_size, n_hidden_1], [n_hidden_1])
#print([input_size, n_hidden_1])
with tf.variable_scope("hidden_layer_2"):
hidden_2 = layer2(hidden_1, [n_hidden_1, n_hidden_2], [n_hidden_2])
#print([n_hidden_1, n_hidden_2])
with tf.variable_scope("hidden_layer_3"):
hidden_3 = layer2(hidden_2, [n_hidden_2, n_hidden_3], [n_hidden_3])
#print([n_hidden_2, n_hidden_3])
with tf.variable_scope("hidden_layer_4"):
hidden_4 = layer2(hidden_3, [n_hidden_3, n_hidden_4], [n_hidden_4])
#print([n_hidden_3, n_hidden_4])
with tf.variable_scope("hidden_layer_5"):
hidden_5 = layer2(hidden_4, [n_hidden_4, n_hidden_5], [n_hidden_5])
#print([n_hidden_4, n_hidden_5])
with tf.variable_scope("output"):
output = layer1(hidden_5, [n_hidden_5, output_size], [output_size])
#print([n_hidden_5, output_size])
return output
def loss(output, y):
"""
Computes softmax cross entropy between logits and labels and then the loss
intput:
- output: the output of the inference function
- y: true value of the sample batch
the two have the same shape (batch_size * num_of_classes)
output:
- loss: loss of the corresponding batch (scalar tensor)
"""
#Computes softmax cross entropy between logits and labels.
xentropy = tf.nn.softmax_cross_entropy_with_logits(logits=output, labels=y)
loss = tf.reduce_mean(xentropy)
return loss
def training(cost, global_step):
"""
defines the necessary elements to train the network
intput:
- cost: the cost is the loss of the corresponding batch
- global_step: number of batch seen so far, it is incremented by one each time the .minimize() function is called
"""
tf.summary.scalar("cost", cost)
# using Adam Optimizer
optimizer = tf.train.AdamOptimizer(learning_rate)
train_op = optimizer.minimize(cost, global_step=global_step)
return train_op
def evaluate(output, y):
"""
evaluates the accuracy on the validation set
input:
-output: prediction vector of the network for the validation set
-y: true value for the validation set
output:
- accuracy: accuracy on the validation set (scalar between 0 and 1)
"""
#correct prediction is a binary vector which equals one when the output and y match
#otherwise the vector equals 0
#tf.cast: change the type of a tensor into another one
#then, by taking the mean of the tensor, we directly have the average score, so the accuracy
correct_prediction = tf.equal(tf.argmax(output, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
tf.summary.scalar("validation_error", (1.0 - accuracy))
return accuracy
epochs_completed = 0
index_in_epoch = 0
num_examples = X_train.shape[0]
# for splitting out batches of data
def next_batch(batch_size):
g |
if __name__ == '__main__':
with tf.Graph().as_default():
with tf.variable_scope("MNIST_convoultional_model"):
x = tf.placeholder("float", [None, 400])
y = tf.placeholder("float", [None,3])
output = inference(x)
cost = loss(output, y)
global_step = tf.Variable(0, name='global_step', trainable=False)
train_op = training(cost, global_step)
eval_op = evaluate(output, y)
summary_op = tf.summary.merge_all()
sess = tf.Session()
saver = tf.train.Saver()
summary_writer = tf.summary.FileWriter(file_path, sess.graph)
init_op = tf.global_variables_initializer()
sess.run(init_op)
# Training cycle
for epoch in range(training_epochs):
avg_cost = 0.0
total_batch = int(len(df3)/batch_size)
# Loop over all batches
for i in range(total_batch):
minibatch_x, minibatch_y = next_batch(batch_size)
#minibatch_y=minibatch_y.reshape(batch_size ,1)
# Fit training using batch data
sess.run(train_op, feed_dict={x: minibatch_x, y: minibatch_y})
# Compute average loss
avg_cost += sess.run(cost, feed_dict={x: minibatch_x, y: minibatch_y})/total_batch
# Display logs per epoch step
if epoch % display_step == 0:
print("Epoch:", '%04d' % (epoch+1), "cost =", "{:0.9f}".format(avg_cost))
accuracy = sess.run(eval_op, feed_dict={x: X_test, y: y_test})
print("Validation Error:", (1 - accuracy))
summary_str = sess.run(summary_op, feed_dict={x: minibatch_x, y: minibatch_y})
summary_writer.add_summary(summary_str, sess.run(global_step))
saver.save(sess, '/Users/ayoujiljad/Documents/Python/2018212-12-06-2018_3_12_22/test_model')
print("Optimization Done")
accuracy = sess.run(eval_op, feed_dict={x: X_test, y: y_test})
print("Test Accuracy:", accuracy)
elapsed_time = time.time() - start_time
print('Execution time was %0.3f' % elapsed_time) | lobal X_train
global y_train
global index_in_epoch
global epochs_completed
start = index_in_epoch
index_in_epoch += batch_size
# when all trainig data have been already used, it is reorder randomly
if index_in_epoch > num_examples:
# finished epoch
epochs_completed += 1
# shuffle the data
perm = np.arange(num_examples)
np.random.shuffle(perm)
X_train = X_train[perm]
y_train = y_train[perm]
# start next epoch
start = 0
index_in_epoch = batch_size
assert batch_size <= num_examples
end = index_in_epoch
return X_train[start:end], y_train[start:end]
| identifier_body |
DLProject_DNN.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Dec 9 19:12:51 2018
@author: ayoujiljad
"""
# -*- coding: utf-8 -*-
"""
Éditeur de Spyder
Ceci est un script temporaire.
"""
import tensorflow as tf
import time
import os
import numpy as np
import pandas as pd
import numpy as np
import math
import random
from numpy import linalg as LA
from tensorflow.examples.tutorials.mnist import input_data
from sklearn.model_selection import train_test_split
## Changing working directory
cwd = os.getcwd()
Path = input("What path are we going to use : ")
if (Path == "VM") :
path = '/home/ja3291/'
file_path = 'ESZ2018.csv'
elif (Path == ""):
path = '/Users/ayoujiljad/Documents/Python/2018212-12-06-2018_3_12_22/'
file_path = 'ESZ2018.csv'
else:
file_path = Path
os.chdir(path)
cwd = os.getcwd()
# 256 neurons in each hidden layers
n_hidden_1 = 400
n_hidden_2 = 300
n_hidden_3 = 200
n_hidden_4 = 100
n_hidden_5 = 50
# There are 10 levels, and we consider 40 timestamps and the mid_price
# output size
input_size = 400
output_size = 3
# Parameters
learning_rate = 0.001
training_epochs = 2500
batch_size = 32
display_step = 1
test_size=0.2
start_time = time.time()
# number of previous timestamps used for prediction
n_times = 40
# number of quote levels used for prediction
n_levels = 10
split=0.7
#Labels to extract from data
L_labels = ['Date', 'Time', 'L1-BidPrice', 'L1-BidSize', 'L1-BuyNo', 'L1-AskPrice',
'L1-AskSize', 'L1-SellNo', 'L2-BidPrice', 'L2-BidSize', 'L2-BuyNo',
'L2-AskPrice', 'L2-AskSize', 'L2-SellNo', 'L3-BidPrice', 'L3-BidSize',
'L3-BuyNo', 'L3-AskPrice', 'L3-AskSize', 'L3-SellNo', 'L4-BidPrice',
'L4-BidSize', 'L4-BuyNo', 'L4-AskPrice', 'L4-AskSize', 'L4-SellNo',
'L5-BidPrice', 'L5-BidSize', 'L5-BuyNo', 'L5-AskPrice', 'L5-AskSize',
'L5-SellNo', 'L6-BidPrice', 'L6-BidSize', 'L6-BuyNo', 'L6-AskPrice',
'L6-AskSize', 'L6-SellNo', 'L7-BidPrice', 'L7-BidSize', 'L7-BuyNo',
'L7-AskPrice', 'L7-AskSize', 'L7-SellNo', 'L8-BidPrice', 'L8-BidSize',
'L8-BuyNo', 'L8-AskPrice', 'L8-AskSize', 'L8-SellNo', 'L9-BidPrice',
'L9-BidSize', 'L9-BuyNo', 'L9-AskPrice', 'L9-AskSize', 'L9-SellNo',
'L10-BidPrice', 'L10-BidSize', 'L10-BuyNo', 'L10-AskPrice', 'L10-AskSize',
'L10-SellNo']
# import tick data from the given path
print("Importing the data...")
Nrow = input("How many rows in the dataset do you want to consider ?")
if (Nrow == ''):
df_import = pd.read_csv(file_path)
else :
df_import = pd.read_csv(file_path, nrows=int(Nrow))
df = df_import[L_labels]
print("Done.")
df2 = pd.DataFrame()
print()
print("Rearraging the data to compute the mid prices..")
print(" First step..")
count = 0
for i in range(n_levels):
if (count/10 < np.floor(10*i/n_levels)):
count = round(100*i/n_levels)
print (" "+str(count)+" %"),
i = i+1
df2['relative depth'+str(i)]=df['L'+str(i)+'-BidSize']/(df['L'+str(i)+'-BidSize']+df['L'+str(i)+'-AskSize'])
df2['mid price']=(df['L1-AskPrice']+df['L1-BidPrice'])/2
df2['mid_price_change'] = 0
print(" Done.. ")
print(" Second step..")
count = 0
for i in range(1,len(df2)):
if (count/10 < np.floor(10*i/len(df2))):
count = round(100*i/len(df2))
print(" "+str(count)+" %")
df2.loc[i,'mid_price_change'] = df2.loc[i,'mid price']- df2.loc[i-1,'mid price']
print(" Done.. ")
print(" Third step..")
df3 = pd.DataFrame()
count = 0
for i in range(len(df2)-n_times):
if (count/10 < np.floor(10*i/(len(df2)-n_times))):
count = round(100*i/(len(df2)-n_times))
print(" "+str(count)+" %")
my_list = []
dataf = df2.iloc[i:(i+n_times),0:10]
datf = dataf.values.reshape(dataf.shape[0]*dataf.shape[1])
df_temp = pd.DataFrame([list(datf)])
df_temp['mid_price_change']=df2.loc[i+n_times,'mid_price_change']
df3 = pd.concat([df3,df_temp],ignore_index=True)
print(" Done.. ")
df3['classification'] = 0
df3['classification'] = df3['mid_price_change'].apply(lambda x: 0 if x == 0 else 1 if x > 0 else -1)
df3 = df3.drop(['mid_price_change'],axis=1)
print("Done.")
print()
# undersampling with temporal bias
print("Random undersampling...")
# set up a remaining rate
rr = 0.5
# construct bins of the majority class
my_list = []
temp_list = []
remain_index = []
for i in range(len(df3)):
if df3.loc[i,'classification']==0:
temp_list.append(i)
else:
remain_index.append(i)
if len(temp_list)>0:
my_list.append(temp_list)
temp_list = []
# in each bin, drop the data with a relative probability according to its position
for bins in my_list:
length = len(bins)
temp_list = np.arange(length)+1
pp = temp_list/sum(temp_list)
num = math.ceil(length*rr)
temp_index = np.random.choice(temp_list,num,p=pp,replace=False)
for i in temp_index:
remain_index.append(bins[i-1])
df3 = df3.loc[remain_index]
df3 = df3.sort_index()
df3 = df3.reset_index(drop=True)
print("Done.")
print()
# Oversampling using Smote algorithm
# setup the oversampling/enlarging rate er and the number of nearest neighbors k
k = 2
print("Smote Algorithm Oversampling...")
# calculate difference between 2 feature vectors
def diff(x,y,df,my_dict):
x_index = my_dict[x]
y_index = my_dict[y]
vec1 = np.array(df3.iloc[x_index,:df.shape[1]-1])
vec2 = np.array(df3.iloc[y_index,:df.shape[1]-1])
vec_diff = vec2-vec1
return vec_diff
# find index of nearest k neighbors from j's row of data
def find_neighbors(j,k,my_dict,my_dict2,df):
i = my_dict[j]
global dist_matrix
for l in range(len(dist_matrix)):
if dist_matrix[i][l]!=0 or i == l:
continue
dist_matrix[i][l]=dist_matrix[l][i]=LA.norm(diff(i,l,df,my_dict2))
a = dist_matrix[i]
neighbors = sorted(range(len(a)), key=lambda i: a[i])[:k+1]
neighbors.remove(i)
res = []
for l in neighbors:
res.append(my_dict2[l])
return res
temp_list = []
temp_list_neg = [] | if df3.loc[i,'classification']==-1:
temp_list_neg.append(i)
temp_list = np.asarray(temp_list)
length = len(temp_list)
temp_list_neg = np.asarray(temp_list_neg)
length_neg = len(temp_list_neg)
num_pos = max(1,len(df3)-length)
num_neg = max(1,len(df3)-length_neg)
# oversample the data with positive mid_price move
# initialize a matrix to save the feature difference between 2 feature vectors
dist_matrix = np.zeros((length,length))
# 2 dictionary to transform between matrix index and dataframe row index
my_dict = {}
my_dict2 = {}
for i in range(length):
my_dict[temp_list[i]]=i
my_dict2[i] = temp_list[i]
# main function
case_index = np.random.choice(temp_list,num_pos,replace=True)
count = 1
for j in case_index:
top_indexs = find_neighbors(j,k,my_dict,my_dict2,df3)
top_indexs = np.asarray(top_indexs)
my_index = np.random.choice(top_indexs)
temp_array = np.array(df3.iloc[j,:df3.shape[1]-1])
difference = diff(my_dict[j],my_dict[my_index],df3,my_dict2)
ran_num = random.uniform(0, 1)
temp_array = temp_array+difference*ran_num
temp_array = temp_array.tolist()
temp_array.append(1)
df3.loc[j+count/(num+1)] = temp_array
count += 1
df3 = df3.sort_index()
df3 = df3.reset_index(drop=True)
# oversample the data with negative mid_price move
temp_list = temp_list_neg
length = length_neg
# initialize a matrix to save the feature difference between 2 feature vectors
dist_matrix = np.zeros((length,length))
# 2 dictionary to transform between matrix index and dataframe row index
my_dict = {}
my_dict2 = {}
for i in range(length):
my_dict[temp_list[i]]=i
my_dict2[i] = temp_list[i]
# main function
case_index = np.random.choice(temp_list,num_neg,replace=True)
count = 1
for j in case_index:
top_indexs = find_neighbors(j,k,my_dict,my_dict2,df3)
top_indexs = np.asarray(top_indexs)
my_index = np.random.choice(top_indexs)
temp_array = np.array(df3.iloc[j,:df3.shape[1]-1])
difference = diff(my_dict[j],my_dict[my_index],df3,my_dict2)
ran_num = random.uniform(0, 1)
temp_array = temp_array+difference*ran_num
temp_array = temp_array.tolist()
temp_array.append(-1)
df3.loc[j+count/(num+1)] = temp_array
count += 1
df3 = df3.sort_index()
df3 = df3.reset_index(drop=True)
print("Done.")
print()
df3['classification'] = df3['classification'].apply(lambda x: [0,1,0] if x == 0 else [1,0,0] if x > 0 else [0,0,1])
#df3['classification']=df3['classification'].reshape
#Splitting the data into a training and test data:
y = df3['classification'].values
y=np.stack(y)
X = df3.drop('classification', axis=1).values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = test_size, random_state=42, stratify=y)
#y_test=y_test.reshape(X_test.shape[0],3)
def layer1(x, weight_shape, bias_shape):
"""
Defines the network layers
input:
- x: input vector of the layer
- weight_shape: shape the the weight maxtrix
- bias_shape: shape of the bias vector
output:
- output vector of the layer after the matrix multiplication and transformation
"""
weight_init = tf.random_normal_initializer(stddev=(2.0/weight_shape[0])**0.5)
W = tf.get_variable("W", weight_shape, initializer=weight_init)
bias_init = tf.constant_initializer(value=0)
b = tf.get_variable("b", bias_shape, initializer=bias_init)
return tf.nn.softmax(tf.matmul(x, W) + b)
def layer2(x, weight_shape, bias_shape):
"""
Defines the network layers
input:
- x: input vector of the layer
- weight_shape: shape the the weight maxtrix
- bias_shape: shape of the bias vector
output:
- output vector of the layer after the matrix multiplication and transformation
"""
weight_init = tf.random_normal_initializer(stddev=(2.0/weight_shape[0])**0.5)
W = tf.get_variable("W", weight_shape, initializer=weight_init)
bias_init = tf.constant_initializer(value=0)
b = tf.get_variable("b", bias_shape, initializer=bias_init)
return tf.nn.relu(tf.matmul(x, W) + b)
def inference(x):
"""
define the whole network (5 hidden layers + output layers)
input:
- a batch of pictures
(input shape = (batch_size*image_size))
output:
- a batch vector corresponding to the logits predicted by the network
(output shape = (batch_size*output_size))
"""
print(type(x))
print(np.shape(x))
print(x)
with tf.variable_scope("hidden_layer_1"):
hidden_1 = layer2(x, [input_size, n_hidden_1], [n_hidden_1])
#print([input_size, n_hidden_1])
with tf.variable_scope("hidden_layer_2"):
hidden_2 = layer2(hidden_1, [n_hidden_1, n_hidden_2], [n_hidden_2])
#print([n_hidden_1, n_hidden_2])
with tf.variable_scope("hidden_layer_3"):
hidden_3 = layer2(hidden_2, [n_hidden_2, n_hidden_3], [n_hidden_3])
#print([n_hidden_2, n_hidden_3])
with tf.variable_scope("hidden_layer_4"):
hidden_4 = layer2(hidden_3, [n_hidden_3, n_hidden_4], [n_hidden_4])
#print([n_hidden_3, n_hidden_4])
with tf.variable_scope("hidden_layer_5"):
hidden_5 = layer2(hidden_4, [n_hidden_4, n_hidden_5], [n_hidden_5])
#print([n_hidden_4, n_hidden_5])
with tf.variable_scope("output"):
output = layer1(hidden_5, [n_hidden_5, output_size], [output_size])
#print([n_hidden_5, output_size])
return output
def loss(output, y):
"""
Computes softmax cross entropy between logits and labels and then the loss
intput:
- output: the output of the inference function
- y: true value of the sample batch
the two have the same shape (batch_size * num_of_classes)
output:
- loss: loss of the corresponding batch (scalar tensor)
"""
#Computes softmax cross entropy between logits and labels.
xentropy = tf.nn.softmax_cross_entropy_with_logits(logits=output, labels=y)
loss = tf.reduce_mean(xentropy)
return loss
def training(cost, global_step):
"""
defines the necessary elements to train the network
intput:
- cost: the cost is the loss of the corresponding batch
- global_step: number of batch seen so far, it is incremented by one each time the .minimize() function is called
"""
tf.summary.scalar("cost", cost)
# using Adam Optimizer
optimizer = tf.train.AdamOptimizer(learning_rate)
train_op = optimizer.minimize(cost, global_step=global_step)
return train_op
def evaluate(output, y):
"""
evaluates the accuracy on the validation set
input:
-output: prediction vector of the network for the validation set
-y: true value for the validation set
output:
- accuracy: accuracy on the validation set (scalar between 0 and 1)
"""
#correct prediction is a binary vector which equals one when the output and y match
#otherwise the vector equals 0
#tf.cast: change the type of a tensor into another one
#then, by taking the mean of the tensor, we directly have the average score, so the accuracy
correct_prediction = tf.equal(tf.argmax(output, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
tf.summary.scalar("validation_error", (1.0 - accuracy))
return accuracy
epochs_completed = 0
index_in_epoch = 0
num_examples = X_train.shape[0]
# for splitting out batches of data
def next_batch(batch_size):
global X_train
global y_train
global index_in_epoch
global epochs_completed
start = index_in_epoch
index_in_epoch += batch_size
# when all trainig data have been already used, it is reorder randomly
if index_in_epoch > num_examples:
# finished epoch
epochs_completed += 1
# shuffle the data
perm = np.arange(num_examples)
np.random.shuffle(perm)
X_train = X_train[perm]
y_train = y_train[perm]
# start next epoch
start = 0
index_in_epoch = batch_size
assert batch_size <= num_examples
end = index_in_epoch
return X_train[start:end], y_train[start:end]
if __name__ == '__main__':
with tf.Graph().as_default():
with tf.variable_scope("MNIST_convoultional_model"):
x = tf.placeholder("float", [None, 400])
y = tf.placeholder("float", [None,3])
output = inference(x)
cost = loss(output, y)
global_step = tf.Variable(0, name='global_step', trainable=False)
train_op = training(cost, global_step)
eval_op = evaluate(output, y)
summary_op = tf.summary.merge_all()
sess = tf.Session()
saver = tf.train.Saver()
summary_writer = tf.summary.FileWriter(file_path, sess.graph)
init_op = tf.global_variables_initializer()
sess.run(init_op)
# Training cycle
for epoch in range(training_epochs):
avg_cost = 0.0
total_batch = int(len(df3)/batch_size)
# Loop over all batches
for i in range(total_batch):
minibatch_x, minibatch_y = next_batch(batch_size)
#minibatch_y=minibatch_y.reshape(batch_size ,1)
# Fit training using batch data
sess.run(train_op, feed_dict={x: minibatch_x, y: minibatch_y})
# Compute average loss
avg_cost += sess.run(cost, feed_dict={x: minibatch_x, y: minibatch_y})/total_batch
# Display logs per epoch step
if epoch % display_step == 0:
print("Epoch:", '%04d' % (epoch+1), "cost =", "{:0.9f}".format(avg_cost))
accuracy = sess.run(eval_op, feed_dict={x: X_test, y: y_test})
print("Validation Error:", (1 - accuracy))
summary_str = sess.run(summary_op, feed_dict={x: minibatch_x, y: minibatch_y})
summary_writer.add_summary(summary_str, sess.run(global_step))
saver.save(sess, '/Users/ayoujiljad/Documents/Python/2018212-12-06-2018_3_12_22/test_model')
print("Optimization Done")
accuracy = sess.run(eval_op, feed_dict={x: X_test, y: y_test})
print("Test Accuracy:", accuracy)
elapsed_time = time.time() - start_time
print('Execution time was %0.3f' % elapsed_time) | for i in range(len(df3)):
if df3.loc[i,'classification']==1:
temp_list.append(i) | random_line_split |
Notebook_1_DataCleansing_FeatureEngineering.py | # Databricks notebook source
# MAGIC %md # Notebook #1
# MAGIC
# MAGIC In this notebook, we show how to import, clean and create features relevent for predictive maintenance data using PySpark. This notebook uses Spark **2.0.2** and Python Python **2.7.5**. The API documentation for that version can be found [here](https://spark.apache.org/docs/2.0.2/api/python/index.html).
# MAGIC
# MAGIC ## Outline
# MAGIC
# MAGIC - [Import Data](#Import-Data)
# MAGIC - [Data Exploration & Cleansing](#Data-Exploration-&-Cleansing)
# MAGIC - [Feature Engineering](#Feature-Engineering)
# MAGIC - [Save Result](#Save-Result)
# COMMAND ----------
import subprocess
import sys
import os
import re
import time
import atexit
import seaborn as sns
import matplotlib.pyplot as plt
from pyspark import SparkConf
from pyspark import SparkContext
from pyspark import SQLContext
import pyspark.sql.functions as F
from pyspark.sql.functions import concat, col, udf, lag, date_add, explode, lit, unix_timestamp
from pyspark.sql.functions import month, weekofyear, dayofmonth
from pyspark.sql.types import *
from pyspark.sql.types import DateType
from pyspark.sql.dataframe import *
from pyspark.sql.window import Window
from pyspark.sql import Row
from pyspark.ml.classification import *
from pyspark.ml.feature import StringIndexer, OneHotEncoder, VectorAssembler, VectorIndexer
from pyspark.ml.feature import StandardScaler, PCA, RFormula
from pyspark.ml import Pipeline, PipelineModel
start_time = time.time()
# COMMAND ----------
# MAGIC %md ## Import Data
# COMMAND ----------
# MAGIC %md #### We initially encountered some issue while reading the data
# MAGIC - For Spark 2.0 and above, we can use the sqlContext.read.csv to import data directly into the Spark context. The data import seems to work fine and you can also perform some data transformation without any problem. However, when we tried to show the top n rows of the entire data (e.g. data.show(3)) or do some data manipulation on certain columns downstream, we encountered error of “Null Pointer” or “NumberFormatException: null”.
# MAGIC - In our case, it was because for some numeric columns with missing records containing "", Spark still recognizes those column as numeric but somehow cannot parse them correctly. Hopefully the future version of Spark could handle such issue more intelligently.
# MAGIC - We fixed the problem by reformating the data before loading into Spark context as csv format.
# MAGIC ```bash
# MAGIC cat data.csv | tr -d "\"\"" > sampledata.csv
# MAGIC ```
# COMMAND ----------
# MAGIC %md #### Import data from Azure Blob Storage
# MAGIC The input data is hosted on a publicly accessible Azure Blob Storage container and can be downloaded from [here](https://pysparksampledata.blob.core.windows.net/sampledata/sampledata.csv).
# MAGIC
# MAGIC To learn how to grant read-only access to Azure storage containers or blobs without sharing your account key and without requiring a shared access signature (SAS), please follow the instructions [here](https://docs.microsoft.com/en-us/azure/storage/storage-manage-access-to-resources).
# COMMAND ----------
# MAGIC %sh
# MAGIC curl -O https://pysparksampledata.blob.core.windows.net/sampledata/sampledata.csv
# COMMAND ----------
# MAGIC %sh
# MAGIC ls -al
# MAGIC pwd
# COMMAND ----------
# MAGIC %fs
# MAGIC cp file:/databricks/driver/sampledata.csv dbfs:/RC/example
# COMMAND ----------
# Import data from Azure Blob Storage
#dataFile = "wasb://sampledata@pysparksampledata.blob.core.windows.net/sampledata.csv"
dataFileSep = ','
# Import data from the home directory on your machine
dataFile = 'dbfs:/RC/example/sampledata.csv'
df = sqlContext.read.csv(dataFile, header=True, sep=dataFileSep, inferSchema=True, nanValue="", mode='PERMISSIVE')
# COMMAND ----------
# MAGIC %md ## Data Exploration & Cleansing
# COMMAND ----------
# MAGIC %md First, let's look at the dataset dimension and data schema.
# COMMAND ----------
# check the dimensions of the data
df.count(), len(df.columns)
# COMMAND ----------
# check whether the issue of df.show() is fixed
df.show(1)
# COMMAND ----------
# check data schema
df.dtypes
# COMMAND ----------
# MAGIC %md #### Explanations on the data schema:
# MAGIC * ***DeviceID***: machine identifier
# MAGIC * ***Date***: the day when that row of data was collected for that machine
# MAGIC * ***Categorical_1 to 4***: some categorical features about the machine
# MAGIC * ***Problem_Type_1 to 4***: the total number of times Problem type 1 (2, 3, 4) occured on that day for that machine
# MAGIC * ***Usage_Count_1 (2)***: the total number of times that machine had been used on that day for purpose type 1 or 2
# MAGIC * ***Warning_xxx***: the total number of Warning type_xxx occured for that machine on that day
# MAGIC * ***Error_Count_1 to 8***: the total number of times Error type 1 (to 8) occured on that day for that machine
# MAGIC * ***Fault_Code_Type_1 to 4***: fault code type 1 (2, 3, 4) occured on that day for that machine
# MAGIC * ***Problemreported***: prediction target column whether or not there is a machine problem on that day
# COMMAND ----------
# MAGIC %md #### As part of the data cleansing process, we standardized all the column names to lower case and replaced all the symbols with underscore. We also removed any duplicated records.
# COMMAND ----------
#--------------------------------------- initial data cleansing ---------------------------------------------#
# standardize the column names
def StandardizeNames(df):
l = df.columns
cols = [c.replace(' ','_').
replace('[.]','_').
replace('.','_').
replace('[[:punct:]]','_').
lower() for c in l]
return df.toDF(*cols)
df = StandardizeNames(df)
# remove duplicated rows based on deviceid and date
df = df.dropDuplicates(['deviceid', 'date'])
# remove rows with missing deviceid, date
df = df.dropna(how='any', subset=['deviceid', 'date'])
df.select('deviceid','date').show(3)
# COMMAND ----------
# MAGIC %md #### Define groups of features -- date, categorical, numeric
# COMMAND ----------
#------------------------------------------- Define groups of features -----------------------------------------#
features_datetime = ['date']
features_categorical = ['deviceid','Categorical_1','Categorical_2','Categorical_3','Categorical_4',
'fault_code_type_1','fault_code_type_2',
'fault_code_type_3','fault_code_type_4',
'problemreported']
features_numeric = list(set(df.columns) -set(features_datetime)-set(features_categorical))
# COMMAND ----------
# MAGIC %md #### Handling missing data
# COMMAND ----------
print(df['fault_code_type_3',].head(3))
# there are some missing values, we need to handle in the subsequent steps
# COMMAND ----------
# handle missing values
df = df.fillna(0, subset=features_numeric)
df = df.fillna("Unknown", subset=features_categorical)
# check the results
print(df['fault_code_type_3',].head(3))
# COMMAND ----------
# MAGIC %md #### For data exploration part, people usually would like to visualize the distribution of certain columns or the interation among columns. Here, we hand picked some columns to demonstrate how to do some basic visualizations.
# COMMAND ----------
#------------------------------------ data exploration and visualization ------------------------------------#
# Register dataframe as a temp table in SQL context
df.createOrReplaceTempView("df1")
sqlStatement = """
SELECT problem_type_1, problem_type_2, problem_type_3, problem_type_4,
error_count_1, error_count_2, error_count_3, error_count_4,
error_count_5, error_count_6, error_count_7, error_count_8, problemreported
FROM df1
"""
plotdata = spark.sql(sqlStatement).toPandas();
%matplotlib inline
# show histogram distribution of some features
ax1 = plotdata[['problem_type_1']].plot(kind='hist', bins=5, facecolor='blue')
ax1.set_title('problem_type_1 distribution')
ax1.set_xlabel('number of problem_type_1 per day'); ax1.set_ylabel('Counts');
plt.figure(figsize=(4,4)); plt.suptitle(''); plt.show()
ax1 = plotdata[['problem_type_2']].plot(kind='hist', bins=5, facecolor='blue')
ax1.set_title('problem_type_2 distribution')
ax1.set_xlabel('number of problem_type_2 per day'); ax1.set_ylabel('Counts');
plt.figure(figsize=(4,4)); plt.suptitle(''); plt.show()
# show correlation matrix heatmap to explore some potential interesting patterns
corr = plotdata.corr()
sns.heatmap(corr,
xticklabels=corr.columns.values,
yticklabels=corr.columns.values)
display()
# COMMAND ----------
# MAGIC %md ## Feature Engineering
# MAGIC In the remaining part of the Notebook #1, we will demonstrate how to generate new features for this kind of use case. It is definitely not meant to be a comprehensive list.
# COMMAND ----------
# MAGIC %md In the following cell, we created some time features, calculated the total number of warning_type1 (type2) occured for a macine on a particular day. We also identified some data quality issue that some event counts had negative values.
# COMMAND ----------
# Extract some time features from "date" column
df = df.withColumn('month', month(df['date']))
df = df.withColumn('weekofyear', weekofyear(df['date']))
df = df.withColumn('dayofmonth', dayofmonth(df['date']))
# warning related raw features
warning_type1_features = list(s for s in df.columns if "warning_1_" in s)
warning_type2_features = list(s for s in df.columns if "warning_2_" in s)
warning_all = warning_type1_features + warning_type2_features
# total count of all type1 warnings each day each device
df = df.withColumn('warn_type1_total', sum(df[col_n] for col_n in warning_type1_features))
# total count of all type2 warnings each day each device
df = df.withColumn('warn_type2_total', sum(df[col_n] for col_n in warning_type2_features))
print(df['warn_type1_total',].head(3))
print(df['warn_type2_total',].head(3))
# COMMAND ----------
# We realized that the warning counts have negative values
# Replace all the negative values with 0
def negative_replace(num):
if num < 0: return 0
else: return num
negative_replace_Udf = udf(negative_replace, IntegerType())
m = warning_type1_features + warning_type2_features
for col_n in m:
df = df.withColumn(col_n, negative_replace_Udf(df[col_n]))
# Then we have to re-calculate the total warnings again
df = df.withColumn('warn_type1_total', sum(df[col_n] for col_n in warning_type1_features))
df = df.withColumn('warn_type2_total', sum(df[col_n] for col_n in warning_type2_features))
print(df['warn_type1_total',].head(3))
print(df['warn_type2_total',].head(3))
# COMMAND ----------
# MAGIC %md #### Variables "categorical_1 to 4" are integer type but in fact they are categorical features. In the following cell, we binned those variables and created four new columns.
# COMMAND ----------
# Note: we can also use SparkSQL for this binning task
def Cat1(num):
if num < | = udf(Cat1, StringType())
df = df.withColumn("cat1", cat1Udf('categorical_1'))
def Cat2(num):
if num <= 2000: return '0-2000'
elif 2000 < num and num <= 3000: return '2000-3000'
elif 3000 < num and num <= 4000: return '3000-4000'
elif 4000 < num and num <= 5000: return '4000-5000'
elif 5000 < num and num <= 6000: return '5000-6000'
else: return 'morethan6000'
cat2Udf = udf(Cat2, StringType())
df = df.withColumn("cat2", cat2Udf('categorical_2'))
def Cat3(num):
if num <= 200: return '0-200'
elif 200 < num and num <= 400: return '200-400'
elif 400 < num and num <= 600: return '400-600'
elif 600 < num and num <= 800: return '600-800'
else: return 'morethan800'
cat3Udf = udf(Cat3, StringType())
df = df.withColumn("cat3", cat3Udf('categorical_3'))
def Cat4(num):
if num <= 5000: return '0-5000'
elif 5000 < num and num <= 10000: return '5000-10000'
elif 10000 < num and num <= 15000: return '10000-15000'
elif 15000 < num and num <= 20000: return '15000-20000'
else: return 'morethan20000'
cat4Udf = udf(Cat4, StringType())
df = df.withColumn("cat4", cat4Udf('categorical_4'))
print(df.select('cat1').distinct().rdd.map(lambda r: r[0]).collect())
print(df.select('cat2').distinct().rdd.map(lambda r: r[0]).collect())
print(df.select('cat3').distinct().rdd.map(lambda r: r[0]).collect())
print(df.select('cat4').distinct().rdd.map(lambda r: r[0]).collect())
# COMMAND ----------
# MAGIC %md #### For variables "fault_code_type_1 to 4", if it is "Unknown" that means there is "0" fault code reported on that day for that machine, otherwise the count of fault code type 1 (2, 3, or 4) is 1.
# COMMAND ----------
df = df.withColumn("fault_code_type_1_count",F.when(df.fault_code_type_1!= "Unknown", 1).otherwise(0))\
.withColumn("fault_code_type_2_count",F.when(df.fault_code_type_2!= "Unknown", 1).otherwise(0))\
.withColumn("fault_code_type_3_count",F.when(df.fault_code_type_3!= "Unknown", 1).otherwise(0))\
.withColumn("fault_code_type_4_count",F.when(df.fault_code_type_4!= "Unknown", 1).otherwise(0))
df.groupby('fault_code_type_1_count').count().show()
df.groupby('fault_code_type_2_count').count().show()
df.groupby('fault_code_type_3_count').count().show()
df.groupby('fault_code_type_4_count').count().show()
# COMMAND ----------
# MAGIC %md #### Feature engineering performance related features
# MAGIC We first select 8 raw performance features to be normalized and then select 2 normalizers.
# MAGIC The idea behind this normalization is that device with more problem/error/fault reported might simply because it is used more frequently. Therefore, we need to normalize the problem counts by the corresponding usage counts.
# COMMAND ----------
# First, select the 8 raw performance features to be normalized
performance_normal_raw = ['problem_type_1','problem_type_2','problem_type_3','problem_type_4',
'fault_code_type_1_count','fault_code_type_2_count',
'fault_code_type_3_count', 'fault_code_type_4_count']
# Then, select 2 normalizers
performance_normalizer = ['usage_count_1','usage_count_2']
# Normalize performance_normal_raw by "usage_count_1"
df = df.withColumn("problem_type_1_per_usage1", F.when(df.usage_count_1==0,0).otherwise(df.problem_type_1/df.usage_count_1))\
.withColumn("problem_type_2_per_usage1",F.when(df.usage_count_1==0,0).otherwise(df.problem_type_2/df.usage_count_1))\
.withColumn("problem_type_3_per_usage1",F.when(df.usage_count_1==0,0).otherwise(df.problem_type_3/df.usage_count_1))\
.withColumn("problem_type_4_per_usage1",F.when(df.usage_count_1==0,0).otherwise(df.problem_type_4/df.usage_count_1))\
.withColumn("fault_code_type_1_count_per_usage1",F.when(df.usage_count_1==0,0).otherwise(df.fault_code_type_1_count/df.usage_count_1))\
.withColumn("fault_code_type_2_count_per_usage1",F.when(df.usage_count_1==0,0).otherwise(df.fault_code_type_2_count/df.usage_count_1))\
.withColumn("fault_code_type_3_count_per_usage1",F.when(df.usage_count_1==0,0).otherwise(df.fault_code_type_3_count/df.usage_count_1))\
.withColumn("fault_code_type_4_count_per_usage1",F.when(df.usage_count_1==0,0).otherwise(df.fault_code_type_4_count/df.usage_count_1))
# Normalize performance_normal_raw by "usage_count_2"
df = df.withColumn("problem_type_1_per_usage2", F.when(df.usage_count_2==0,0).otherwise(df.problem_type_1/df.usage_count_2))\
.withColumn("problem_type_2_per_usage2",F.when(df.usage_count_2==0,0).otherwise(df.problem_type_2/df.usage_count_2))\
.withColumn("problem_type_3_per_usage2",F.when(df.usage_count_2==0,0).otherwise(df.problem_type_3/df.usage_count_2))\
.withColumn("problem_type_4_per_usage2",F.when(df.usage_count_2==0,0).otherwise(df.problem_type_4/df.usage_count_2))\
.withColumn("fault_code_type_1_count_per_usage2",F.when(df.usage_count_2==0,0).otherwise(df.fault_code_type_1_count/df.usage_count_2))\
.withColumn("fault_code_type_2_count_per_usage2",F.when(df.usage_count_2==0,0).otherwise(df.fault_code_type_2_count/df.usage_count_2))\
.withColumn("fault_code_type_3_count_per_usage2",F.when(df.usage_count_2==0,0).otherwise(df.fault_code_type_3_count/df.usage_count_2))\
.withColumn("fault_code_type_4_count_per_usage2",F.when(df.usage_count_2==0,0).otherwise(df.fault_code_type_4_count/df.usage_count_2))
# COMMAND ----------
# MAGIC %md #### Similar to what we did for "categorical_1 to 4", in the following cell we binned performance related features and created new categorical features.
# COMMAND ----------
# Define the list of performance related features which we would like to perform binning
c_names = ['problem_type_1', 'problem_type_3', 'problem_type_4',
'problem_type_1_per_usage1','problem_type_2_per_usage1','problem_type_3_per_usage1','problem_type_4_per_usage1',
'problem_type_1_per_usage2','problem_type_2_per_usage2','problem_type_3_per_usage2','problem_type_4_per_usage2',
'fault_code_type_1_count', 'fault_code_type_2_count', 'fault_code_type_3_count', 'fault_code_type_4_count',
'fault_code_type_1_count_per_usage1','fault_code_type_2_count_per_usage1',
'fault_code_type_3_count_per_usage1', 'fault_code_type_4_count_per_usage1',
'fault_code_type_1_count_per_usage2','fault_code_type_2_count_per_usage2',
'fault_code_type_3_count_per_usage2', 'fault_code_type_4_count_per_usage2']
# Bin size ('0','1','>1') for most of the performance features because majority of the values fall into the range of 0 to slightly more than 1.
def performanceCat(num):
if num == 0: return '0'
elif num ==1: return '1'
else: return '>1'
performanceCatUdf = udf(performanceCat, StringType())
for col_n in c_names:
df = df.withColumn(col_n+'_category',performanceCatUdf(df[col_n]))
# Use different bin for "problem_type_2" because we saw a larger spread of the values
def problem_type_2_Cat(num):
if num == 0: return '0'
elif 0 < num and num <= 5: return '1-5'
elif 5 < num and num <= 10: return '6-10'
else: return '>10'
problem_type_2_CatUdf = udf(problem_type_2_Cat, StringType())
df = df.withColumn('problem_type_2_category',problem_type_2_CatUdf(df['problem_type_2']))
print(df.select('problem_type_1_category').distinct().rdd.map(lambda r: r[0]).collect())
print(df.select('problem_type_2_category').distinct().rdd.map(lambda r: r[0]).collect())
# COMMAND ----------
# MAGIC %md #### One hot encode some categotical features
# COMMAND ----------
# MAGIC %%time
# MAGIC
# MAGIC # Define the list of categorical features
# MAGIC
# MAGIC catVarNames = ['problem_type_1_category', 'problem_type_2_category',
# MAGIC 'problem_type_3_category', 'problem_type_4_category',
# MAGIC 'problem_type_1_per_usage1_category', 'problem_type_2_per_usage1_category',
# MAGIC 'problem_type_3_per_usage1_category', 'problem_type_4_per_usage1_category',
# MAGIC 'problem_type_1_per_usage2_category', 'problem_type_2_per_usage2_category',
# MAGIC 'problem_type_3_per_usage2_category', 'problem_type_4_per_usage2_category',
# MAGIC 'fault_code_type_1_count_category', 'fault_code_type_2_count_category',
# MAGIC 'fault_code_type_3_count_category', 'fault_code_type_4_count_category',
# MAGIC 'fault_code_type_1_count_per_usage1_category', 'fault_code_type_2_count_per_usage1_category',
# MAGIC 'fault_code_type_3_count_per_usage1_category', 'fault_code_type_4_count_per_usage1_category',
# MAGIC 'fault_code_type_1_count_per_usage2_category', 'fault_code_type_2_count_per_usage2_category',
# MAGIC 'fault_code_type_3_count_per_usage2_category', 'fault_code_type_4_count_per_usage2_category',
# MAGIC 'cat1','cat2','cat3','cat4']
# MAGIC
# MAGIC
# MAGIC sIndexers = [StringIndexer(inputCol=x, outputCol=x + '_indexed') for x in catVarNames]
# MAGIC
# MAGIC df_cat = Pipeline(stages=sIndexers).fit(df).transform(df)
# MAGIC
# MAGIC # Remove columns with only 1 level (compute variances of columns)
# MAGIC catColVariance = df_cat.select(
# MAGIC *(F.variance(df_cat[c]).alias(c + '_sd') for c in [cv + '_indexed' for cv in catVarNames]))
# MAGIC catColVariance = catColVariance.rdd.flatMap(lambda x: x).collect()
# MAGIC catVarNames = [catVarNames[k] for k in [i for i, v in enumerate(catColVariance) if v != 0]]
# MAGIC
# MAGIC # Encode
# MAGIC ohEncoders = [OneHotEncoder(inputCol=x + '_indexed', outputCol=x + '_encoded')
# MAGIC for x in catVarNames]
# MAGIC ohPipelineModel = Pipeline(stages=ohEncoders).fit(df_cat)
# MAGIC df_cat = ohPipelineModel.transform(df_cat)
# MAGIC
# MAGIC drop_list = [col_n for col_n in df_cat.columns if 'indexed' in col_n]
# MAGIC df = df_cat.select([column for column in df_cat.columns if column not in drop_list])
# MAGIC
# MAGIC print(df['problem_type_1_category_encoded',].head(3))
# COMMAND ----------
# MAGIC %md #### Use PCA to reduce number of features
# MAGIC In Notebook #2, we will perform a series of rolling computation for various features, time windows and aggregated statistics. This process is very computational expensive and therefore we need to first reduce the feature list.
# MAGIC In the dataset, there are many warning related features and most of them have value of 0 so quite sparse. We can group or find correlations among those warning features, reduce the feature space for downstream work.
# COMMAND ----------
## check the number of warning related features
len([col_n for col_n in df.columns if 'warning' in col_n])
# COMMAND ----------
# MAGIC %%time
# MAGIC
# MAGIC #----------------------------- PCA feature grouping on warning related features --------------------------#
# MAGIC
# MAGIC df = df.withColumn("key", concat(df.deviceid,lit("_"),df.date))
# MAGIC
# MAGIC # step 1
# MAGIC # Use RFormula to create the feature vector
# MAGIC formula = RFormula(formula = "~" + "+".join(warning_all))
# MAGIC output = formula.fit(df).transform(df).select("key","features")
# MAGIC
# MAGIC
# MAGIC # step 2
# MAGIC # Before PCA, we need to standardize the features, it is very important...
# MAGIC # Note that StandardScaler does not work for sparse vector unless withMean=false
# MAGIC # OR we can convert sparse vector to dense vector first using toArray
# MAGIC scaler = StandardScaler(inputCol="features", outputCol="scaledFeatures",
# MAGIC withStd=True, withMean=False)
# MAGIC
# MAGIC # Compute summary statistics by fitting the StandardScaler
# MAGIC scalerModel = scaler.fit(output)
# MAGIC
# MAGIC # Normalize each feature to have unit standard deviation.
# MAGIC scaledData = scalerModel.transform(output)
# MAGIC
# MAGIC
# MAGIC # step 3
# MAGIC pca = PCA(k=20, inputCol="scaledFeatures", outputCol="pcaFeatures")
# MAGIC model = pca.fit(scaledData)
# MAGIC result = model.transform(scaledData).select("key","pcaFeatures")
# MAGIC
# MAGIC # to check how much variance explained by each component
# MAGIC print(model.explainedVariance)
# MAGIC
# MAGIC
# MAGIC # step 4
# MAGIC # convert pca result, a vector column, to mulitple columns
# MAGIC # The reason why we did this was because later on we need to use those columns to generate more features (rolling compute)
# MAGIC def extract(row):
# MAGIC return (row.key, ) + tuple(float(x) for x in row.pcaFeatures.values)
# MAGIC
# MAGIC pca_outcome = result.rdd.map(extract).toDF(["key"])
# MAGIC
# MAGIC # rename columns of pca_outcome
# MAGIC oldColumns = pca_outcome.schema.names
# MAGIC
# MAGIC newColumns = ["key",
# MAGIC "pca_1_warn","pca_2_warn","pca_3_warn","pca_4_warn","pca_5_warn",
# MAGIC "pca_6_warn","pca_7_warn","pca_8_warn","pca_9_warn","pca_10_warn",
# MAGIC "pca_11_warn","pca_12_warn","pca_13_warn","pca_14_warn","pca_15_warn",
# MAGIC "pca_16_warn","pca_17_warn","pca_18_warn","pca_19_warn","pca_20_warn",
# MAGIC ]
# MAGIC
# MAGIC pca_result = reduce(lambda pca_outcome, idx: pca_outcome.withColumnRenamed(oldColumns[idx], newColumns[idx]), \
# MAGIC xrange(len(oldColumns)), pca_outcome)
# MAGIC
# MAGIC df = df.join(pca_result, 'key', 'inner')
# MAGIC
# MAGIC print(df['pca_1_warn',].head(3))
# MAGIC
# MAGIC warning_drop_list = [col_n for col_n in df.columns if 'warning_' in col_n]
# MAGIC df = df.select([column for column in df.columns if column not in warning_drop_list])
# COMMAND ----------
# I would like to visualize the relationship among the 20 pca components
# Register dataframe as a temp table in SQL context
df.createOrReplaceTempView("df2")
sqlStatement2 = """
SELECT pca_1_warn, pca_2_warn, pca_3_warn, pca_4_warn, pca_5_warn,
pca_6_warn, pca_7_warn, pca_8_warn, pca_9_warn, pca_10_warn,
pca_11_warn, pca_12_warn, pca_13_warn, pca_14_warn, pca_15_warn,
pca_16_warn, pca_17_warn, pca_18_warn, pca_19_warn, pca_20_warn
FROM df2
"""
plotdata2 = spark.sql(sqlStatement2).toPandas();
%matplotlib inline
# show correlation matrix heatmap to explore some potential interesting patterns
corr = plotdata2.corr()
sns.heatmap(corr,
xticklabels=corr.columns.values,
yticklabels=corr.columns.values)
# From the plot we can see the 20 pca components do not overlap too much which is expected
# COMMAND ----------
# MAGIC %md ## Save Result
# MAGIC
# MAGIC Due to the lazy compute of Spark, it is usually more efficient to break down the workload into chunks and materialize the intermediate results. For example, we divided the tutorial into three notebooks, the result from Notebook #1 would be used as input data for Notebook #2.
# COMMAND ----------
# MAGIC %%time
# MAGIC /mnt/Exploratory/WCLD/BetaProject
# MAGIC df.write.mode('overwrite').parquet('/mnt/Exploratory/WCLD/BetaProject/notebook1_result.parquet')
# COMMAND ----------
| = 10: return '0-10'
elif 10 < num and num <= 20: return '11-20'
elif 20 < num and num <= 30: return '21-30'
elif 30 < num and num <= 40: return '31-40'
else: return 'morethan40'
cat1Udf | identifier_body |
Notebook_1_DataCleansing_FeatureEngineering.py | # Databricks notebook source
# MAGIC %md # Notebook #1
# MAGIC
# MAGIC In this notebook, we show how to import, clean and create features relevent for predictive maintenance data using PySpark. This notebook uses Spark **2.0.2** and Python Python **2.7.5**. The API documentation for that version can be found [here](https://spark.apache.org/docs/2.0.2/api/python/index.html).
# MAGIC
# MAGIC ## Outline
# MAGIC
# MAGIC - [Import Data](#Import-Data)
# MAGIC - [Data Exploration & Cleansing](#Data-Exploration-&-Cleansing)
# MAGIC - [Feature Engineering](#Feature-Engineering)
# MAGIC - [Save Result](#Save-Result)
# COMMAND ----------
import subprocess
import sys
import os
import re
import time
import atexit
import seaborn as sns
import matplotlib.pyplot as plt
from pyspark import SparkConf
from pyspark import SparkContext
from pyspark import SQLContext
import pyspark.sql.functions as F
from pyspark.sql.functions import concat, col, udf, lag, date_add, explode, lit, unix_timestamp
from pyspark.sql.functions import month, weekofyear, dayofmonth
from pyspark.sql.types import *
from pyspark.sql.types import DateType
from pyspark.sql.dataframe import *
from pyspark.sql.window import Window
from pyspark.sql import Row
from pyspark.ml.classification import *
from pyspark.ml.feature import StringIndexer, OneHotEncoder, VectorAssembler, VectorIndexer
from pyspark.ml.feature import StandardScaler, PCA, RFormula
from pyspark.ml import Pipeline, PipelineModel
start_time = time.time()
# COMMAND ----------
# MAGIC %md ## Import Data
# COMMAND ----------
# MAGIC %md #### We initially encountered some issue while reading the data
# MAGIC - For Spark 2.0 and above, we can use the sqlContext.read.csv to import data directly into the Spark context. The data import seems to work fine and you can also perform some data transformation without any problem. However, when we tried to show the top n rows of the entire data (e.g. data.show(3)) or do some data manipulation on certain columns downstream, we encountered error of “Null Pointer” or “NumberFormatException: null”.
# MAGIC - In our case, it was because for some numeric columns with missing records containing "", Spark still recognizes those column as numeric but somehow cannot parse them correctly. Hopefully the future version of Spark could handle such issue more intelligently.
# MAGIC - We fixed the problem by reformating the data before loading into Spark context as csv format.
# MAGIC ```bash
# MAGIC cat data.csv | tr -d "\"\"" > sampledata.csv
# MAGIC ```
# COMMAND ----------
# MAGIC %md #### Import data from Azure Blob Storage
# MAGIC The input data is hosted on a publicly accessible Azure Blob Storage container and can be downloaded from [here](https://pysparksampledata.blob.core.windows.net/sampledata/sampledata.csv).
# MAGIC
# MAGIC To learn how to grant read-only access to Azure storage containers or blobs without sharing your account key and without requiring a shared access signature (SAS), please follow the instructions [here](https://docs.microsoft.com/en-us/azure/storage/storage-manage-access-to-resources).
# COMMAND ----------
# MAGIC %sh
# MAGIC curl -O https://pysparksampledata.blob.core.windows.net/sampledata/sampledata.csv
# COMMAND ----------
# MAGIC %sh
# MAGIC ls -al
# MAGIC pwd
# COMMAND ----------
# MAGIC %fs
# MAGIC cp file:/databricks/driver/sampledata.csv dbfs:/RC/example
# COMMAND ----------
# Import data from Azure Blob Storage
#dataFile = "wasb://sampledata@pysparksampledata.blob.core.windows.net/sampledata.csv"
dataFileSep = ','
# Import data from the home directory on your machine
dataFile = 'dbfs:/RC/example/sampledata.csv'
df = sqlContext.read.csv(dataFile, header=True, sep=dataFileSep, inferSchema=True, nanValue="", mode='PERMISSIVE')
# COMMAND ----------
# MAGIC %md ## Data Exploration & Cleansing
# COMMAND ----------
# MAGIC %md First, let's look at the dataset dimension and data schema.
# COMMAND ----------
# check the dimensions of the data
df.count(), len(df.columns)
# COMMAND ----------
# check whether the issue of df.show() is fixed
df.show(1)
# COMMAND ----------
# check data schema
df.dtypes
# COMMAND ----------
# MAGIC %md #### Explanations on the data schema:
# MAGIC * ***DeviceID***: machine identifier
# MAGIC * ***Date***: the day when that row of data was collected for that machine
# MAGIC * ***Categorical_1 to 4***: some categorical features about the machine
# MAGIC * ***Problem_Type_1 to 4***: the total number of times Problem type 1 (2, 3, 4) occured on that day for that machine
# MAGIC * ***Usage_Count_1 (2)***: the total number of times that machine had been used on that day for purpose type 1 or 2
# MAGIC * ***Warning_xxx***: the total number of Warning type_xxx occured for that machine on that day
# MAGIC * ***Error_Count_1 to 8***: the total number of times Error type 1 (to 8) occured on that day for that machine
# MAGIC * ***Fault_Code_Type_1 to 4***: fault code type 1 (2, 3, 4) occured on that day for that machine
# MAGIC * ***Problemreported***: prediction target column whether or not there is a machine problem on that day
# COMMAND ----------
# MAGIC %md #### As part of the data cleansing process, we standardized all the column names to lower case and replaced all the symbols with underscore. We also removed any duplicated records.
# COMMAND ----------
#--------------------------------------- initial data cleansing ---------------------------------------------#
# standardize the column names
def Standard | l = df.columns
cols = [c.replace(' ','_').
replace('[.]','_').
replace('.','_').
replace('[[:punct:]]','_').
lower() for c in l]
return df.toDF(*cols)
df = StandardizeNames(df)
# remove duplicated rows based on deviceid and date
df = df.dropDuplicates(['deviceid', 'date'])
# remove rows with missing deviceid, date
df = df.dropna(how='any', subset=['deviceid', 'date'])
df.select('deviceid','date').show(3)
# COMMAND ----------
# MAGIC %md #### Define groups of features -- date, categorical, numeric
# COMMAND ----------
#------------------------------------------- Define groups of features -----------------------------------------#
features_datetime = ['date']
features_categorical = ['deviceid','Categorical_1','Categorical_2','Categorical_3','Categorical_4',
'fault_code_type_1','fault_code_type_2',
'fault_code_type_3','fault_code_type_4',
'problemreported']
features_numeric = list(set(df.columns) -set(features_datetime)-set(features_categorical))
# COMMAND ----------
# MAGIC %md #### Handling missing data
# COMMAND ----------
print(df['fault_code_type_3',].head(3))
# there are some missing values, we need to handle in the subsequent steps
# COMMAND ----------
# handle missing values
df = df.fillna(0, subset=features_numeric)
df = df.fillna("Unknown", subset=features_categorical)
# check the results
print(df['fault_code_type_3',].head(3))
# COMMAND ----------
# MAGIC %md #### For data exploration part, people usually would like to visualize the distribution of certain columns or the interation among columns. Here, we hand picked some columns to demonstrate how to do some basic visualizations.
# COMMAND ----------
#------------------------------------ data exploration and visualization ------------------------------------#
# Register dataframe as a temp table in SQL context
df.createOrReplaceTempView("df1")
sqlStatement = """
SELECT problem_type_1, problem_type_2, problem_type_3, problem_type_4,
error_count_1, error_count_2, error_count_3, error_count_4,
error_count_5, error_count_6, error_count_7, error_count_8, problemreported
FROM df1
"""
plotdata = spark.sql(sqlStatement).toPandas();
%matplotlib inline
# show histogram distribution of some features
ax1 = plotdata[['problem_type_1']].plot(kind='hist', bins=5, facecolor='blue')
ax1.set_title('problem_type_1 distribution')
ax1.set_xlabel('number of problem_type_1 per day'); ax1.set_ylabel('Counts');
plt.figure(figsize=(4,4)); plt.suptitle(''); plt.show()
ax1 = plotdata[['problem_type_2']].plot(kind='hist', bins=5, facecolor='blue')
ax1.set_title('problem_type_2 distribution')
ax1.set_xlabel('number of problem_type_2 per day'); ax1.set_ylabel('Counts');
plt.figure(figsize=(4,4)); plt.suptitle(''); plt.show()
# show correlation matrix heatmap to explore some potential interesting patterns
corr = plotdata.corr()
sns.heatmap(corr,
xticklabels=corr.columns.values,
yticklabels=corr.columns.values)
display()
# COMMAND ----------
# MAGIC %md ## Feature Engineering
# MAGIC In the remaining part of the Notebook #1, we will demonstrate how to generate new features for this kind of use case. It is definitely not meant to be a comprehensive list.
# COMMAND ----------
# MAGIC %md In the following cell, we created some time features, calculated the total number of warning_type1 (type2) occured for a macine on a particular day. We also identified some data quality issue that some event counts had negative values.
# COMMAND ----------
# Extract some time features from "date" column
df = df.withColumn('month', month(df['date']))
df = df.withColumn('weekofyear', weekofyear(df['date']))
df = df.withColumn('dayofmonth', dayofmonth(df['date']))
# warning related raw features
warning_type1_features = list(s for s in df.columns if "warning_1_" in s)
warning_type2_features = list(s for s in df.columns if "warning_2_" in s)
warning_all = warning_type1_features + warning_type2_features
# total count of all type1 warnings each day each device
df = df.withColumn('warn_type1_total', sum(df[col_n] for col_n in warning_type1_features))
# total count of all type2 warnings each day each device
df = df.withColumn('warn_type2_total', sum(df[col_n] for col_n in warning_type2_features))
print(df['warn_type1_total',].head(3))
print(df['warn_type2_total',].head(3))
# COMMAND ----------
# We realized that the warning counts have negative values
# Replace all the negative values with 0
def negative_replace(num):
if num < 0: return 0
else: return num
negative_replace_Udf = udf(negative_replace, IntegerType())
m = warning_type1_features + warning_type2_features
for col_n in m:
df = df.withColumn(col_n, negative_replace_Udf(df[col_n]))
# Then we have to re-calculate the total warnings again
df = df.withColumn('warn_type1_total', sum(df[col_n] for col_n in warning_type1_features))
df = df.withColumn('warn_type2_total', sum(df[col_n] for col_n in warning_type2_features))
print(df['warn_type1_total',].head(3))
print(df['warn_type2_total',].head(3))
# COMMAND ----------
# MAGIC %md #### Variables "categorical_1 to 4" are integer type but in fact they are categorical features. In the following cell, we binned those variables and created four new columns.
# COMMAND ----------
# Note: we can also use SparkSQL for this binning task
def Cat1(num):
if num <= 10: return '0-10'
elif 10 < num and num <= 20: return '11-20'
elif 20 < num and num <= 30: return '21-30'
elif 30 < num and num <= 40: return '31-40'
else: return 'morethan40'
cat1Udf = udf(Cat1, StringType())
df = df.withColumn("cat1", cat1Udf('categorical_1'))
def Cat2(num):
if num <= 2000: return '0-2000'
elif 2000 < num and num <= 3000: return '2000-3000'
elif 3000 < num and num <= 4000: return '3000-4000'
elif 4000 < num and num <= 5000: return '4000-5000'
elif 5000 < num and num <= 6000: return '5000-6000'
else: return 'morethan6000'
cat2Udf = udf(Cat2, StringType())
df = df.withColumn("cat2", cat2Udf('categorical_2'))
def Cat3(num):
if num <= 200: return '0-200'
elif 200 < num and num <= 400: return '200-400'
elif 400 < num and num <= 600: return '400-600'
elif 600 < num and num <= 800: return '600-800'
else: return 'morethan800'
cat3Udf = udf(Cat3, StringType())
df = df.withColumn("cat3", cat3Udf('categorical_3'))
def Cat4(num):
if num <= 5000: return '0-5000'
elif 5000 < num and num <= 10000: return '5000-10000'
elif 10000 < num and num <= 15000: return '10000-15000'
elif 15000 < num and num <= 20000: return '15000-20000'
else: return 'morethan20000'
cat4Udf = udf(Cat4, StringType())
df = df.withColumn("cat4", cat4Udf('categorical_4'))
print(df.select('cat1').distinct().rdd.map(lambda r: r[0]).collect())
print(df.select('cat2').distinct().rdd.map(lambda r: r[0]).collect())
print(df.select('cat3').distinct().rdd.map(lambda r: r[0]).collect())
print(df.select('cat4').distinct().rdd.map(lambda r: r[0]).collect())
# COMMAND ----------
# MAGIC %md #### For variables "fault_code_type_1 to 4", if it is "Unknown" that means there is "0" fault code reported on that day for that machine, otherwise the count of fault code type 1 (2, 3, or 4) is 1.
# COMMAND ----------
df = df.withColumn("fault_code_type_1_count",F.when(df.fault_code_type_1!= "Unknown", 1).otherwise(0))\
.withColumn("fault_code_type_2_count",F.when(df.fault_code_type_2!= "Unknown", 1).otherwise(0))\
.withColumn("fault_code_type_3_count",F.when(df.fault_code_type_3!= "Unknown", 1).otherwise(0))\
.withColumn("fault_code_type_4_count",F.when(df.fault_code_type_4!= "Unknown", 1).otherwise(0))
df.groupby('fault_code_type_1_count').count().show()
df.groupby('fault_code_type_2_count').count().show()
df.groupby('fault_code_type_3_count').count().show()
df.groupby('fault_code_type_4_count').count().show()
# COMMAND ----------
# MAGIC %md #### Feature engineering performance related features
# MAGIC We first select 8 raw performance features to be normalized and then select 2 normalizers.
# MAGIC The idea behind this normalization is that device with more problem/error/fault reported might simply because it is used more frequently. Therefore, we need to normalize the problem counts by the corresponding usage counts.
# COMMAND ----------
# First, select the 8 raw performance features to be normalized
performance_normal_raw = ['problem_type_1','problem_type_2','problem_type_3','problem_type_4',
'fault_code_type_1_count','fault_code_type_2_count',
'fault_code_type_3_count', 'fault_code_type_4_count']
# Then, select 2 normalizers
performance_normalizer = ['usage_count_1','usage_count_2']
# Normalize performance_normal_raw by "usage_count_1"
df = df.withColumn("problem_type_1_per_usage1", F.when(df.usage_count_1==0,0).otherwise(df.problem_type_1/df.usage_count_1))\
.withColumn("problem_type_2_per_usage1",F.when(df.usage_count_1==0,0).otherwise(df.problem_type_2/df.usage_count_1))\
.withColumn("problem_type_3_per_usage1",F.when(df.usage_count_1==0,0).otherwise(df.problem_type_3/df.usage_count_1))\
.withColumn("problem_type_4_per_usage1",F.when(df.usage_count_1==0,0).otherwise(df.problem_type_4/df.usage_count_1))\
.withColumn("fault_code_type_1_count_per_usage1",F.when(df.usage_count_1==0,0).otherwise(df.fault_code_type_1_count/df.usage_count_1))\
.withColumn("fault_code_type_2_count_per_usage1",F.when(df.usage_count_1==0,0).otherwise(df.fault_code_type_2_count/df.usage_count_1))\
.withColumn("fault_code_type_3_count_per_usage1",F.when(df.usage_count_1==0,0).otherwise(df.fault_code_type_3_count/df.usage_count_1))\
.withColumn("fault_code_type_4_count_per_usage1",F.when(df.usage_count_1==0,0).otherwise(df.fault_code_type_4_count/df.usage_count_1))
# Normalize performance_normal_raw by "usage_count_2"
df = df.withColumn("problem_type_1_per_usage2", F.when(df.usage_count_2==0,0).otherwise(df.problem_type_1/df.usage_count_2))\
.withColumn("problem_type_2_per_usage2",F.when(df.usage_count_2==0,0).otherwise(df.problem_type_2/df.usage_count_2))\
.withColumn("problem_type_3_per_usage2",F.when(df.usage_count_2==0,0).otherwise(df.problem_type_3/df.usage_count_2))\
.withColumn("problem_type_4_per_usage2",F.when(df.usage_count_2==0,0).otherwise(df.problem_type_4/df.usage_count_2))\
.withColumn("fault_code_type_1_count_per_usage2",F.when(df.usage_count_2==0,0).otherwise(df.fault_code_type_1_count/df.usage_count_2))\
.withColumn("fault_code_type_2_count_per_usage2",F.when(df.usage_count_2==0,0).otherwise(df.fault_code_type_2_count/df.usage_count_2))\
.withColumn("fault_code_type_3_count_per_usage2",F.when(df.usage_count_2==0,0).otherwise(df.fault_code_type_3_count/df.usage_count_2))\
.withColumn("fault_code_type_4_count_per_usage2",F.when(df.usage_count_2==0,0).otherwise(df.fault_code_type_4_count/df.usage_count_2))
# COMMAND ----------
# MAGIC %md #### Similar to what we did for "categorical_1 to 4", in the following cell we binned performance related features and created new categorical features.
# COMMAND ----------
# Define the list of performance related features which we would like to perform binning
c_names = ['problem_type_1', 'problem_type_3', 'problem_type_4',
'problem_type_1_per_usage1','problem_type_2_per_usage1','problem_type_3_per_usage1','problem_type_4_per_usage1',
'problem_type_1_per_usage2','problem_type_2_per_usage2','problem_type_3_per_usage2','problem_type_4_per_usage2',
'fault_code_type_1_count', 'fault_code_type_2_count', 'fault_code_type_3_count', 'fault_code_type_4_count',
'fault_code_type_1_count_per_usage1','fault_code_type_2_count_per_usage1',
'fault_code_type_3_count_per_usage1', 'fault_code_type_4_count_per_usage1',
'fault_code_type_1_count_per_usage2','fault_code_type_2_count_per_usage2',
'fault_code_type_3_count_per_usage2', 'fault_code_type_4_count_per_usage2']
# Bin size ('0','1','>1') for most of the performance features because majority of the values fall into the range of 0 to slightly more than 1.
def performanceCat(num):
if num == 0: return '0'
elif num ==1: return '1'
else: return '>1'
performanceCatUdf = udf(performanceCat, StringType())
for col_n in c_names:
df = df.withColumn(col_n+'_category',performanceCatUdf(df[col_n]))
# Use different bin for "problem_type_2" because we saw a larger spread of the values
def problem_type_2_Cat(num):
if num == 0: return '0'
elif 0 < num and num <= 5: return '1-5'
elif 5 < num and num <= 10: return '6-10'
else: return '>10'
problem_type_2_CatUdf = udf(problem_type_2_Cat, StringType())
df = df.withColumn('problem_type_2_category',problem_type_2_CatUdf(df['problem_type_2']))
print(df.select('problem_type_1_category').distinct().rdd.map(lambda r: r[0]).collect())
print(df.select('problem_type_2_category').distinct().rdd.map(lambda r: r[0]).collect())
# COMMAND ----------
# MAGIC %md #### One hot encode some categotical features
# COMMAND ----------
# MAGIC %%time
# MAGIC
# MAGIC # Define the list of categorical features
# MAGIC
# MAGIC catVarNames = ['problem_type_1_category', 'problem_type_2_category',
# MAGIC 'problem_type_3_category', 'problem_type_4_category',
# MAGIC 'problem_type_1_per_usage1_category', 'problem_type_2_per_usage1_category',
# MAGIC 'problem_type_3_per_usage1_category', 'problem_type_4_per_usage1_category',
# MAGIC 'problem_type_1_per_usage2_category', 'problem_type_2_per_usage2_category',
# MAGIC 'problem_type_3_per_usage2_category', 'problem_type_4_per_usage2_category',
# MAGIC 'fault_code_type_1_count_category', 'fault_code_type_2_count_category',
# MAGIC 'fault_code_type_3_count_category', 'fault_code_type_4_count_category',
# MAGIC 'fault_code_type_1_count_per_usage1_category', 'fault_code_type_2_count_per_usage1_category',
# MAGIC 'fault_code_type_3_count_per_usage1_category', 'fault_code_type_4_count_per_usage1_category',
# MAGIC 'fault_code_type_1_count_per_usage2_category', 'fault_code_type_2_count_per_usage2_category',
# MAGIC 'fault_code_type_3_count_per_usage2_category', 'fault_code_type_4_count_per_usage2_category',
# MAGIC 'cat1','cat2','cat3','cat4']
# MAGIC
# MAGIC
# MAGIC sIndexers = [StringIndexer(inputCol=x, outputCol=x + '_indexed') for x in catVarNames]
# MAGIC
# MAGIC df_cat = Pipeline(stages=sIndexers).fit(df).transform(df)
# MAGIC
# MAGIC # Remove columns with only 1 level (compute variances of columns)
# MAGIC catColVariance = df_cat.select(
# MAGIC *(F.variance(df_cat[c]).alias(c + '_sd') for c in [cv + '_indexed' for cv in catVarNames]))
# MAGIC catColVariance = catColVariance.rdd.flatMap(lambda x: x).collect()
# MAGIC catVarNames = [catVarNames[k] for k in [i for i, v in enumerate(catColVariance) if v != 0]]
# MAGIC
# MAGIC # Encode
# MAGIC ohEncoders = [OneHotEncoder(inputCol=x + '_indexed', outputCol=x + '_encoded')
# MAGIC for x in catVarNames]
# MAGIC ohPipelineModel = Pipeline(stages=ohEncoders).fit(df_cat)
# MAGIC df_cat = ohPipelineModel.transform(df_cat)
# MAGIC
# MAGIC drop_list = [col_n for col_n in df_cat.columns if 'indexed' in col_n]
# MAGIC df = df_cat.select([column for column in df_cat.columns if column not in drop_list])
# MAGIC
# MAGIC print(df['problem_type_1_category_encoded',].head(3))
# COMMAND ----------
# MAGIC %md #### Use PCA to reduce number of features
# MAGIC In Notebook #2, we will perform a series of rolling computation for various features, time windows and aggregated statistics. This process is very computational expensive and therefore we need to first reduce the feature list.
# MAGIC In the dataset, there are many warning related features and most of them have value of 0 so quite sparse. We can group or find correlations among those warning features, reduce the feature space for downstream work.
# COMMAND ----------
## check the number of warning related features
len([col_n for col_n in df.columns if 'warning' in col_n])
# COMMAND ----------
# MAGIC %%time
# MAGIC
# MAGIC #----------------------------- PCA feature grouping on warning related features --------------------------#
# MAGIC
# MAGIC df = df.withColumn("key", concat(df.deviceid,lit("_"),df.date))
# MAGIC
# MAGIC # step 1
# MAGIC # Use RFormula to create the feature vector
# MAGIC formula = RFormula(formula = "~" + "+".join(warning_all))
# MAGIC output = formula.fit(df).transform(df).select("key","features")
# MAGIC
# MAGIC
# MAGIC # step 2
# MAGIC # Before PCA, we need to standardize the features, it is very important...
# MAGIC # Note that StandardScaler does not work for sparse vector unless withMean=false
# MAGIC # OR we can convert sparse vector to dense vector first using toArray
# MAGIC scaler = StandardScaler(inputCol="features", outputCol="scaledFeatures",
# MAGIC withStd=True, withMean=False)
# MAGIC
# MAGIC # Compute summary statistics by fitting the StandardScaler
# MAGIC scalerModel = scaler.fit(output)
# MAGIC
# MAGIC # Normalize each feature to have unit standard deviation.
# MAGIC scaledData = scalerModel.transform(output)
# MAGIC
# MAGIC
# MAGIC # step 3
# MAGIC pca = PCA(k=20, inputCol="scaledFeatures", outputCol="pcaFeatures")
# MAGIC model = pca.fit(scaledData)
# MAGIC result = model.transform(scaledData).select("key","pcaFeatures")
# MAGIC
# MAGIC # to check how much variance explained by each component
# MAGIC print(model.explainedVariance)
# MAGIC
# MAGIC
# MAGIC # step 4
# MAGIC # convert pca result, a vector column, to mulitple columns
# MAGIC # The reason why we did this was because later on we need to use those columns to generate more features (rolling compute)
# MAGIC def extract(row):
# MAGIC return (row.key, ) + tuple(float(x) for x in row.pcaFeatures.values)
# MAGIC
# MAGIC pca_outcome = result.rdd.map(extract).toDF(["key"])
# MAGIC
# MAGIC # rename columns of pca_outcome
# MAGIC oldColumns = pca_outcome.schema.names
# MAGIC
# MAGIC newColumns = ["key",
# MAGIC "pca_1_warn","pca_2_warn","pca_3_warn","pca_4_warn","pca_5_warn",
# MAGIC "pca_6_warn","pca_7_warn","pca_8_warn","pca_9_warn","pca_10_warn",
# MAGIC "pca_11_warn","pca_12_warn","pca_13_warn","pca_14_warn","pca_15_warn",
# MAGIC "pca_16_warn","pca_17_warn","pca_18_warn","pca_19_warn","pca_20_warn",
# MAGIC ]
# MAGIC
# MAGIC pca_result = reduce(lambda pca_outcome, idx: pca_outcome.withColumnRenamed(oldColumns[idx], newColumns[idx]), \
# MAGIC xrange(len(oldColumns)), pca_outcome)
# MAGIC
# MAGIC df = df.join(pca_result, 'key', 'inner')
# MAGIC
# MAGIC print(df['pca_1_warn',].head(3))
# MAGIC
# MAGIC warning_drop_list = [col_n for col_n in df.columns if 'warning_' in col_n]
# MAGIC df = df.select([column for column in df.columns if column not in warning_drop_list])
# COMMAND ----------
# I would like to visualize the relationship among the 20 pca components
# Register dataframe as a temp table in SQL context
df.createOrReplaceTempView("df2")
sqlStatement2 = """
SELECT pca_1_warn, pca_2_warn, pca_3_warn, pca_4_warn, pca_5_warn,
pca_6_warn, pca_7_warn, pca_8_warn, pca_9_warn, pca_10_warn,
pca_11_warn, pca_12_warn, pca_13_warn, pca_14_warn, pca_15_warn,
pca_16_warn, pca_17_warn, pca_18_warn, pca_19_warn, pca_20_warn
FROM df2
"""
plotdata2 = spark.sql(sqlStatement2).toPandas();
%matplotlib inline
# show correlation matrix heatmap to explore some potential interesting patterns
corr = plotdata2.corr()
sns.heatmap(corr,
xticklabels=corr.columns.values,
yticklabels=corr.columns.values)
# From the plot we can see the 20 pca components do not overlap too much which is expected
# COMMAND ----------
# MAGIC %md ## Save Result
# MAGIC
# MAGIC Due to the lazy compute of Spark, it is usually more efficient to break down the workload into chunks and materialize the intermediate results. For example, we divided the tutorial into three notebooks, the result from Notebook #1 would be used as input data for Notebook #2.
# COMMAND ----------
# MAGIC %%time
# MAGIC /mnt/Exploratory/WCLD/BetaProject
# MAGIC df.write.mode('overwrite').parquet('/mnt/Exploratory/WCLD/BetaProject/notebook1_result.parquet')
# COMMAND ----------
| izeNames(df):
| identifier_name |
Notebook_1_DataCleansing_FeatureEngineering.py | # Databricks notebook source
# MAGIC %md # Notebook #1
# MAGIC
# MAGIC In this notebook, we show how to import, clean and create features relevent for predictive maintenance data using PySpark. This notebook uses Spark **2.0.2** and Python Python **2.7.5**. The API documentation for that version can be found [here](https://spark.apache.org/docs/2.0.2/api/python/index.html).
# MAGIC
# MAGIC ## Outline
# MAGIC
# MAGIC - [Import Data](#Import-Data)
# MAGIC - [Data Exploration & Cleansing](#Data-Exploration-&-Cleansing)
# MAGIC - [Feature Engineering](#Feature-Engineering)
# MAGIC - [Save Result](#Save-Result)
# COMMAND ----------
import subprocess
import sys
import os
import re
import time
import atexit
import seaborn as sns
import matplotlib.pyplot as plt
from pyspark import SparkConf
from pyspark import SparkContext
from pyspark import SQLContext
import pyspark.sql.functions as F
from pyspark.sql.functions import concat, col, udf, lag, date_add, explode, lit, unix_timestamp
from pyspark.sql.functions import month, weekofyear, dayofmonth
from pyspark.sql.types import *
from pyspark.sql.types import DateType
from pyspark.sql.dataframe import *
from pyspark.sql.window import Window
from pyspark.sql import Row
from pyspark.ml.classification import *
from pyspark.ml.feature import StringIndexer, OneHotEncoder, VectorAssembler, VectorIndexer
from pyspark.ml.feature import StandardScaler, PCA, RFormula
from pyspark.ml import Pipeline, PipelineModel
start_time = time.time()
# COMMAND ----------
# MAGIC %md ## Import Data
# COMMAND ----------
# MAGIC %md #### We initially encountered some issue while reading the data
# MAGIC - For Spark 2.0 and above, we can use the sqlContext.read.csv to import data directly into the Spark context. The data import seems to work fine and you can also perform some data transformation without any problem. However, when we tried to show the top n rows of the entire data (e.g. data.show(3)) or do some data manipulation on certain columns downstream, we encountered error of “Null Pointer” or “NumberFormatException: null”.
# MAGIC - In our case, it was because for some numeric columns with missing records containing "", Spark still recognizes those column as numeric but somehow cannot parse them correctly. Hopefully the future version of Spark could handle such issue more intelligently.
# MAGIC - We fixed the problem by reformating the data before loading into Spark context as csv format.
# MAGIC ```bash
# MAGIC cat data.csv | tr -d "\"\"" > sampledata.csv
# MAGIC ```
# COMMAND ----------
# MAGIC %md #### Import data from Azure Blob Storage
# MAGIC The input data is hosted on a publicly accessible Azure Blob Storage container and can be downloaded from [here](https://pysparksampledata.blob.core.windows.net/sampledata/sampledata.csv).
# MAGIC
# MAGIC To learn how to grant read-only access to Azure storage containers or blobs without sharing your account key and without requiring a shared access signature (SAS), please follow the instructions [here](https://docs.microsoft.com/en-us/azure/storage/storage-manage-access-to-resources).
# COMMAND ----------
# MAGIC %sh
# MAGIC curl -O https://pysparksampledata.blob.core.windows.net/sampledata/sampledata.csv
# COMMAND ----------
# MAGIC %sh
# MAGIC ls -al
# MAGIC pwd
# COMMAND ----------
# MAGIC %fs
# MAGIC cp file:/databricks/driver/sampledata.csv dbfs:/RC/example
# COMMAND ----------
# Import data from Azure Blob Storage
#dataFile = "wasb://sampledata@pysparksampledata.blob.core.windows.net/sampledata.csv"
dataFileSep = ','
# Import data from the home directory on your machine
dataFile = 'dbfs:/RC/example/sampledata.csv'
df = sqlContext.read.csv(dataFile, header=True, sep=dataFileSep, inferSchema=True, nanValue="", mode='PERMISSIVE')
# COMMAND ----------
# MAGIC %md ## Data Exploration & Cleansing
# COMMAND ----------
# MAGIC %md First, let's look at the dataset dimension and data schema.
# COMMAND ----------
# check the dimensions of the data
df.count(), len(df.columns)
# COMMAND ----------
# check whether the issue of df.show() is fixed
df.show(1)
# COMMAND ----------
# check data schema
df.dtypes
# COMMAND ----------
# MAGIC %md #### Explanations on the data schema:
# MAGIC * ***DeviceID***: machine identifier
# MAGIC * ***Date***: the day when that row of data was collected for that machine
# MAGIC * ***Categorical_1 to 4***: some categorical features about the machine
# MAGIC * ***Problem_Type_1 to 4***: the total number of times Problem type 1 (2, 3, 4) occured on that day for that machine
# MAGIC * ***Usage_Count_1 (2)***: the total number of times that machine had been used on that day for purpose type 1 or 2
# MAGIC * ***Warning_xxx***: the total number of Warning type_xxx occured for that machine on that day
# MAGIC * ***Error_Count_1 to 8***: the total number of times Error type 1 (to 8) occured on that day for that machine
# MAGIC * ***Fault_Code_Type_1 to 4***: fault code type 1 (2, 3, 4) occured on that day for that machine
# MAGIC * ***Problemreported***: prediction target column whether or not there is a machine problem on that day
# COMMAND ----------
# MAGIC %md #### As part of the data cleansing process, we standardized all the column names to lower case and replaced all the symbols with underscore. We also removed any duplicated records.
# COMMAND ----------
#--------------------------------------- initial data cleansing ---------------------------------------------#
# standardize the column names
def StandardizeNames(df):
l = df.columns
cols = [c.replace(' ','_').
replace('[.]','_').
replace('.','_').
replace('[[:punct:]]','_').
lower() for c in l]
return df.toDF(*cols)
df = StandardizeNames(df)
# remove duplicated rows based on deviceid and date
df = df.dropDuplicates(['deviceid', 'date'])
# remove rows with missing deviceid, date
df = df.dropna(how='any', subset=['deviceid', 'date'])
df.select('deviceid','date').show(3)
# COMMAND ----------
# MAGIC %md #### Define groups of features -- date, categorical, numeric
# COMMAND ----------
#------------------------------------------- Define groups of features -----------------------------------------#
features_datetime = ['date']
features_categorical = ['deviceid','Categorical_1','Categorical_2','Categorical_3','Categorical_4',
'fault_code_type_1','fault_code_type_2',
'fault_code_type_3','fault_code_type_4',
'problemreported']
features_numeric = list(set(df.columns) -set(features_datetime)-set(features_categorical))
# COMMAND ----------
# MAGIC %md #### Handling missing data
# COMMAND ----------
print(df['fault_code_type_3',].head(3))
# there are some missing values, we need to handle in the subsequent steps
# COMMAND ----------
# handle missing values
df = df.fillna(0, subset=features_numeric)
df = df.fillna("Unknown", subset=features_categorical)
# check the results
print(df['fault_code_type_3',].head(3))
# COMMAND ----------
# MAGIC %md #### For data exploration part, people usually would like to visualize the distribution of certain columns or the interation among columns. Here, we hand picked some columns to demonstrate how to do some basic visualizations.
# COMMAND ----------
#------------------------------------ data exploration and visualization ------------------------------------#
# Register dataframe as a temp table in SQL context
df.createOrReplaceTempView("df1")
sqlStatement = """
SELECT problem_type_1, problem_type_2, problem_type_3, problem_type_4,
error_count_1, error_count_2, error_count_3, error_count_4,
error_count_5, error_count_6, error_count_7, error_count_8, problemreported
FROM df1
"""
plotdata = spark.sql(sqlStatement).toPandas();
%matplotlib inline
# show histogram distribution of some features
ax1 = plotdata[['problem_type_1']].plot(kind='hist', bins=5, facecolor='blue')
ax1.set_title('problem_type_1 distribution')
ax1.set_xlabel('number of problem_type_1 per day'); ax1.set_ylabel('Counts');
plt.figure(figsize=(4,4)); plt.suptitle(''); plt.show()
ax1 = plotdata[['problem_type_2']].plot(kind='hist', bins=5, facecolor='blue')
ax1.set_title('problem_type_2 distribution')
ax1.set_xlabel('number of problem_type_2 per day'); ax1.set_ylabel('Counts');
plt.figure(figsize=(4,4)); plt.suptitle(''); plt.show()
# show correlation matrix heatmap to explore some potential interesting patterns
corr = plotdata.corr()
sns.heatmap(corr,
xticklabels=corr.columns.values,
yticklabels=corr.columns.values)
display()
# COMMAND ----------
# MAGIC %md ## Feature Engineering
# MAGIC In the remaining part of the Notebook #1, we will demonstrate how to generate new features for this kind of use case. It is definitely not meant to be a comprehensive list.
# COMMAND ----------
# MAGIC %md In the following cell, we created some time features, calculated the total number of warning_type1 (type2) occured for a macine on a particular day. We also identified some data quality issue that some event counts had negative values.
# COMMAND ----------
# Extract some time features from "date" column
df = df.withColumn('month', month(df['date']))
df = df.withColumn('weekofyear', weekofyear(df['date']))
df = df.withColumn('dayofmonth', dayofmonth(df['date']))
# warning related raw features
warning_type1_features = list(s for s in df.columns if "warning_1_" in s)
warning_type2_features = list(s for s in df.columns if "warning_2_" in s)
warning_all = warning_type1_features + warning_type2_features
# total count of all type1 warnings each day each device
df = df.withColumn('warn_type1_total', sum(df[col_n] for col_n in warning_type1_features))
# total count of all type2 warnings each day each device
df = df.withColumn('warn_type2_total', sum(df[col_n] for col_n in warning_type2_features))
print(df['warn_type1_total',].head(3))
print(df['warn_type2_total',].head(3))
# COMMAND ----------
# We realized that the warning counts have negative values
# Replace all the negative values with 0
def negative_replace(num):
if num < 0: return 0
else: return num
negative_replace_Udf = udf(negative_replace, IntegerType())
m = warning_type1_features + warning_type2_features
for col_n in m:
df = df.withColumn(col_n, negative_replace_Udf(df[col_n]))
# Then we have to re-calculate the total warnings again
df = df.withColumn('warn_type1_total', sum(df[col_n] for col_n in warning_type1_features))
df = df.withColumn('warn_type2_total', sum(df[col_n] for col_n in warning_type2_features))
print(df['warn_type1_total',].head(3))
print(df['warn_type2_total',].head(3))
# COMMAND ----------
# MAGIC %md #### Variables "categorical_1 to 4" are integer type but in fact they are categorical features. In the following cell, we binned those variables and created four new columns.
# COMMAND ----------
# Note: we can also use SparkSQL for this binning task
def Cat1(num):
if num <= 10: return '0-10'
elif 10 < num and num <= 20: return '11-20'
elif 20 < num and num <= 30: return '21-30'
elif 30 < num and num <= 40: return '31-40'
else: return 'morethan40'
cat1Udf = udf(Cat1, StringType())
df = df.withColumn("cat1", cat1Udf('categorical_1'))
def Cat2(num):
if num <= 2000: return '0-2000'
elif 2000 < num and num <= 3000: return '2000-3000'
elif 3000 < num and num <= 4000: return '3000-4000'
elif 4000 < num and num <= 5000: return '4000-5000'
elif 5000 < num and num <= 6000: return '5000-6000'
else: return 'morethan6000'
cat2Udf = udf(Cat2, StringType())
df = df.withColumn("cat2", cat2Udf('categorical_2'))
def Cat3(num):
if num <= 200: return '0-200'
elif 200 < num and num <= 400: return '200-400'
elif 400 < num and num <= 600: return '400-600'
elif 600 < num and num <= 800: return '600-800'
else: return 'morethan800'
cat3Udf = udf(Cat3, StringType())
df = df.withColumn("cat3", cat3Udf('categorical_3'))
def Cat4(num):
if num <= 5000: return '0-5000'
elif 5000 < num and num <= 10000: return '5000-10000'
elif 10000 < num and num <= 15000: return '10000-15000'
elif 15000 < num and num <= 20000: return '15000-20000'
else: return 'morethan20000'
cat4Udf = udf(Cat4, StringType())
df = df.withColumn("cat4", cat4Udf('categorical_4'))
print(df.select('cat1').distinct().rdd.map(lambda r: r[0]).collect())
print(df.select('cat2').distinct().rdd.map(lambda r: r[0]).collect())
print(df.select('cat3').distinct().rdd.map(lambda r: r[0]).collect())
print(df.select('cat4').distinct().rdd.map(lambda r: r[0]).collect())
# COMMAND ----------
# MAGIC %md #### For variables "fault_code_type_1 to 4", if it is "Unknown" that means there is "0" fault code reported on that day for that machine, otherwise the count of fault code type 1 (2, 3, or 4) is 1.
# COMMAND ----------
df = df.withColumn("fault_code_type_1_count",F.when(df.fault_code_type_1!= "Unknown", 1).otherwise(0))\
.withColumn("fault_code_type_2_count",F.when(df.fault_code_type_2!= "Unknown", 1).otherwise(0))\
.withColumn("fault_code_type_3_count",F.when(df.fault_code_type_3!= "Unknown", 1).otherwise(0))\
.withColumn("fault_code_type_4_count",F.when(df.fault_code_type_4!= "Unknown", 1).otherwise(0))
df.groupby('fault_code_type_1_count').count().show()
df.groupby('fault_code_type_2_count').count().show()
df.groupby('fault_code_type_3_count').count().show()
df.groupby('fault_code_type_4_count').count().show()
# COMMAND ----------
# MAGIC %md #### Feature engineering performance related features
# MAGIC We first select 8 raw performance features to be normalized and then select 2 normalizers.
# MAGIC The idea behind this normalization is that device with more problem/error/fault reported might simply because it is used more frequently. Therefore, we need to normalize the problem counts by the corresponding usage counts.
# COMMAND ----------
# First, select the 8 raw performance features to be normalized
performance_normal_raw = ['problem_type_1','problem_type_2','problem_type_3','problem_type_4',
'fault_code_type_1_count','fault_code_type_2_count',
'fault_code_type_3_count', 'fault_code_type_4_count']
# Then, select 2 normalizers
performance_normalizer = ['usage_count_1','usage_count_2']
# Normalize performance_normal_raw by "usage_count_1"
df = df.withColumn("problem_type_1_per_usage1", F.when(df.usage_count_1==0,0).otherwise(df.problem_type_1/df.usage_count_1))\
.withColumn("problem_type_2_per_usage1",F.when(df.usage_count_1==0,0).otherwise(df.problem_type_2/df.usage_count_1))\
.withColumn("problem_type_3_per_usage1",F.when(df.usage_count_1==0,0).otherwise(df.problem_type_3/df.usage_count_1))\
.withColumn("problem_type_4_per_usage1",F.when(df.usage_count_1==0,0).otherwise(df.problem_type_4/df.usage_count_1))\
.withColumn("fault_code_type_1_count_per_usage1",F.when(df.usage_count_1==0,0).otherwise(df.fault_code_type_1_count/df.usage_count_1))\
.withColumn("fault_code_type_2_count_per_usage1",F.when(df.usage_count_1==0,0).otherwise(df.fault_code_type_2_count/df.usage_count_1))\
.withColumn("fault_code_type_3_count_per_usage1",F.when(df.usage_count_1==0,0).otherwise(df.fault_code_type_3_count/df.usage_count_1))\
.withColumn("fault_code_type_4_count_per_usage1",F.when(df.usage_count_1==0,0).otherwise(df.fault_code_type_4_count/df.usage_count_1))
# Normalize performance_normal_raw by "usage_count_2"
df = df.withColumn("problem_type_1_per_usage2", F.when(df.usage_count_2==0,0).otherwise(df.problem_type_1/df.usage_count_2))\
.withColumn("problem_type_2_per_usage2",F.when(df.usage_count_2==0,0).otherwise(df.problem_type_2/df.usage_count_2))\
.withColumn("problem_type_3_per_usage2",F.when(df.usage_count_2==0,0).otherwise(df.problem_type_3/df.usage_count_2))\
.withColumn("problem_type_4_per_usage2",F.when(df.usage_count_2==0,0).otherwise(df.problem_type_4/df.usage_count_2))\
.withColumn("fault_code_type_1_count_per_usage2",F.when(df.usage_count_2==0,0).otherwise(df.fault_code_type_1_count/df.usage_count_2))\
.withColumn("fault_code_type_2_count_per_usage2",F.when(df.usage_count_2==0,0).otherwise(df.fault_code_type_2_count/df.usage_count_2))\
.withColumn("fault_code_type_3_count_per_usage2",F.when(df.usage_count_2==0,0).otherwise(df.fault_code_type_3_count/df.usage_count_2))\
.withColumn("fault_code_type_4_count_per_usage2",F.when(df.usage_count_2==0,0).otherwise(df.fault_code_type_4_count/df.usage_count_2))
# COMMAND ----------
# MAGIC %md #### Similar to what we did for "categorical_1 to 4", in the following cell we binned performance related features and created new categorical features.
# COMMAND ----------
# Define the list of performance related features which we would like to perform binning
c_names = ['problem_type_1', 'problem_type_3', 'problem_type_4',
'problem_type_1_per_usage1','problem_type_2_per_usage1','problem_type_3_per_usage1','problem_type_4_per_usage1',
'problem_type_1_per_usage2','problem_type_2_per_usage2','problem_type_3_per_usage2','problem_type_4_per_usage2',
'fault_code_type_1_count', 'fault_code_type_2_count', 'fault_code_type_3_count', 'fault_code_type_4_count',
'fault_code_type_1_count_per_usage1','fault_code_type_2_count_per_usage1',
'fault_code_type_3_count_per_usage1', 'fault_code_type_4_count_per_usage1',
'fault_code_type_1_count_per_usage2','fault_code_type_2_count_per_usage2',
'fault_code_type_3_count_per_usage2', 'fault_code_type_4_count_per_usage2']
# Bin size ('0','1','>1') for most of the performance features because majority of the values fall into the range of 0 to slightly more than 1.
def performanceCat(num):
if num == 0: return '0'
elif num ==1: return '1'
else: return '>1'
performanceCatUdf = udf(performanceCat, StringType())
for col_n in c_names:
df = df.withColumn(col_n+'_category',performanceCatUdf(df[col_n]))
# Use different bin for "problem_type_2" because we saw a larger spread of the values
def problem_type_2_Cat(num):
if num == 0: return '0'
elif 0 < num and num <= 5: return '1-5'
elif 5 < num and num <= 10: return '6-10'
else: return '>10'
problem_type_2_CatUdf = udf(problem_type_2_Cat, StringType())
df = df.withColumn('problem_type_2_category',problem_type_2_CatUdf(df['problem_type_2']))
print(df.select('problem_type_1_category').distinct().rdd.map(lambda r: r[0]).collect())
print(df.select('problem_type_2_category').distinct().rdd.map(lambda r: r[0]).collect())
# COMMAND ----------
# MAGIC %md #### One hot encode some categotical features
# COMMAND ----------
# MAGIC %%time
# MAGIC
# MAGIC # Define the list of categorical features
# MAGIC
# MAGIC catVarNames = ['problem_type_1_category', 'problem_type_2_category',
# MAGIC 'problem_type_3_category', 'problem_type_4_category',
# MAGIC 'problem_type_1_per_usage1_category', 'problem_type_2_per_usage1_category',
# MAGIC 'problem_type_3_per_usage1_category', 'problem_type_4_per_usage1_category',
# MAGIC 'problem_type_1_per_usage2_category', 'problem_type_2_per_usage2_category',
# MAGIC 'problem_type_3_per_usage2_category', 'problem_type_4_per_usage2_category',
# MAGIC 'fault_code_type_1_count_category', 'fault_code_type_2_count_category',
# MAGIC 'fault_code_type_3_count_category', 'fault_code_type_4_count_category',
# MAGIC 'fault_code_type_1_count_per_usage1_category', 'fault_code_type_2_count_per_usage1_category',
# MAGIC 'fault_code_type_3_count_per_usage1_category', 'fault_code_type_4_count_per_usage1_category',
# MAGIC 'fault_code_type_1_count_per_usage2_category', 'fault_code_type_2_count_per_usage2_category',
# MAGIC 'fault_code_type_3_count_per_usage2_category', 'fault_code_type_4_count_per_usage2_category',
# MAGIC 'cat1','cat2','cat3','cat4']
# MAGIC
# MAGIC
# MAGIC sIndexers = [StringIndexer(inputCol=x, outputCol=x + '_indexed') for x in catVarNames]
# MAGIC
# MAGIC df_cat = Pipeline(stages=sIndexers).fit(df).transform(df)
# MAGIC
# MAGIC # Remove columns with only 1 level (compute variances of columns)
# MAGIC catColVariance = df_cat.select(
# MAGIC *(F.variance(df_cat[c]).alias(c + '_sd') for c in [cv + '_indexed' for cv in catVarNames]))
# MAGIC catColVariance = catColVariance.rdd.flatMap(lambda x: x).collect()
# MAGIC catVarNames = [catVarNames[k] for k in [i for i, v in enumerate(catColVariance) if v != 0]]
# MAGIC
# MAGIC # Encode
# MAGIC ohEncoders = [OneHotEncoder(inputCol=x + '_indexed', outputCol=x + '_encoded')
# MAGIC for x in catVarNames]
# MAGIC ohPipelineModel = Pipeline(stages=ohEncoders).fit(df_cat)
# MAGIC df_cat = ohPipelineModel.transform(df_cat)
# MAGIC
# MAGIC drop_list = [col_n for col_n in df_cat.columns if 'indexed' in col_n]
# MAGIC df = df_cat.select([column for column in df_cat.columns if column not in drop_list])
# MAGIC
# MAGIC print(df['problem_type_1_category_encoded',].head(3))
# COMMAND ----------
# MAGIC %md #### Use PCA to reduce number of features
# MAGIC In Notebook #2, we will perform a series of rolling computation for various features, time windows and aggregated statistics. This process is very computational expensive and therefore we need to first reduce the feature list.
# MAGIC In the dataset, there are many warning related features and most of them have value of 0 so quite sparse. We can group or find correlations among those warning features, reduce the feature space for downstream work.
# COMMAND ----------
## check the number of warning related features
len([col_n for col_n in df.columns if 'warning' in col_n])
# COMMAND ----------
# MAGIC %%time
# MAGIC
# MAGIC #----------------------------- PCA feature grouping on warning related features --------------------------#
# MAGIC
# MAGIC df = df.withColumn("key", concat(df.deviceid,lit("_"),df.date))
# MAGIC
# MAGIC # step 1
# MAGIC # Use RFormula to create the feature vector
# MAGIC formula = RFormula(formula = "~" + "+".join(warning_all))
# MAGIC output = formula.fit(df).transform(df).select("key","features")
# MAGIC
# MAGIC
# MAGIC # step 2
# MAGIC # Before PCA, we need to standardize the features, it is very important...
# MAGIC # Note that StandardScaler does not work for sparse vector unless withMean=false
# MAGIC # OR we can convert sparse vector to dense vector first using toArray
# MAGIC scaler = StandardScaler(inputCol="features", outputCol="scaledFeatures",
# MAGIC withStd=True, withMean=False)
# MAGIC
# MAGIC # Compute summary statistics by fitting the StandardScaler
# MAGIC scalerModel = scaler.fit(output)
# MAGIC
# MAGIC # Normalize each feature to have unit standard deviation.
# MAGIC scaledData = scalerModel.transform(output) | # MAGIC # step 3
# MAGIC pca = PCA(k=20, inputCol="scaledFeatures", outputCol="pcaFeatures")
# MAGIC model = pca.fit(scaledData)
# MAGIC result = model.transform(scaledData).select("key","pcaFeatures")
# MAGIC
# MAGIC # to check how much variance explained by each component
# MAGIC print(model.explainedVariance)
# MAGIC
# MAGIC
# MAGIC # step 4
# MAGIC # convert pca result, a vector column, to mulitple columns
# MAGIC # The reason why we did this was because later on we need to use those columns to generate more features (rolling compute)
# MAGIC def extract(row):
# MAGIC return (row.key, ) + tuple(float(x) for x in row.pcaFeatures.values)
# MAGIC
# MAGIC pca_outcome = result.rdd.map(extract).toDF(["key"])
# MAGIC
# MAGIC # rename columns of pca_outcome
# MAGIC oldColumns = pca_outcome.schema.names
# MAGIC
# MAGIC newColumns = ["key",
# MAGIC "pca_1_warn","pca_2_warn","pca_3_warn","pca_4_warn","pca_5_warn",
# MAGIC "pca_6_warn","pca_7_warn","pca_8_warn","pca_9_warn","pca_10_warn",
# MAGIC "pca_11_warn","pca_12_warn","pca_13_warn","pca_14_warn","pca_15_warn",
# MAGIC "pca_16_warn","pca_17_warn","pca_18_warn","pca_19_warn","pca_20_warn",
# MAGIC ]
# MAGIC
# MAGIC pca_result = reduce(lambda pca_outcome, idx: pca_outcome.withColumnRenamed(oldColumns[idx], newColumns[idx]), \
# MAGIC xrange(len(oldColumns)), pca_outcome)
# MAGIC
# MAGIC df = df.join(pca_result, 'key', 'inner')
# MAGIC
# MAGIC print(df['pca_1_warn',].head(3))
# MAGIC
# MAGIC warning_drop_list = [col_n for col_n in df.columns if 'warning_' in col_n]
# MAGIC df = df.select([column for column in df.columns if column not in warning_drop_list])
# COMMAND ----------
# I would like to visualize the relationship among the 20 pca components
# Register dataframe as a temp table in SQL context
df.createOrReplaceTempView("df2")
sqlStatement2 = """
SELECT pca_1_warn, pca_2_warn, pca_3_warn, pca_4_warn, pca_5_warn,
pca_6_warn, pca_7_warn, pca_8_warn, pca_9_warn, pca_10_warn,
pca_11_warn, pca_12_warn, pca_13_warn, pca_14_warn, pca_15_warn,
pca_16_warn, pca_17_warn, pca_18_warn, pca_19_warn, pca_20_warn
FROM df2
"""
plotdata2 = spark.sql(sqlStatement2).toPandas();
%matplotlib inline
# show correlation matrix heatmap to explore some potential interesting patterns
corr = plotdata2.corr()
sns.heatmap(corr,
xticklabels=corr.columns.values,
yticklabels=corr.columns.values)
# From the plot we can see the 20 pca components do not overlap too much which is expected
# COMMAND ----------
# MAGIC %md ## Save Result
# MAGIC
# MAGIC Due to the lazy compute of Spark, it is usually more efficient to break down the workload into chunks and materialize the intermediate results. For example, we divided the tutorial into three notebooks, the result from Notebook #1 would be used as input data for Notebook #2.
# COMMAND ----------
# MAGIC %%time
# MAGIC /mnt/Exploratory/WCLD/BetaProject
# MAGIC df.write.mode('overwrite').parquet('/mnt/Exploratory/WCLD/BetaProject/notebook1_result.parquet')
# COMMAND ---------- | # MAGIC
# MAGIC | random_line_split |
Notebook_1_DataCleansing_FeatureEngineering.py | # Databricks notebook source
# MAGIC %md # Notebook #1
# MAGIC
# MAGIC In this notebook, we show how to import, clean and create features relevent for predictive maintenance data using PySpark. This notebook uses Spark **2.0.2** and Python Python **2.7.5**. The API documentation for that version can be found [here](https://spark.apache.org/docs/2.0.2/api/python/index.html).
# MAGIC
# MAGIC ## Outline
# MAGIC
# MAGIC - [Import Data](#Import-Data)
# MAGIC - [Data Exploration & Cleansing](#Data-Exploration-&-Cleansing)
# MAGIC - [Feature Engineering](#Feature-Engineering)
# MAGIC - [Save Result](#Save-Result)
# COMMAND ----------
import subprocess
import sys
import os
import re
import time
import atexit
import seaborn as sns
import matplotlib.pyplot as plt
from pyspark import SparkConf
from pyspark import SparkContext
from pyspark import SQLContext
import pyspark.sql.functions as F
from pyspark.sql.functions import concat, col, udf, lag, date_add, explode, lit, unix_timestamp
from pyspark.sql.functions import month, weekofyear, dayofmonth
from pyspark.sql.types import *
from pyspark.sql.types import DateType
from pyspark.sql.dataframe import *
from pyspark.sql.window import Window
from pyspark.sql import Row
from pyspark.ml.classification import *
from pyspark.ml.feature import StringIndexer, OneHotEncoder, VectorAssembler, VectorIndexer
from pyspark.ml.feature import StandardScaler, PCA, RFormula
from pyspark.ml import Pipeline, PipelineModel
start_time = time.time()
# COMMAND ----------
# MAGIC %md ## Import Data
# COMMAND ----------
# MAGIC %md #### We initially encountered some issue while reading the data
# MAGIC - For Spark 2.0 and above, we can use the sqlContext.read.csv to import data directly into the Spark context. The data import seems to work fine and you can also perform some data transformation without any problem. However, when we tried to show the top n rows of the entire data (e.g. data.show(3)) or do some data manipulation on certain columns downstream, we encountered error of “Null Pointer” or “NumberFormatException: null”.
# MAGIC - In our case, it was because for some numeric columns with missing records containing "", Spark still recognizes those column as numeric but somehow cannot parse them correctly. Hopefully the future version of Spark could handle such issue more intelligently.
# MAGIC - We fixed the problem by reformating the data before loading into Spark context as csv format.
# MAGIC ```bash
# MAGIC cat data.csv | tr -d "\"\"" > sampledata.csv
# MAGIC ```
# COMMAND ----------
# MAGIC %md #### Import data from Azure Blob Storage
# MAGIC The input data is hosted on a publicly accessible Azure Blob Storage container and can be downloaded from [here](https://pysparksampledata.blob.core.windows.net/sampledata/sampledata.csv).
# MAGIC
# MAGIC To learn how to grant read-only access to Azure storage containers or blobs without sharing your account key and without requiring a shared access signature (SAS), please follow the instructions [here](https://docs.microsoft.com/en-us/azure/storage/storage-manage-access-to-resources).
# COMMAND ----------
# MAGIC %sh
# MAGIC curl -O https://pysparksampledata.blob.core.windows.net/sampledata/sampledata.csv
# COMMAND ----------
# MAGIC %sh
# MAGIC ls -al
# MAGIC pwd
# COMMAND ----------
# MAGIC %fs
# MAGIC cp file:/databricks/driver/sampledata.csv dbfs:/RC/example
# COMMAND ----------
# Import data from Azure Blob Storage
#dataFile = "wasb://sampledata@pysparksampledata.blob.core.windows.net/sampledata.csv"
dataFileSep = ','
# Import data from the home directory on your machine
dataFile = 'dbfs:/RC/example/sampledata.csv'
df = sqlContext.read.csv(dataFile, header=True, sep=dataFileSep, inferSchema=True, nanValue="", mode='PERMISSIVE')
# COMMAND ----------
# MAGIC %md ## Data Exploration & Cleansing
# COMMAND ----------
# MAGIC %md First, let's look at the dataset dimension and data schema.
# COMMAND ----------
# check the dimensions of the data
df.count(), len(df.columns)
# COMMAND ----------
# check whether the issue of df.show() is fixed
df.show(1)
# COMMAND ----------
# check data schema
df.dtypes
# COMMAND ----------
# MAGIC %md #### Explanations on the data schema:
# MAGIC * ***DeviceID***: machine identifier
# MAGIC * ***Date***: the day when that row of data was collected for that machine
# MAGIC * ***Categorical_1 to 4***: some categorical features about the machine
# MAGIC * ***Problem_Type_1 to 4***: the total number of times Problem type 1 (2, 3, 4) occured on that day for that machine
# MAGIC * ***Usage_Count_1 (2)***: the total number of times that machine had been used on that day for purpose type 1 or 2
# MAGIC * ***Warning_xxx***: the total number of Warning type_xxx occured for that machine on that day
# MAGIC * ***Error_Count_1 to 8***: the total number of times Error type 1 (to 8) occured on that day for that machine
# MAGIC * ***Fault_Code_Type_1 to 4***: fault code type 1 (2, 3, 4) occured on that day for that machine
# MAGIC * ***Problemreported***: prediction target column whether or not there is a machine problem on that day
# COMMAND ----------
# MAGIC %md #### As part of the data cleansing process, we standardized all the column names to lower case and replaced all the symbols with underscore. We also removed any duplicated records.
# COMMAND ----------
#--------------------------------------- initial data cleansing ---------------------------------------------#
# standardize the column names
def StandardizeNames(df):
l = df.columns
cols = [c.replace(' ','_').
replace('[.]','_').
replace('.','_').
replace('[[:punct:]]','_').
lower() for c in l]
return df.toDF(*cols)
df = StandardizeNames(df)
# remove duplicated rows based on deviceid and date
df = df.dropDuplicates(['deviceid', 'date'])
# remove rows with missing deviceid, date
df = df.dropna(how='any', subset=['deviceid', 'date'])
df.select('deviceid','date').show(3)
# COMMAND ----------
# MAGIC %md #### Define groups of features -- date, categorical, numeric
# COMMAND ----------
#------------------------------------------- Define groups of features -----------------------------------------#
features_datetime = ['date']
features_categorical = ['deviceid','Categorical_1','Categorical_2','Categorical_3','Categorical_4',
'fault_code_type_1','fault_code_type_2',
'fault_code_type_3','fault_code_type_4',
'problemreported']
features_numeric = list(set(df.columns) -set(features_datetime)-set(features_categorical))
# COMMAND ----------
# MAGIC %md #### Handling missing data
# COMMAND ----------
print(df['fault_code_type_3',].head(3))
# there are some missing values, we need to handle in the subsequent steps
# COMMAND ----------
# handle missing values
df = df.fillna(0, subset=features_numeric)
df = df.fillna("Unknown", subset=features_categorical)
# check the results
print(df['fault_code_type_3',].head(3))
# COMMAND ----------
# MAGIC %md #### For data exploration part, people usually would like to visualize the distribution of certain columns or the interation among columns. Here, we hand picked some columns to demonstrate how to do some basic visualizations.
# COMMAND ----------
#------------------------------------ data exploration and visualization ------------------------------------#
# Register dataframe as a temp table in SQL context
df.createOrReplaceTempView("df1")
sqlStatement = """
SELECT problem_type_1, problem_type_2, problem_type_3, problem_type_4,
error_count_1, error_count_2, error_count_3, error_count_4,
error_count_5, error_count_6, error_count_7, error_count_8, problemreported
FROM df1
"""
plotdata = spark.sql(sqlStatement).toPandas();
%matplotlib inline
# show histogram distribution of some features
ax1 = plotdata[['problem_type_1']].plot(kind='hist', bins=5, facecolor='blue')
ax1.set_title('problem_type_1 distribution')
ax1.set_xlabel('number of problem_type_1 per day'); ax1.set_ylabel('Counts');
plt.figure(figsize=(4,4)); plt.suptitle(''); plt.show()
ax1 = plotdata[['problem_type_2']].plot(kind='hist', bins=5, facecolor='blue')
ax1.set_title('problem_type_2 distribution')
ax1.set_xlabel('number of problem_type_2 per day'); ax1.set_ylabel('Counts');
plt.figure(figsize=(4,4)); plt.suptitle(''); plt.show()
# show correlation matrix heatmap to explore some potential interesting patterns
corr = plotdata.corr()
sns.heatmap(corr,
xticklabels=corr.columns.values,
yticklabels=corr.columns.values)
display()
# COMMAND ----------
# MAGIC %md ## Feature Engineering
# MAGIC In the remaining part of the Notebook #1, we will demonstrate how to generate new features for this kind of use case. It is definitely not meant to be a comprehensive list.
# COMMAND ----------
# MAGIC %md In the following cell, we created some time features, calculated the total number of warning_type1 (type2) occured for a macine on a particular day. We also identified some data quality issue that some event counts had negative values.
# COMMAND ----------
# Extract some time features from "date" column
df = df.withColumn('month', month(df['date']))
df = df.withColumn('weekofyear', weekofyear(df['date']))
df = df.withColumn('dayofmonth', dayofmonth(df['date']))
# warning related raw features
warning_type1_features = list(s for s in df.columns if "warning_1_" in s)
warning_type2_features = list(s for s in df.columns if "warning_2_" in s)
warning_all = warning_type1_features + warning_type2_features
# total count of all type1 warnings each day each device
df = df.withColumn('warn_type1_total', sum(df[col_n] for col_n in warning_type1_features))
# total count of all type2 warnings each day each device
df = df.withColumn('warn_type2_total', sum(df[col_n] for col_n in warning_type2_features))
print(df['warn_type1_total',].head(3))
print(df['warn_type2_total',].head(3))
# COMMAND ----------
# We realized that the warning counts have negative values
# Replace all the negative values with 0
def negative_replace(num):
if num < 0: return 0
else: return num
negative_replace_Udf = udf(negative_replace, IntegerType())
m = warning_type1_features + warning_type2_features
for col_n in m:
df = df.withColumn(col_n, negative_replace_Udf(df[col_n]))
# Then we have to re-calculate the total warnings again
df = df.withColumn('warn_type1_total', sum(df[col_n] for col_n in warning_type1_features))
df = df.withColumn('warn_type2_total', sum(df[col_n] for col_n in warning_type2_features))
print(df['warn_type1_total',].head(3))
print(df['warn_type2_total',].head(3))
# COMMAND ----------
# MAGIC %md #### Variables "categorical_1 to 4" are integer type but in fact they are categorical features. In the following cell, we binned those variables and created four new columns.
# COMMAND ----------
# Note: we can also use SparkSQL for this binning task
def Cat1(num):
if num <= 10: return '0-10'
elif 10 < num and num <= 20: return '11-20'
elif 20 < num and num <= 30: return '21-30'
elif 30 < num and num <= 40: return '31-40'
else: return 'morethan40'
cat1Udf = udf(Cat1, StringType())
df = df.withColumn("cat1", cat1Udf('categorical_1'))
def Cat2(num):
if num <= 2000: return '0-2000'
elif 2000 < num and num <= 3000: return '2000-3000'
elif 3000 < num and num <= 4000: return '3000-4000'
elif 4000 < num and num <= 5000: return '4000-5000'
elif 5000 < num and num <= 6000: return '5000-6000'
else: return 'morethan6000'
cat2Udf = udf(Cat2, StringType())
df = df.withColumn("cat2", cat2Udf('categorical_2'))
def Cat3(num):
if num <= 200: return '0-200'
elif 200 < num and num <= 400: return '200-400'
elif 400 < num and num <= 600: return '400-600'
elif 600 < num and num <= 800: return '600-800'
else: return 'morethan800'
cat3Udf = udf(Cat3, StringType())
df = df.withColumn("cat3", cat3Udf('categorical_3'))
def Cat4(num):
if num <= 5000: return '0-5000'
elif 5000 < num and num <= 10000: return '5000-10000'
elif 10000 < num and num <= 15000: return '10000-15000'
elif 15000 < num and num <= 20000: return '15000-20000'
else: return ' | = udf(Cat4, StringType())
df = df.withColumn("cat4", cat4Udf('categorical_4'))
print(df.select('cat1').distinct().rdd.map(lambda r: r[0]).collect())
print(df.select('cat2').distinct().rdd.map(lambda r: r[0]).collect())
print(df.select('cat3').distinct().rdd.map(lambda r: r[0]).collect())
print(df.select('cat4').distinct().rdd.map(lambda r: r[0]).collect())
# COMMAND ----------
# MAGIC %md #### For variables "fault_code_type_1 to 4", if it is "Unknown" that means there is "0" fault code reported on that day for that machine, otherwise the count of fault code type 1 (2, 3, or 4) is 1.
# COMMAND ----------
df = df.withColumn("fault_code_type_1_count",F.when(df.fault_code_type_1!= "Unknown", 1).otherwise(0))\
.withColumn("fault_code_type_2_count",F.when(df.fault_code_type_2!= "Unknown", 1).otherwise(0))\
.withColumn("fault_code_type_3_count",F.when(df.fault_code_type_3!= "Unknown", 1).otherwise(0))\
.withColumn("fault_code_type_4_count",F.when(df.fault_code_type_4!= "Unknown", 1).otherwise(0))
df.groupby('fault_code_type_1_count').count().show()
df.groupby('fault_code_type_2_count').count().show()
df.groupby('fault_code_type_3_count').count().show()
df.groupby('fault_code_type_4_count').count().show()
# COMMAND ----------
# MAGIC %md #### Feature engineering performance related features
# MAGIC We first select 8 raw performance features to be normalized and then select 2 normalizers.
# MAGIC The idea behind this normalization is that device with more problem/error/fault reported might simply because it is used more frequently. Therefore, we need to normalize the problem counts by the corresponding usage counts.
# COMMAND ----------
# First, select the 8 raw performance features to be normalized
performance_normal_raw = ['problem_type_1','problem_type_2','problem_type_3','problem_type_4',
'fault_code_type_1_count','fault_code_type_2_count',
'fault_code_type_3_count', 'fault_code_type_4_count']
# Then, select 2 normalizers
performance_normalizer = ['usage_count_1','usage_count_2']
# Normalize performance_normal_raw by "usage_count_1"
df = df.withColumn("problem_type_1_per_usage1", F.when(df.usage_count_1==0,0).otherwise(df.problem_type_1/df.usage_count_1))\
.withColumn("problem_type_2_per_usage1",F.when(df.usage_count_1==0,0).otherwise(df.problem_type_2/df.usage_count_1))\
.withColumn("problem_type_3_per_usage1",F.when(df.usage_count_1==0,0).otherwise(df.problem_type_3/df.usage_count_1))\
.withColumn("problem_type_4_per_usage1",F.when(df.usage_count_1==0,0).otherwise(df.problem_type_4/df.usage_count_1))\
.withColumn("fault_code_type_1_count_per_usage1",F.when(df.usage_count_1==0,0).otherwise(df.fault_code_type_1_count/df.usage_count_1))\
.withColumn("fault_code_type_2_count_per_usage1",F.when(df.usage_count_1==0,0).otherwise(df.fault_code_type_2_count/df.usage_count_1))\
.withColumn("fault_code_type_3_count_per_usage1",F.when(df.usage_count_1==0,0).otherwise(df.fault_code_type_3_count/df.usage_count_1))\
.withColumn("fault_code_type_4_count_per_usage1",F.when(df.usage_count_1==0,0).otherwise(df.fault_code_type_4_count/df.usage_count_1))
# Normalize performance_normal_raw by "usage_count_2"
df = df.withColumn("problem_type_1_per_usage2", F.when(df.usage_count_2==0,0).otherwise(df.problem_type_1/df.usage_count_2))\
.withColumn("problem_type_2_per_usage2",F.when(df.usage_count_2==0,0).otherwise(df.problem_type_2/df.usage_count_2))\
.withColumn("problem_type_3_per_usage2",F.when(df.usage_count_2==0,0).otherwise(df.problem_type_3/df.usage_count_2))\
.withColumn("problem_type_4_per_usage2",F.when(df.usage_count_2==0,0).otherwise(df.problem_type_4/df.usage_count_2))\
.withColumn("fault_code_type_1_count_per_usage2",F.when(df.usage_count_2==0,0).otherwise(df.fault_code_type_1_count/df.usage_count_2))\
.withColumn("fault_code_type_2_count_per_usage2",F.when(df.usage_count_2==0,0).otherwise(df.fault_code_type_2_count/df.usage_count_2))\
.withColumn("fault_code_type_3_count_per_usage2",F.when(df.usage_count_2==0,0).otherwise(df.fault_code_type_3_count/df.usage_count_2))\
.withColumn("fault_code_type_4_count_per_usage2",F.when(df.usage_count_2==0,0).otherwise(df.fault_code_type_4_count/df.usage_count_2))
# COMMAND ----------
# MAGIC %md #### Similar to what we did for "categorical_1 to 4", in the following cell we binned performance related features and created new categorical features.
# COMMAND ----------
# Define the list of performance related features which we would like to perform binning
c_names = ['problem_type_1', 'problem_type_3', 'problem_type_4',
'problem_type_1_per_usage1','problem_type_2_per_usage1','problem_type_3_per_usage1','problem_type_4_per_usage1',
'problem_type_1_per_usage2','problem_type_2_per_usage2','problem_type_3_per_usage2','problem_type_4_per_usage2',
'fault_code_type_1_count', 'fault_code_type_2_count', 'fault_code_type_3_count', 'fault_code_type_4_count',
'fault_code_type_1_count_per_usage1','fault_code_type_2_count_per_usage1',
'fault_code_type_3_count_per_usage1', 'fault_code_type_4_count_per_usage1',
'fault_code_type_1_count_per_usage2','fault_code_type_2_count_per_usage2',
'fault_code_type_3_count_per_usage2', 'fault_code_type_4_count_per_usage2']
# Bin size ('0','1','>1') for most of the performance features because majority of the values fall into the range of 0 to slightly more than 1.
def performanceCat(num):
if num == 0: return '0'
elif num ==1: return '1'
else: return '>1'
performanceCatUdf = udf(performanceCat, StringType())
for col_n in c_names:
df = df.withColumn(col_n+'_category',performanceCatUdf(df[col_n]))
# Use different bin for "problem_type_2" because we saw a larger spread of the values
def problem_type_2_Cat(num):
if num == 0: return '0'
elif 0 < num and num <= 5: return '1-5'
elif 5 < num and num <= 10: return '6-10'
else: return '>10'
problem_type_2_CatUdf = udf(problem_type_2_Cat, StringType())
df = df.withColumn('problem_type_2_category',problem_type_2_CatUdf(df['problem_type_2']))
print(df.select('problem_type_1_category').distinct().rdd.map(lambda r: r[0]).collect())
print(df.select('problem_type_2_category').distinct().rdd.map(lambda r: r[0]).collect())
# COMMAND ----------
# MAGIC %md #### One hot encode some categotical features
# COMMAND ----------
# MAGIC %%time
# MAGIC
# MAGIC # Define the list of categorical features
# MAGIC
# MAGIC catVarNames = ['problem_type_1_category', 'problem_type_2_category',
# MAGIC 'problem_type_3_category', 'problem_type_4_category',
# MAGIC 'problem_type_1_per_usage1_category', 'problem_type_2_per_usage1_category',
# MAGIC 'problem_type_3_per_usage1_category', 'problem_type_4_per_usage1_category',
# MAGIC 'problem_type_1_per_usage2_category', 'problem_type_2_per_usage2_category',
# MAGIC 'problem_type_3_per_usage2_category', 'problem_type_4_per_usage2_category',
# MAGIC 'fault_code_type_1_count_category', 'fault_code_type_2_count_category',
# MAGIC 'fault_code_type_3_count_category', 'fault_code_type_4_count_category',
# MAGIC 'fault_code_type_1_count_per_usage1_category', 'fault_code_type_2_count_per_usage1_category',
# MAGIC 'fault_code_type_3_count_per_usage1_category', 'fault_code_type_4_count_per_usage1_category',
# MAGIC 'fault_code_type_1_count_per_usage2_category', 'fault_code_type_2_count_per_usage2_category',
# MAGIC 'fault_code_type_3_count_per_usage2_category', 'fault_code_type_4_count_per_usage2_category',
# MAGIC 'cat1','cat2','cat3','cat4']
# MAGIC
# MAGIC
# MAGIC sIndexers = [StringIndexer(inputCol=x, outputCol=x + '_indexed') for x in catVarNames]
# MAGIC
# MAGIC df_cat = Pipeline(stages=sIndexers).fit(df).transform(df)
# MAGIC
# MAGIC # Remove columns with only 1 level (compute variances of columns)
# MAGIC catColVariance = df_cat.select(
# MAGIC *(F.variance(df_cat[c]).alias(c + '_sd') for c in [cv + '_indexed' for cv in catVarNames]))
# MAGIC catColVariance = catColVariance.rdd.flatMap(lambda x: x).collect()
# MAGIC catVarNames = [catVarNames[k] for k in [i for i, v in enumerate(catColVariance) if v != 0]]
# MAGIC
# MAGIC # Encode
# MAGIC ohEncoders = [OneHotEncoder(inputCol=x + '_indexed', outputCol=x + '_encoded')
# MAGIC for x in catVarNames]
# MAGIC ohPipelineModel = Pipeline(stages=ohEncoders).fit(df_cat)
# MAGIC df_cat = ohPipelineModel.transform(df_cat)
# MAGIC
# MAGIC drop_list = [col_n for col_n in df_cat.columns if 'indexed' in col_n]
# MAGIC df = df_cat.select([column for column in df_cat.columns if column not in drop_list])
# MAGIC
# MAGIC print(df['problem_type_1_category_encoded',].head(3))
# COMMAND ----------
# MAGIC %md #### Use PCA to reduce number of features
# MAGIC In Notebook #2, we will perform a series of rolling computation for various features, time windows and aggregated statistics. This process is very computational expensive and therefore we need to first reduce the feature list.
# MAGIC In the dataset, there are many warning related features and most of them have value of 0 so quite sparse. We can group or find correlations among those warning features, reduce the feature space for downstream work.
# COMMAND ----------
## check the number of warning related features
len([col_n for col_n in df.columns if 'warning' in col_n])
# COMMAND ----------
# MAGIC %%time
# MAGIC
# MAGIC #----------------------------- PCA feature grouping on warning related features --------------------------#
# MAGIC
# MAGIC df = df.withColumn("key", concat(df.deviceid,lit("_"),df.date))
# MAGIC
# MAGIC # step 1
# MAGIC # Use RFormula to create the feature vector
# MAGIC formula = RFormula(formula = "~" + "+".join(warning_all))
# MAGIC output = formula.fit(df).transform(df).select("key","features")
# MAGIC
# MAGIC
# MAGIC # step 2
# MAGIC # Before PCA, we need to standardize the features, it is very important...
# MAGIC # Note that StandardScaler does not work for sparse vector unless withMean=false
# MAGIC # OR we can convert sparse vector to dense vector first using toArray
# MAGIC scaler = StandardScaler(inputCol="features", outputCol="scaledFeatures",
# MAGIC withStd=True, withMean=False)
# MAGIC
# MAGIC # Compute summary statistics by fitting the StandardScaler
# MAGIC scalerModel = scaler.fit(output)
# MAGIC
# MAGIC # Normalize each feature to have unit standard deviation.
# MAGIC scaledData = scalerModel.transform(output)
# MAGIC
# MAGIC
# MAGIC # step 3
# MAGIC pca = PCA(k=20, inputCol="scaledFeatures", outputCol="pcaFeatures")
# MAGIC model = pca.fit(scaledData)
# MAGIC result = model.transform(scaledData).select("key","pcaFeatures")
# MAGIC
# MAGIC # to check how much variance explained by each component
# MAGIC print(model.explainedVariance)
# MAGIC
# MAGIC
# MAGIC # step 4
# MAGIC # convert pca result, a vector column, to mulitple columns
# MAGIC # The reason why we did this was because later on we need to use those columns to generate more features (rolling compute)
# MAGIC def extract(row):
# MAGIC return (row.key, ) + tuple(float(x) for x in row.pcaFeatures.values)
# MAGIC
# MAGIC pca_outcome = result.rdd.map(extract).toDF(["key"])
# MAGIC
# MAGIC # rename columns of pca_outcome
# MAGIC oldColumns = pca_outcome.schema.names
# MAGIC
# MAGIC newColumns = ["key",
# MAGIC "pca_1_warn","pca_2_warn","pca_3_warn","pca_4_warn","pca_5_warn",
# MAGIC "pca_6_warn","pca_7_warn","pca_8_warn","pca_9_warn","pca_10_warn",
# MAGIC "pca_11_warn","pca_12_warn","pca_13_warn","pca_14_warn","pca_15_warn",
# MAGIC "pca_16_warn","pca_17_warn","pca_18_warn","pca_19_warn","pca_20_warn",
# MAGIC ]
# MAGIC
# MAGIC pca_result = reduce(lambda pca_outcome, idx: pca_outcome.withColumnRenamed(oldColumns[idx], newColumns[idx]), \
# MAGIC xrange(len(oldColumns)), pca_outcome)
# MAGIC
# MAGIC df = df.join(pca_result, 'key', 'inner')
# MAGIC
# MAGIC print(df['pca_1_warn',].head(3))
# MAGIC
# MAGIC warning_drop_list = [col_n for col_n in df.columns if 'warning_' in col_n]
# MAGIC df = df.select([column for column in df.columns if column not in warning_drop_list])
# COMMAND ----------
# I would like to visualize the relationship among the 20 pca components
# Register dataframe as a temp table in SQL context
df.createOrReplaceTempView("df2")
sqlStatement2 = """
SELECT pca_1_warn, pca_2_warn, pca_3_warn, pca_4_warn, pca_5_warn,
pca_6_warn, pca_7_warn, pca_8_warn, pca_9_warn, pca_10_warn,
pca_11_warn, pca_12_warn, pca_13_warn, pca_14_warn, pca_15_warn,
pca_16_warn, pca_17_warn, pca_18_warn, pca_19_warn, pca_20_warn
FROM df2
"""
plotdata2 = spark.sql(sqlStatement2).toPandas();
%matplotlib inline
# show correlation matrix heatmap to explore some potential interesting patterns
corr = plotdata2.corr()
sns.heatmap(corr,
xticklabels=corr.columns.values,
yticklabels=corr.columns.values)
# From the plot we can see the 20 pca components do not overlap too much which is expected
# COMMAND ----------
# MAGIC %md ## Save Result
# MAGIC
# MAGIC Due to the lazy compute of Spark, it is usually more efficient to break down the workload into chunks and materialize the intermediate results. For example, we divided the tutorial into three notebooks, the result from Notebook #1 would be used as input data for Notebook #2.
# COMMAND ----------
# MAGIC %%time
# MAGIC /mnt/Exploratory/WCLD/BetaProject
# MAGIC df.write.mode('overwrite').parquet('/mnt/Exploratory/WCLD/BetaProject/notebook1_result.parquet')
# COMMAND ----------
| morethan20000'
cat4Udf | conditional_block |
EffectObject.ts | //////////////////////////////////////////////////////////////////////////////////////
//
// Copyright (c) 2014-2015, MicroGame Technology Inc.
// All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of the Egret nor the
// names of its contributors may be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY EGRET AND CONTRIBUTORS "AS IS" AND ANY EXPRESS
// OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
// IN NO EVENT SHALL EGRET AND CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;LOSS OF USE, DATA,
// OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
// EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
//////////////////////////////////////////////////////////////////////////////////////
module d5power
{
export class EffectObject extends egret.DisplayObjectContainer implements ISpriteSheetWaiter
{
public static MAX_POOL_NUM:number = 100;
private static _pool_event:Array<EffectObject>=[];
public owner:IGD;
public target:IGD;
public skillid:number;
public static getInstance():EffectObject
{
var obj:EffectObject;
if(EffectObject._pool_event.length)
{
obj = EffectObject._pool_event.pop();
}else{
obj = new EffectObject();
obj._monitor = new egret.Bitmap();
}
return obj;
}
private static back2pool(obj:EffectObject):void
{
if(EffectObject._pool_event.length<EffectObject.MAX_POOL_NUM && EffectObject._pool_event.indexOf(obj)==-1) EffectObject._pool_event.push(obj);
}
public constructor(){
super();
}
public get loadID():number
{
return this._loadID;
}
public updateRayCopy(deep:number,angle:number):void
{
this._moveAngle+=angle*deep;
this._sonAngle+=angle*deep;
}
private _lastRender:number;
private _impl:EffectImplement;
private _playFrame:number=0;
private _totalframe:number;
public _spriteSheet:IDisplayer;
protected _monitor:egret.Bitmap;
private _loadID:number=0;
private _sheet:egret.SpriteSheet;
private _res:string;
protected _drawAction:Function;
private _liveStart:number = 0;
private _moveAngle:number;
private _sonAngle:number;
private _posx:number;
private _posy:number;
private _dir:number;
private _sonDeep:number;
public deleting:boolean=false;
private _offX:number;
private _offY:number;
public setup(start:number,data:EffectImplement,dir:number,posx:number,posy:number):void
{
this._impl = data;
this._moveAngle = data.getMoveAngle(dir);
this._sonAngle = data.getSonAngle(dir);
this._dir = dir;
this._posx = posx;
this._posy = posy;
this._liveStart = start;
this._sonDeep = data.sonFrameDeep;
this._monitor.alpha = 1;
this._monitor.rotation = 0;
this._monitor.scaleX = this._monitor.scaleY = 1;
this.deleting = false;
var res:string = this._impl.res;
var p:Array<any> = data.getDirectionPos(dir);
this._offX = p[0];
this._offY = p[1];
this._posx+=this._offX;
this._posy+=this._offY;
if(res.indexOf('.json')!=-1)
{
this._res = res.substr(0,res.length-5);
this._loadID++;
D5SpriteSheet.getInstance(this._res+'.png',this);
}
else if(res.indexOf('.png')!=-1)
{
this._res = res;
this.onTextureComplete(D5UIResourceData.getData(this._res).getResource(0));
}
}
private onTextureComplete(data:egret.Texture):void
{
this._monitor.texture = data;
this._totalframe = 5;
this._drawAction = this.draw;
this.runPos();
this._impl.lowLv ? D5Game.me.bottomLayer.addChild(this._monitor) : D5Game.me.topLayer.addChild(this._monitor);
}
public onSpriteSheepReady(data:IDisplayer):void
{
if (this._spriteSheet) this._spriteSheet.unlink();
if(data == null) return;
this._spriteSheet = data;
this._totalframe = this._spriteSheet.totalFrame;
this._drawAction = this.drawJson;
this.runPos();
this._impl.lowLv ? D5Game.me.bottomLayer.addChild(this._monitor) : D5Game.me.topLayer.addChild(this._monitor);
}
private runPos():void
{
var target:egret.Point = D5Game.me.map.getScreenPostion(this._posx,this._posy);
if(this._monitor)
{
this._monitor.x = target.x;
this._monitor.y = target.y;
if(this._spriteSheet)
{
this._monitor.x+=this._spriteSheet.gX;
this._monitor.y+=this._spriteSheet.gY;
}else{
this._monitor.x-=this._monitor.width>>1;
this._monitor.y-=this._monitor.height>>1;
}
}
}
private _lastCheck:number;
public render():void
{
this._drawAction!=null ? this._drawAction() : 0;
}
private draw():void
{
var t:number = egret.getTimer();
if(this._impl.live>0 && t-this._liveStart>this._impl.live)
{
this.dispose();
return;
}
var cost_time:number = (t - this._liveStart) / this._impl.playSpeed;
if (this._playFrame != cost_time)
{
this._playFrame = Math.floor(cost_time % this._totalframe);
if(this._impl.moveSpeed!=0)
{
this._posx+=Math.cos(this._moveAngle)*this._impl.moveSpeed;
this._posy+=Math.sin(this._moveAngle)*this._impl.moveSpeed;
}
this.runPos();
if(this._impl.alphaSpeed!=0)
{
this._monitor.alpha+=this._impl.alphaSpeed;
if(this._monitor.alpha<=0)
{
this.dispose();
return;
}
}
if(this._impl.zoomSpeed!=0)
{
this._monitor.scaleX+=this._impl.zoomSpeed;
this._monitor.scaleY+=this._impl.zoomSpeed;
}
if(this._impl.rotationSpeed!=0)
{
this.rotation+=this._impl.rotationSpeed;
}
if(this._playFrame==this._impl.sonFrame && this._sonDeep>0)
|
if(this._playFrame==this._totalframe-1 && this._totalframe>0)
{
this.dispose();
}
}
}
private drawJson():void
{
if(egret.getTimer()-this._lastRender<this._spriteSheet.renderTime) return;
this.draw();
this._lastRender = egret.getTimer();
var direction:number = 0;
this._monitor.texture = this._spriteSheet.getTexture(direction,this._playFrame);
if(this._spriteSheet.uvList)
{
var f: number = direction * this._spriteSheet.totalFrame + this._playFrame;
this._monitor.x+= this._spriteSheet.uvList[f].offX;
this._monitor.y+= this._spriteSheet.uvList[f].offY;
}
else
{
this._monitor.x+= this._spriteSheet.gX;
this._monitor.y+= this._spriteSheet.gY;
}
this._playFrame++;
if(this._playFrame>=this._spriteSheet.totalFrame) this._playFrame=0;
}
/**
* @param allPro 是否克隆全部属性
*/
public clone(allPro:boolean=false):EffectObject
{
var p:EffectObject = EffectObject.getInstance();
p.setup(D5Game.me.timer,this._impl,this._dir,this._posx,this._posy);
p._moveAngle = this._moveAngle;
p._sonAngle = this._sonAngle;
p._posx = this._posx;
p._posy = this._posy;
return p;
}
public dispose():void
{
this.deleting = true;
this.owner = null;
this.target = null;
this.skillid = 0;
if(this._monitor && this._monitor.parent) this._monitor.parent.removeChild(this._monitor);
EffectObject.back2pool(this);
}
}
}
| {
var obj:EffectObject = this.clone(true);
obj._sonDeep = --this._sonDeep;
obj._posx = this._posx+this._impl.sonSpeed*Math.cos(this._sonAngle);
obj._posy = this._posy+this._impl.sonSpeed*Math.sin(this._sonAngle);
D5Game.me.addEffect(obj);
} | conditional_block |
EffectObject.ts | //////////////////////////////////////////////////////////////////////////////////////
//
// Copyright (c) 2014-2015, MicroGame Technology Inc.
// All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of the Egret nor the
// names of its contributors may be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY EGRET AND CONTRIBUTORS "AS IS" AND ANY EXPRESS
// OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
// IN NO EVENT SHALL EGRET AND CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;LOSS OF USE, DATA,
// OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
// EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
//////////////////////////////////////////////////////////////////////////////////////
module d5power
{
export class EffectObject extends egret.DisplayObjectContainer implements ISpriteSheetWaiter
{
public static MAX_POOL_NUM:number = 100;
private static _pool_event:Array<EffectObject>=[];
public owner:IGD;
public target:IGD;
public skillid:number;
public static getInstance():EffectObject
{
var obj:EffectObject;
if(EffectObject._pool_event.length)
{
obj = EffectObject._pool_event.pop();
}else{
obj = new EffectObject();
obj._monitor = new egret.Bitmap();
}
return obj;
}
private static back2pool(obj:EffectObject):void
{
if(EffectObject._pool_event.length<EffectObject.MAX_POOL_NUM && EffectObject._pool_event.indexOf(obj)==-1) EffectObject._pool_event.push(obj);
}
public constructor(){
super();
}
public get loadID():number
{
return this._loadID;
}
public updateRayCopy(deep:number,angle:number):void
{
this._moveAngle+=angle*deep;
this._sonAngle+=angle*deep;
}
private _lastRender:number;
private _impl:EffectImplement;
private _playFrame:number=0;
private _totalframe:number;
public _spriteSheet:IDisplayer;
protected _monitor:egret.Bitmap;
private _loadID:number=0;
private _sheet:egret.SpriteSheet;
private _res:string;
protected _drawAction:Function;
private _liveStart:number = 0;
private _moveAngle:number;
private _sonAngle:number;
private _posx:number;
private _posy:number;
private _dir:number;
private _sonDeep:number;
public deleting:boolean=false;
private _offX:number;
private _offY:number;
public setup(start:number,data:EffectImplement,dir:number,posx:number,posy:number):void
{
this._impl = data;
this._moveAngle = data.getMoveAngle(dir);
this._sonAngle = data.getSonAngle(dir);
this._dir = dir;
this._posx = posx;
this._posy = posy;
this._liveStart = start;
this._sonDeep = data.sonFrameDeep;
this._monitor.alpha = 1;
this._monitor.rotation = 0;
this._monitor.scaleX = this._monitor.scaleY = 1;
this.deleting = false;
var res:string = this._impl.res;
var p:Array<any> = data.getDirectionPos(dir);
this._offX = p[0];
this._offY = p[1];
this._posx+=this._offX;
this._posy+=this._offY;
if(res.indexOf('.json')!=-1)
{
this._res = res.substr(0,res.length-5);
this._loadID++;
D5SpriteSheet.getInstance(this._res+'.png',this);
}
else if(res.indexOf('.png')!=-1)
{
this._res = res;
this.onTextureComplete(D5UIResourceData.getData(this._res).getResource(0));
}
}
private onTextureComplete(data:egret.Texture):void
{
this._monitor.texture = data;
this._totalframe = 5;
this._drawAction = this.draw;
this.runPos();
this._impl.lowLv ? D5Game.me.bottomLayer.addChild(this._monitor) : D5Game.me.topLayer.addChild(this._monitor);
}
public onSpriteSheepReady(data:IDisplayer):void
{
if (this._spriteSheet) this._spriteSheet.unlink();
if(data == null) return;
this._spriteSheet = data;
this._totalframe = this._spriteSheet.totalFrame;
this._drawAction = this.drawJson;
this.runPos();
this._impl.lowLv ? D5Game.me.bottomLayer.addChild(this._monitor) : D5Game.me.topLayer.addChild(this._monitor);
}
private | ():void
{
var target:egret.Point = D5Game.me.map.getScreenPostion(this._posx,this._posy);
if(this._monitor)
{
this._monitor.x = target.x;
this._monitor.y = target.y;
if(this._spriteSheet)
{
this._monitor.x+=this._spriteSheet.gX;
this._monitor.y+=this._spriteSheet.gY;
}else{
this._monitor.x-=this._monitor.width>>1;
this._monitor.y-=this._monitor.height>>1;
}
}
}
private _lastCheck:number;
public render():void
{
this._drawAction!=null ? this._drawAction() : 0;
}
private draw():void
{
var t:number = egret.getTimer();
if(this._impl.live>0 && t-this._liveStart>this._impl.live)
{
this.dispose();
return;
}
var cost_time:number = (t - this._liveStart) / this._impl.playSpeed;
if (this._playFrame != cost_time)
{
this._playFrame = Math.floor(cost_time % this._totalframe);
if(this._impl.moveSpeed!=0)
{
this._posx+=Math.cos(this._moveAngle)*this._impl.moveSpeed;
this._posy+=Math.sin(this._moveAngle)*this._impl.moveSpeed;
}
this.runPos();
if(this._impl.alphaSpeed!=0)
{
this._monitor.alpha+=this._impl.alphaSpeed;
if(this._monitor.alpha<=0)
{
this.dispose();
return;
}
}
if(this._impl.zoomSpeed!=0)
{
this._monitor.scaleX+=this._impl.zoomSpeed;
this._monitor.scaleY+=this._impl.zoomSpeed;
}
if(this._impl.rotationSpeed!=0)
{
this.rotation+=this._impl.rotationSpeed;
}
if(this._playFrame==this._impl.sonFrame && this._sonDeep>0)
{
var obj:EffectObject = this.clone(true);
obj._sonDeep = --this._sonDeep;
obj._posx = this._posx+this._impl.sonSpeed*Math.cos(this._sonAngle);
obj._posy = this._posy+this._impl.sonSpeed*Math.sin(this._sonAngle);
D5Game.me.addEffect(obj);
}
if(this._playFrame==this._totalframe-1 && this._totalframe>0)
{
this.dispose();
}
}
}
private drawJson():void
{
if(egret.getTimer()-this._lastRender<this._spriteSheet.renderTime) return;
this.draw();
this._lastRender = egret.getTimer();
var direction:number = 0;
this._monitor.texture = this._spriteSheet.getTexture(direction,this._playFrame);
if(this._spriteSheet.uvList)
{
var f: number = direction * this._spriteSheet.totalFrame + this._playFrame;
this._monitor.x+= this._spriteSheet.uvList[f].offX;
this._monitor.y+= this._spriteSheet.uvList[f].offY;
}
else
{
this._monitor.x+= this._spriteSheet.gX;
this._monitor.y+= this._spriteSheet.gY;
}
this._playFrame++;
if(this._playFrame>=this._spriteSheet.totalFrame) this._playFrame=0;
}
/**
* @param allPro 是否克隆全部属性
*/
public clone(allPro:boolean=false):EffectObject
{
var p:EffectObject = EffectObject.getInstance();
p.setup(D5Game.me.timer,this._impl,this._dir,this._posx,this._posy);
p._moveAngle = this._moveAngle;
p._sonAngle = this._sonAngle;
p._posx = this._posx;
p._posy = this._posy;
return p;
}
public dispose():void
{
this.deleting = true;
this.owner = null;
this.target = null;
this.skillid = 0;
if(this._monitor && this._monitor.parent) this._monitor.parent.removeChild(this._monitor);
EffectObject.back2pool(this);
}
}
}
| runPos | identifier_name |
EffectObject.ts | //////////////////////////////////////////////////////////////////////////////////////
//
// Copyright (c) 2014-2015, MicroGame Technology Inc.
// All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of the Egret nor the
// names of its contributors may be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY EGRET AND CONTRIBUTORS "AS IS" AND ANY EXPRESS
// OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
// IN NO EVENT SHALL EGRET AND CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;LOSS OF USE, DATA,
// OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
// EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
//////////////////////////////////////////////////////////////////////////////////////
module d5power
{
export class EffectObject extends egret.DisplayObjectContainer implements ISpriteSheetWaiter
{
public static MAX_POOL_NUM:number = 100;
private static _pool_event:Array<EffectObject>=[];
public owner:IGD;
public target:IGD;
public skillid:number;
public static getInstance():EffectObject
{
var obj:EffectObject;
if(EffectObject._pool_event.length)
{
obj = EffectObject._pool_event.pop();
}else{
obj = new EffectObject();
obj._monitor = new egret.Bitmap();
}
return obj;
}
private static back2pool(obj:EffectObject):void
{
if(EffectObject._pool_event.length<EffectObject.MAX_POOL_NUM && EffectObject._pool_event.indexOf(obj)==-1) EffectObject._pool_event.push(obj);
}
public constructor(){
super();
}
public get loadID():number
{
return this._loadID;
}
public updateRayCopy(deep:number,angle:number):void
{
this._moveAngle+=angle*deep;
this._sonAngle+=angle*deep;
}
private _lastRender:number;
private _impl:EffectImplement;
private _playFrame:number=0;
private _totalframe:number;
public _spriteSheet:IDisplayer;
protected _monitor:egret.Bitmap;
private _loadID:number=0;
private _sheet:egret.SpriteSheet;
private _res:string;
protected _drawAction:Function;
private _liveStart:number = 0;
private _moveAngle:number;
private _sonAngle:number;
private _posx:number;
private _posy:number;
private _dir:number;
private _sonDeep:number;
public deleting:boolean=false;
private _offX:number;
private _offY:number;
public setup(start:number,data:EffectImplement,dir:number,posx:number,posy:number):void
{
this._impl = data;
this._moveAngle = data.getMoveAngle(dir);
this._sonAngle = data.getSonAngle(dir);
this._dir = dir;
this._posx = posx;
this._posy = posy;
this._liveStart = start;
this._sonDeep = data.sonFrameDeep;
this._monitor.alpha = 1;
this._monitor.rotation = 0;
this._monitor.scaleX = this._monitor.scaleY = 1;
this.deleting = false;
var res:string = this._impl.res;
var p:Array<any> = data.getDirectionPos(dir);
this._offX = p[0];
this._offY = p[1];
this._posx+=this._offX;
this._posy+=this._offY;
if(res.indexOf('.json')!=-1)
{
| {
this._res = res;
this.onTextureComplete(D5UIResourceData.getData(this._res).getResource(0));
}
}
private onTextureComplete(data:egret.Texture):void
{
this._monitor.texture = data;
this._totalframe = 5;
this._drawAction = this.draw;
this.runPos();
this._impl.lowLv ? D5Game.me.bottomLayer.addChild(this._monitor) : D5Game.me.topLayer.addChild(this._monitor);
}
public onSpriteSheepReady(data:IDisplayer):void
{
if (this._spriteSheet) this._spriteSheet.unlink();
if(data == null) return;
this._spriteSheet = data;
this._totalframe = this._spriteSheet.totalFrame;
this._drawAction = this.drawJson;
this.runPos();
this._impl.lowLv ? D5Game.me.bottomLayer.addChild(this._monitor) : D5Game.me.topLayer.addChild(this._monitor);
}
private runPos():void
{
var target:egret.Point = D5Game.me.map.getScreenPostion(this._posx,this._posy);
if(this._monitor)
{
this._monitor.x = target.x;
this._monitor.y = target.y;
if(this._spriteSheet)
{
this._monitor.x+=this._spriteSheet.gX;
this._monitor.y+=this._spriteSheet.gY;
}else{
this._monitor.x-=this._monitor.width>>1;
this._monitor.y-=this._monitor.height>>1;
}
}
}
private _lastCheck:number;
public render():void
{
this._drawAction!=null ? this._drawAction() : 0;
}
private draw():void
{
var t:number = egret.getTimer();
if(this._impl.live>0 && t-this._liveStart>this._impl.live)
{
this.dispose();
return;
}
var cost_time:number = (t - this._liveStart) / this._impl.playSpeed;
if (this._playFrame != cost_time)
{
this._playFrame = Math.floor(cost_time % this._totalframe);
if(this._impl.moveSpeed!=0)
{
this._posx+=Math.cos(this._moveAngle)*this._impl.moveSpeed;
this._posy+=Math.sin(this._moveAngle)*this._impl.moveSpeed;
}
this.runPos();
if(this._impl.alphaSpeed!=0)
{
this._monitor.alpha+=this._impl.alphaSpeed;
if(this._monitor.alpha<=0)
{
this.dispose();
return;
}
}
if(this._impl.zoomSpeed!=0)
{
this._monitor.scaleX+=this._impl.zoomSpeed;
this._monitor.scaleY+=this._impl.zoomSpeed;
}
if(this._impl.rotationSpeed!=0)
{
this.rotation+=this._impl.rotationSpeed;
}
if(this._playFrame==this._impl.sonFrame && this._sonDeep>0)
{
var obj:EffectObject = this.clone(true);
obj._sonDeep = --this._sonDeep;
obj._posx = this._posx+this._impl.sonSpeed*Math.cos(this._sonAngle);
obj._posy = this._posy+this._impl.sonSpeed*Math.sin(this._sonAngle);
D5Game.me.addEffect(obj);
}
if(this._playFrame==this._totalframe-1 && this._totalframe>0)
{
this.dispose();
}
}
}
private drawJson():void
{
if(egret.getTimer()-this._lastRender<this._spriteSheet.renderTime) return;
this.draw();
this._lastRender = egret.getTimer();
var direction:number = 0;
this._monitor.texture = this._spriteSheet.getTexture(direction,this._playFrame);
if(this._spriteSheet.uvList)
{
var f: number = direction * this._spriteSheet.totalFrame + this._playFrame;
this._monitor.x+= this._spriteSheet.uvList[f].offX;
this._monitor.y+= this._spriteSheet.uvList[f].offY;
}
else
{
this._monitor.x+= this._spriteSheet.gX;
this._monitor.y+= this._spriteSheet.gY;
}
this._playFrame++;
if(this._playFrame>=this._spriteSheet.totalFrame) this._playFrame=0;
}
/**
* @param allPro 是否克隆全部属性
*/
public clone(allPro:boolean=false):EffectObject
{
var p:EffectObject = EffectObject.getInstance();
p.setup(D5Game.me.timer,this._impl,this._dir,this._posx,this._posy);
p._moveAngle = this._moveAngle;
p._sonAngle = this._sonAngle;
p._posx = this._posx;
p._posy = this._posy;
return p;
}
public dispose():void
{
this.deleting = true;
this.owner = null;
this.target = null;
this.skillid = 0;
if(this._monitor && this._monitor.parent) this._monitor.parent.removeChild(this._monitor);
EffectObject.back2pool(this);
}
}
} | this._res = res.substr(0,res.length-5);
this._loadID++;
D5SpriteSheet.getInstance(this._res+'.png',this);
}
else if(res.indexOf('.png')!=-1)
| random_line_split |
config.go | package abcconfig
import (
"fmt"
"reflect"
"strings"
"time"
"github.com/friendsofgo/errors"
"github.com/kat-co/vala"
"github.com/spf13/pflag"
"github.com/spf13/viper"
)
// Config object used to initialize configuration
type Config struct {
// The config file path, overwritten in tests to point to a tmp file
File string
// Specify which environment to load, empty string means pull the
// env from the configuration file, cmdline and env vars.
LoadEnv string
// Prefix the environment variables with this name so that the config
// variables don't conflict with other abcweb apps
EnvPrefix string
}
// NewConfig creates a new Config object used to initialize configuration
func NewConfig(envPrefix string) *Config {
return &Config{
File: "config.toml",
EnvPrefix: envPrefix,
}
}
// AppConfig struct includes the necessary abcweb config components.
// If you'd rather use your own struct so that you can add new values
// to your configuration you can do so, but make sure you include
// *ServerConfig objects and *DBConfig objects like below (if desired).
//
// If you do not wish to use a database then you can exclude the DBConfig
// struct in your own struct, but if using this AppConfig struct then
// DBConfig MUST be initialized and database configuration must be present.
type AppConfig struct {
// The active environment section
Env string `toml:"env" mapstructure:"env" env:"ENV"`
Server ServerConfig `toml:"server" mapstructure:"server"`
DB DBConfig `toml:"db" mapstructure:"db"`
}
// ServerConfig is config for the app loaded through environment variables,
// command line, or the config.toml file.
type ServerConfig struct {
// LiveReload enabled or disabled
LiveReload bool `toml:"live-reload" mapstructure:"live-reload" env:"SERVER_LIVE_RELOAD"`
// Use the production logger (JSON and log level warn) or the
// development logger (console and log level info)
ProdLogger bool `toml:"prod-logger" mapstructure:"prod-logger" env:"SERVER_PROD_LOGGER"` | Bind string `toml:"bind" mapstructure:"bind" env:"SERVER_BIND"`
// https bind address. ":<port>" for all interfaces
TLSBind string `toml:"tls-bind" mapstructure:"tls-bind" env:"SERVER_TLS_BIND"`
// TLS certificate file path
TLSCertFile string `toml:"tls-cert-file" mapstructure:"tls-cert-file" env:"SERVER_TLS_CERT_FILE"`
// TLS key file path
TLSKeyFile string `toml:"tls-key-file" mapstructure:"tls-key-file" env:"SERVER_TLS_KEY_FILE"`
// Maximum duration before timing out read of the request
ReadTimeout time.Duration `toml:"read-timeout" mapstructure:"read-timeout" env:"SERVER_READ_TIMEOUT"`
// Maximum duration before timing out write of the response
WriteTimeout time.Duration `toml:"write-timeout" mapstructure:"write-timeout" env:"SERVER_WRITE_TIMEOUT"`
// Maximum duration before timing out idle keep-alive connection
IdleTimeout time.Duration `toml:"idle-timeout" mapstructure:"idle-timeout" env:"SERVER_IDLE_TIMEOUT"`
// Use manifest.json assets mapping
AssetsManifest bool `toml:"assets-manifest" mapstructure:"assets-manifest" env:"SERVER_ASSETS_MANIFEST"`
// Disable browsers caching asset files by setting response headers
AssetsNoCache bool `toml:"assets-no-cache" mapstructure:"assets-no-cache" env:"SERVER_ASSETS_NO_CACHE"`
// RenderRecompile enables recompilation of the template on every render call.
// This should be used in development mode so no server restart is required
// on template file changes.
RenderRecompile bool `toml:"render-recompile" mapstructure:"render-recompile" env:"SERVER_RENDER_RECOMPILE"`
// Use the development mode sessions storer opposed to production mode storer
// defined in app/sessions.go -- Usually a cookie storer for dev
// and disk storer for prod.
SessionsDevStorer bool `toml:"sessions-dev-storer" mapstructure:"sessions-dev-storer" env:"SERVER_SESSIONS_DEV_STORER"`
// PublicPath defaults to "public" but can be set to something else
// by the {{.AppEnvName}}_SERVER_PUBLIC_PATH environment variable.
// This is set by the "abcweb dev" command to instruct the app to
// load assets from a /tmp folder instead of the local public folder.
PublicPath string `toml:"public-path" mapstructure:"public-path" env:"SERVER_PUBLIC_PATH"`
}
// DBConfig holds the Postgres database config for the app loaded through
// environment variables, or the config.toml file.
type DBConfig struct {
// The Postgres database name
DBName string `toml:"dbname" mapstructure:"dbname" env:"DB_DBNAME"`
Host string `toml:"host" mapstructure:"host" env:"DB_HOST"`
Port int `toml:"port" mapstructure:"port" env:"DB_PORT"`
User string `toml:"user" mapstructure:"user" env:"DB_USER"`
Pass string `toml:"pass" mapstructure:"pass" env:"DB_PASS"`
SSLMode string `toml:"sslmode" mapstructure:"sslmode" env:"DB_SSLMODE"`
// Throw an error when the app starts if the database is not
// using the latest migration
EnforceMigration bool `toml:"enforce-migration" mapstructure:"enforce-migration" env:"DB_ENFORCE_MIGRATION"`
}
// Bind your passed in config flags to a new viper
// instance, retrieves the active environment section of your config file using
// that viper instance, and then loads your server and db config into
// the passed in cfg struct and validates the db config is set appropriately.
func (c *Config) Bind(flags *pflag.FlagSet, cfg interface{}) (*viper.Viper, error) {
v, err := c.NewSubViper(flags, cfg)
if err != nil {
return v, err
}
if err := UnmarshalAppConfig(cfg, v); err != nil {
return v, err
}
val := reflect.Indirect(reflect.ValueOf(cfg))
// Check if there's a DBConfig object in the cfg struct.
// If found, then validate all fields on it are set appropriately.
for i := 0; i < val.NumField(); i++ {
dbCfg, ok := val.Field(i).Interface().(DBConfig)
if !ok {
continue
}
if err := ValidateDBConfig(dbCfg); err != nil {
return v, err
}
break
}
return v, nil
}
// NewSubViper returns a viper instance activated against the active environment
// configuration subsection and initialized with the config.toml
// configuration file and the environment variable prefix.
// It also takes in the configuration struct so that it can generate the env
// mappings.
func (c *Config) NewSubViper(flags *pflag.FlagSet, cfg interface{}) (*viper.Viper, error) {
v := viper.New()
if flags != nil {
if err := v.BindPFlags(flags); err != nil {
return nil, err
}
}
if err := c.ConfigureViper(v); err != nil {
return nil, err
}
// Use the env from the config if it's not explicitly set
env := c.LoadEnv
if env == "" {
env = v.GetString("env")
}
v = v.Sub(env)
if v == nil {
return nil, fmt.Errorf("cannot find env section named %s", env)
}
mappings, err := GetTagMappings(cfg)
if err != nil {
return nil, errors.Wrap(err, "unable to get tag mappings for config struct")
}
if c.EnvPrefix != "" {
for _, m := range mappings {
v.BindEnv(m.chain, strings.Join([]string{c.EnvPrefix, m.env}, "_"))
}
} else {
for _, m := range mappings {
v.BindEnv(m.chain, m.env)
}
}
if v == nil {
return nil, fmt.Errorf("unable to load environment %q from %q", env, c.File)
}
if flags != nil {
if err := v.BindPFlags(flags); err != nil {
return nil, err
}
}
v.Set("env", env)
return v, nil
}
// ConfigureViper sets the viper object to use the passed in config toml file
// and also configures the configuration environment variables.
func (c *Config) ConfigureViper(v *viper.Viper) error {
v.SetConfigType("toml")
v.SetConfigFile(c.File)
v.SetEnvPrefix(c.EnvPrefix)
v.SetEnvKeyReplacer(strings.NewReplacer("-", "_"))
if err := v.ReadInConfig(); err != nil {
return err
}
v.AutomaticEnv()
return nil
}
// UnmarshalAppConfig unmarshals the viper's configured config file
// into the passed in cfg object containing an AppConfig
func UnmarshalAppConfig(cfg interface{}, v *viper.Viper) error {
err := v.Unmarshal(cfg)
if err != nil {
return err
}
val := reflect.Indirect(reflect.ValueOf(cfg))
// if cfg has an embedded AppConfig then we need to unmarshal
// directly into that and overwrite it in the parent struct,
// since its another layer of indirection and viper
// can't handle it magically.
for i := 0; i < val.NumField(); i++ {
appCfg, ok := val.Field(i).Interface().(AppConfig)
if !ok {
continue
}
v.Unmarshal(&appCfg)
val.Field(i).Set(reflect.ValueOf(appCfg))
// overwrite val to point to the AppConfig so DBConfig can be set below.
val = val.Field(i)
break
}
// Find *DBConfig and set object appropriately
for i := 0; i < val.NumField(); i++ {
dbCfg, ok := val.Field(i).Interface().(DBConfig)
if !ok {
continue
}
if dbCfg.Port == 0 {
dbCfg.Port = 5432
}
if dbCfg.SSLMode == "" {
dbCfg.SSLMode = "require"
}
val.Field(i).Set(reflect.ValueOf(dbCfg))
// Finished working on the db cfg struct, so break out
break
}
return nil
}
// ValidateDBConfig returns an error if any of the required db config
// fields are not set to their appropriate values.
func ValidateDBConfig(cfg DBConfig) error {
err := vala.BeginValidation().Validate(
vala.StringNotEmpty(cfg.User, "user"),
vala.StringNotEmpty(cfg.Host, "host"),
vala.Not(vala.Equals(cfg.Port, 0, "port")),
vala.StringNotEmpty(cfg.DBName, "dbname"),
vala.StringNotEmpty(cfg.SSLMode, "sslmode"),
).Check()
if err != nil {
return err
}
return nil
}
// Mapping represents a chain which is a list of nested object mapstructures
// joined together and seperated by dots (i.e. one.two.three), and the
// accompanying environment variable tag value for the last item in the chain
type Mapping struct {
chain string
env string
}
// Mappings is a slice of mapping
type Mappings []Mapping
func getTagMappingsRecursive(chain string, v reflect.Value) (Mappings, error) {
mappings := Mappings{}
for i := 0; i < v.NumField(); i++ {
cv := v.Field(i)
tag := v.Type().Field(i).Tag
ms := tag.Get("mapstructure")
env := tag.Get("env")
if cv.Kind() == reflect.Ptr {
nv := reflect.Indirect(cv)
// If it has no mapstructure set then fail gracefully,
// because it's probably not a field that should be read by viper.
// For example, a pointer to something that is late-initialized
// and isn't loaded by Bind or present in the config file.
if !nv.IsValid() && ms == "" {
continue
} else if !nv.IsValid() {
return nil, fmt.Errorf("cannot access non-initialized pointer %#v", cv)
}
// Only indirect struct types, if they're valid
if nv.Kind() == reflect.Struct {
cv = nv
}
}
// nc = newchain
var nc string
if chain != "" {
nc = strings.Join([]string{chain, ms}, ".")
} else {
nc = ms
}
switch cv.Kind() {
case reflect.Struct:
m, err := getTagMappingsRecursive(nc, cv)
if err != nil {
return nil, err
}
mappings = append(mappings, m...)
default:
if env != "" && ms != "" {
mappings = append(mappings, Mapping{chain: nc, env: env})
}
}
}
return mappings, nil
}
// GetTagMappings returns the viper .BindEnv mappings for an entire config
// struct.
func GetTagMappings(cfg interface{}) (Mappings, error) {
return getTagMappingsRecursive("", reflect.Indirect(reflect.ValueOf(cfg)))
}
// NewFlagSet creates the set of flags specific to the server and db config
// and the root level config (like --version, --env)
func NewFlagSet() *pflag.FlagSet {
flags := &pflag.FlagSet{}
flags.AddFlagSet(NewRootFlagSet())
flags.AddFlagSet(NewServerFlagSet())
flags.AddFlagSet(NewDBFlagSet())
return flags
}
// NewRootFlagSet returns a list of top level flags (flags that arent contained
// inside an environment section in the config)
func NewRootFlagSet() *pflag.FlagSet {
flags := &pflag.FlagSet{}
// root level flags
flags.BoolP("version", "", false, "Display the build version hash")
flags.StringP("env", "e", "prod", "The config files environment to load")
return flags
}
// NewServerFlagSet returns a list of flags contained within the [server]
// section of a config
func NewServerFlagSet() *pflag.FlagSet {
flags := &pflag.FlagSet{}
// server subsection flags
flags.BoolP("server.live-reload", "", false, "Enable or disable LiveReload")
flags.BoolP("server.prod-logger", "", true, "Use the production logger, JSON and log level warn")
flags.StringP("server.bind", "", "", `HTTP bind address, eg: ":80"`)
flags.StringP("server.tls-bind", "", "", `HTTPS bind address, eg: ":443"`)
flags.StringP("server.tls-cert-file", "", "", "TLS certificate file path")
flags.StringP("server.tls-key-file", "", "", "TLS key file path")
flags.StringP("server.public-path", "", "public", "The path to the public folder containing assets")
flags.DurationP("server.read-timeout", "", time.Second*10, "Maximum duration before timing out read of the request")
flags.DurationP("server.write-timeout", "", time.Second*15, "Maximum duration before timing out write of the response")
flags.DurationP("server.idle-timeout", "", time.Second*120, "Maximum duration before timing out idle keep-alive connection")
// manifest.json is created as a part of the gulp production "build" task,
// it maps fingerprinted asset names to regular asset names, for example:
// {"js/main.css": "js/e2a3ff9-main.css"}.
// This should only be set to true if doing asset fingerprinting.
flags.BoolP("server.assets-manifest", "", true, "Use manifest.json for mapping asset names to fingerprinted assets")
// This should be used in development mode to prevent browser caching of assets
flags.BoolP("server.assets-no-cache", "", false, "Disable browsers caching asset files by setting response headers")
// This should be used in development mode to avoid having to reload the
// server on every template file modification.
flags.BoolP("server.render-recompile", "", false, "Enable recompilation of the template on each render")
// Defined in app/sessions.go -- Usually cookie storer for dev and disk storer for prod.
flags.BoolP("server.sessions-dev-storer", "", false, "Use the development mode sessions storer (defined in app/sessions.go)")
return flags
}
// NewDBFlagSet returns a list of flags contained within the [db] section
// of a config
func NewDBFlagSet() *pflag.FlagSet {
flags := &pflag.FlagSet{}
// db subsection flags
flags.StringP("db.dbname", "", "", "The database name to connect to")
flags.StringP("db.host", "", "", "The database hostname, e.g localhost")
flags.IntP("db.port", "", 0, "The database port")
flags.StringP("db.user", "", "", "The database username")
flags.StringP("db.pass", "", "", "The database password")
flags.StringP("db.sslmode", "", "", "The database sslmode")
flags.BoolP("db.enforce-migrations", "", true, "Throw error on app start if database is not using latest migration")
return flags
} | // http bind address. ":<port>" for all interfaces | random_line_split |
config.go | package abcconfig
import (
"fmt"
"reflect"
"strings"
"time"
"github.com/friendsofgo/errors"
"github.com/kat-co/vala"
"github.com/spf13/pflag"
"github.com/spf13/viper"
)
// Config object used to initialize configuration
type Config struct {
// The config file path, overwritten in tests to point to a tmp file
File string
// Specify which environment to load, empty string means pull the
// env from the configuration file, cmdline and env vars.
LoadEnv string
// Prefix the environment variables with this name so that the config
// variables don't conflict with other abcweb apps
EnvPrefix string
}
// NewConfig creates a new Config object used to initialize configuration
func NewConfig(envPrefix string) *Config {
return &Config{
File: "config.toml",
EnvPrefix: envPrefix,
}
}
// AppConfig struct includes the necessary abcweb config components.
// If you'd rather use your own struct so that you can add new values
// to your configuration you can do so, but make sure you include
// *ServerConfig objects and *DBConfig objects like below (if desired).
//
// If you do not wish to use a database then you can exclude the DBConfig
// struct in your own struct, but if using this AppConfig struct then
// DBConfig MUST be initialized and database configuration must be present.
type AppConfig struct {
// The active environment section
Env string `toml:"env" mapstructure:"env" env:"ENV"`
Server ServerConfig `toml:"server" mapstructure:"server"`
DB DBConfig `toml:"db" mapstructure:"db"`
}
// ServerConfig is config for the app loaded through environment variables,
// command line, or the config.toml file.
type ServerConfig struct {
// LiveReload enabled or disabled
LiveReload bool `toml:"live-reload" mapstructure:"live-reload" env:"SERVER_LIVE_RELOAD"`
// Use the production logger (JSON and log level warn) or the
// development logger (console and log level info)
ProdLogger bool `toml:"prod-logger" mapstructure:"prod-logger" env:"SERVER_PROD_LOGGER"`
// http bind address. ":<port>" for all interfaces
Bind string `toml:"bind" mapstructure:"bind" env:"SERVER_BIND"`
// https bind address. ":<port>" for all interfaces
TLSBind string `toml:"tls-bind" mapstructure:"tls-bind" env:"SERVER_TLS_BIND"`
// TLS certificate file path
TLSCertFile string `toml:"tls-cert-file" mapstructure:"tls-cert-file" env:"SERVER_TLS_CERT_FILE"`
// TLS key file path
TLSKeyFile string `toml:"tls-key-file" mapstructure:"tls-key-file" env:"SERVER_TLS_KEY_FILE"`
// Maximum duration before timing out read of the request
ReadTimeout time.Duration `toml:"read-timeout" mapstructure:"read-timeout" env:"SERVER_READ_TIMEOUT"`
// Maximum duration before timing out write of the response
WriteTimeout time.Duration `toml:"write-timeout" mapstructure:"write-timeout" env:"SERVER_WRITE_TIMEOUT"`
// Maximum duration before timing out idle keep-alive connection
IdleTimeout time.Duration `toml:"idle-timeout" mapstructure:"idle-timeout" env:"SERVER_IDLE_TIMEOUT"`
// Use manifest.json assets mapping
AssetsManifest bool `toml:"assets-manifest" mapstructure:"assets-manifest" env:"SERVER_ASSETS_MANIFEST"`
// Disable browsers caching asset files by setting response headers
AssetsNoCache bool `toml:"assets-no-cache" mapstructure:"assets-no-cache" env:"SERVER_ASSETS_NO_CACHE"`
// RenderRecompile enables recompilation of the template on every render call.
// This should be used in development mode so no server restart is required
// on template file changes.
RenderRecompile bool `toml:"render-recompile" mapstructure:"render-recompile" env:"SERVER_RENDER_RECOMPILE"`
// Use the development mode sessions storer opposed to production mode storer
// defined in app/sessions.go -- Usually a cookie storer for dev
// and disk storer for prod.
SessionsDevStorer bool `toml:"sessions-dev-storer" mapstructure:"sessions-dev-storer" env:"SERVER_SESSIONS_DEV_STORER"`
// PublicPath defaults to "public" but can be set to something else
// by the {{.AppEnvName}}_SERVER_PUBLIC_PATH environment variable.
// This is set by the "abcweb dev" command to instruct the app to
// load assets from a /tmp folder instead of the local public folder.
PublicPath string `toml:"public-path" mapstructure:"public-path" env:"SERVER_PUBLIC_PATH"`
}
// DBConfig holds the Postgres database config for the app loaded through
// environment variables, or the config.toml file.
type DBConfig struct {
// The Postgres database name
DBName string `toml:"dbname" mapstructure:"dbname" env:"DB_DBNAME"`
Host string `toml:"host" mapstructure:"host" env:"DB_HOST"`
Port int `toml:"port" mapstructure:"port" env:"DB_PORT"`
User string `toml:"user" mapstructure:"user" env:"DB_USER"`
Pass string `toml:"pass" mapstructure:"pass" env:"DB_PASS"`
SSLMode string `toml:"sslmode" mapstructure:"sslmode" env:"DB_SSLMODE"`
// Throw an error when the app starts if the database is not
// using the latest migration
EnforceMigration bool `toml:"enforce-migration" mapstructure:"enforce-migration" env:"DB_ENFORCE_MIGRATION"`
}
// Bind your passed in config flags to a new viper
// instance, retrieves the active environment section of your config file using
// that viper instance, and then loads your server and db config into
// the passed in cfg struct and validates the db config is set appropriately.
func (c *Config) Bind(flags *pflag.FlagSet, cfg interface{}) (*viper.Viper, error) {
v, err := c.NewSubViper(flags, cfg)
if err != nil {
return v, err
}
if err := UnmarshalAppConfig(cfg, v); err != nil {
return v, err
}
val := reflect.Indirect(reflect.ValueOf(cfg))
// Check if there's a DBConfig object in the cfg struct.
// If found, then validate all fields on it are set appropriately.
for i := 0; i < val.NumField(); i++ {
dbCfg, ok := val.Field(i).Interface().(DBConfig)
if !ok {
continue
}
if err := ValidateDBConfig(dbCfg); err != nil {
return v, err
}
break
}
return v, nil
}
// NewSubViper returns a viper instance activated against the active environment
// configuration subsection and initialized with the config.toml
// configuration file and the environment variable prefix.
// It also takes in the configuration struct so that it can generate the env
// mappings.
func (c *Config) NewSubViper(flags *pflag.FlagSet, cfg interface{}) (*viper.Viper, error) {
v := viper.New()
if flags != nil {
if err := v.BindPFlags(flags); err != nil {
return nil, err
}
}
if err := c.ConfigureViper(v); err != nil {
return nil, err
}
// Use the env from the config if it's not explicitly set
env := c.LoadEnv
if env == "" {
env = v.GetString("env")
}
v = v.Sub(env)
if v == nil {
return nil, fmt.Errorf("cannot find env section named %s", env)
}
mappings, err := GetTagMappings(cfg)
if err != nil {
return nil, errors.Wrap(err, "unable to get tag mappings for config struct")
}
if c.EnvPrefix != "" {
for _, m := range mappings {
v.BindEnv(m.chain, strings.Join([]string{c.EnvPrefix, m.env}, "_"))
}
} else {
for _, m := range mappings {
v.BindEnv(m.chain, m.env)
}
}
if v == nil {
return nil, fmt.Errorf("unable to load environment %q from %q", env, c.File)
}
if flags != nil {
if err := v.BindPFlags(flags); err != nil {
return nil, err
}
}
v.Set("env", env)
return v, nil
}
// ConfigureViper sets the viper object to use the passed in config toml file
// and also configures the configuration environment variables.
func (c *Config) | (v *viper.Viper) error {
v.SetConfigType("toml")
v.SetConfigFile(c.File)
v.SetEnvPrefix(c.EnvPrefix)
v.SetEnvKeyReplacer(strings.NewReplacer("-", "_"))
if err := v.ReadInConfig(); err != nil {
return err
}
v.AutomaticEnv()
return nil
}
// UnmarshalAppConfig unmarshals the viper's configured config file
// into the passed in cfg object containing an AppConfig
func UnmarshalAppConfig(cfg interface{}, v *viper.Viper) error {
err := v.Unmarshal(cfg)
if err != nil {
return err
}
val := reflect.Indirect(reflect.ValueOf(cfg))
// if cfg has an embedded AppConfig then we need to unmarshal
// directly into that and overwrite it in the parent struct,
// since its another layer of indirection and viper
// can't handle it magically.
for i := 0; i < val.NumField(); i++ {
appCfg, ok := val.Field(i).Interface().(AppConfig)
if !ok {
continue
}
v.Unmarshal(&appCfg)
val.Field(i).Set(reflect.ValueOf(appCfg))
// overwrite val to point to the AppConfig so DBConfig can be set below.
val = val.Field(i)
break
}
// Find *DBConfig and set object appropriately
for i := 0; i < val.NumField(); i++ {
dbCfg, ok := val.Field(i).Interface().(DBConfig)
if !ok {
continue
}
if dbCfg.Port == 0 {
dbCfg.Port = 5432
}
if dbCfg.SSLMode == "" {
dbCfg.SSLMode = "require"
}
val.Field(i).Set(reflect.ValueOf(dbCfg))
// Finished working on the db cfg struct, so break out
break
}
return nil
}
// ValidateDBConfig returns an error if any of the required db config
// fields are not set to their appropriate values.
func ValidateDBConfig(cfg DBConfig) error {
err := vala.BeginValidation().Validate(
vala.StringNotEmpty(cfg.User, "user"),
vala.StringNotEmpty(cfg.Host, "host"),
vala.Not(vala.Equals(cfg.Port, 0, "port")),
vala.StringNotEmpty(cfg.DBName, "dbname"),
vala.StringNotEmpty(cfg.SSLMode, "sslmode"),
).Check()
if err != nil {
return err
}
return nil
}
// Mapping represents a chain which is a list of nested object mapstructures
// joined together and seperated by dots (i.e. one.two.three), and the
// accompanying environment variable tag value for the last item in the chain
type Mapping struct {
chain string
env string
}
// Mappings is a slice of mapping
type Mappings []Mapping
func getTagMappingsRecursive(chain string, v reflect.Value) (Mappings, error) {
mappings := Mappings{}
for i := 0; i < v.NumField(); i++ {
cv := v.Field(i)
tag := v.Type().Field(i).Tag
ms := tag.Get("mapstructure")
env := tag.Get("env")
if cv.Kind() == reflect.Ptr {
nv := reflect.Indirect(cv)
// If it has no mapstructure set then fail gracefully,
// because it's probably not a field that should be read by viper.
// For example, a pointer to something that is late-initialized
// and isn't loaded by Bind or present in the config file.
if !nv.IsValid() && ms == "" {
continue
} else if !nv.IsValid() {
return nil, fmt.Errorf("cannot access non-initialized pointer %#v", cv)
}
// Only indirect struct types, if they're valid
if nv.Kind() == reflect.Struct {
cv = nv
}
}
// nc = newchain
var nc string
if chain != "" {
nc = strings.Join([]string{chain, ms}, ".")
} else {
nc = ms
}
switch cv.Kind() {
case reflect.Struct:
m, err := getTagMappingsRecursive(nc, cv)
if err != nil {
return nil, err
}
mappings = append(mappings, m...)
default:
if env != "" && ms != "" {
mappings = append(mappings, Mapping{chain: nc, env: env})
}
}
}
return mappings, nil
}
// GetTagMappings returns the viper .BindEnv mappings for an entire config
// struct.
func GetTagMappings(cfg interface{}) (Mappings, error) {
return getTagMappingsRecursive("", reflect.Indirect(reflect.ValueOf(cfg)))
}
// NewFlagSet creates the set of flags specific to the server and db config
// and the root level config (like --version, --env)
func NewFlagSet() *pflag.FlagSet {
flags := &pflag.FlagSet{}
flags.AddFlagSet(NewRootFlagSet())
flags.AddFlagSet(NewServerFlagSet())
flags.AddFlagSet(NewDBFlagSet())
return flags
}
// NewRootFlagSet returns a list of top level flags (flags that arent contained
// inside an environment section in the config)
func NewRootFlagSet() *pflag.FlagSet {
flags := &pflag.FlagSet{}
// root level flags
flags.BoolP("version", "", false, "Display the build version hash")
flags.StringP("env", "e", "prod", "The config files environment to load")
return flags
}
// NewServerFlagSet returns a list of flags contained within the [server]
// section of a config
func NewServerFlagSet() *pflag.FlagSet {
flags := &pflag.FlagSet{}
// server subsection flags
flags.BoolP("server.live-reload", "", false, "Enable or disable LiveReload")
flags.BoolP("server.prod-logger", "", true, "Use the production logger, JSON and log level warn")
flags.StringP("server.bind", "", "", `HTTP bind address, eg: ":80"`)
flags.StringP("server.tls-bind", "", "", `HTTPS bind address, eg: ":443"`)
flags.StringP("server.tls-cert-file", "", "", "TLS certificate file path")
flags.StringP("server.tls-key-file", "", "", "TLS key file path")
flags.StringP("server.public-path", "", "public", "The path to the public folder containing assets")
flags.DurationP("server.read-timeout", "", time.Second*10, "Maximum duration before timing out read of the request")
flags.DurationP("server.write-timeout", "", time.Second*15, "Maximum duration before timing out write of the response")
flags.DurationP("server.idle-timeout", "", time.Second*120, "Maximum duration before timing out idle keep-alive connection")
// manifest.json is created as a part of the gulp production "build" task,
// it maps fingerprinted asset names to regular asset names, for example:
// {"js/main.css": "js/e2a3ff9-main.css"}.
// This should only be set to true if doing asset fingerprinting.
flags.BoolP("server.assets-manifest", "", true, "Use manifest.json for mapping asset names to fingerprinted assets")
// This should be used in development mode to prevent browser caching of assets
flags.BoolP("server.assets-no-cache", "", false, "Disable browsers caching asset files by setting response headers")
// This should be used in development mode to avoid having to reload the
// server on every template file modification.
flags.BoolP("server.render-recompile", "", false, "Enable recompilation of the template on each render")
// Defined in app/sessions.go -- Usually cookie storer for dev and disk storer for prod.
flags.BoolP("server.sessions-dev-storer", "", false, "Use the development mode sessions storer (defined in app/sessions.go)")
return flags
}
// NewDBFlagSet returns a list of flags contained within the [db] section
// of a config
func NewDBFlagSet() *pflag.FlagSet {
flags := &pflag.FlagSet{}
// db subsection flags
flags.StringP("db.dbname", "", "", "The database name to connect to")
flags.StringP("db.host", "", "", "The database hostname, e.g localhost")
flags.IntP("db.port", "", 0, "The database port")
flags.StringP("db.user", "", "", "The database username")
flags.StringP("db.pass", "", "", "The database password")
flags.StringP("db.sslmode", "", "", "The database sslmode")
flags.BoolP("db.enforce-migrations", "", true, "Throw error on app start if database is not using latest migration")
return flags
}
| ConfigureViper | identifier_name |
config.go | package abcconfig
import (
"fmt"
"reflect"
"strings"
"time"
"github.com/friendsofgo/errors"
"github.com/kat-co/vala"
"github.com/spf13/pflag"
"github.com/spf13/viper"
)
// Config object used to initialize configuration
type Config struct {
// The config file path, overwritten in tests to point to a tmp file
File string
// Specify which environment to load, empty string means pull the
// env from the configuration file, cmdline and env vars.
LoadEnv string
// Prefix the environment variables with this name so that the config
// variables don't conflict with other abcweb apps
EnvPrefix string
}
// NewConfig creates a new Config object used to initialize configuration
func NewConfig(envPrefix string) *Config {
return &Config{
File: "config.toml",
EnvPrefix: envPrefix,
}
}
// AppConfig struct includes the necessary abcweb config components.
// If you'd rather use your own struct so that you can add new values
// to your configuration you can do so, but make sure you include
// *ServerConfig objects and *DBConfig objects like below (if desired).
//
// If you do not wish to use a database then you can exclude the DBConfig
// struct in your own struct, but if using this AppConfig struct then
// DBConfig MUST be initialized and database configuration must be present.
type AppConfig struct {
// The active environment section
Env string `toml:"env" mapstructure:"env" env:"ENV"`
Server ServerConfig `toml:"server" mapstructure:"server"`
DB DBConfig `toml:"db" mapstructure:"db"`
}
// ServerConfig is config for the app loaded through environment variables,
// command line, or the config.toml file.
type ServerConfig struct {
// LiveReload enabled or disabled
LiveReload bool `toml:"live-reload" mapstructure:"live-reload" env:"SERVER_LIVE_RELOAD"`
// Use the production logger (JSON and log level warn) or the
// development logger (console and log level info)
ProdLogger bool `toml:"prod-logger" mapstructure:"prod-logger" env:"SERVER_PROD_LOGGER"`
// http bind address. ":<port>" for all interfaces
Bind string `toml:"bind" mapstructure:"bind" env:"SERVER_BIND"`
// https bind address. ":<port>" for all interfaces
TLSBind string `toml:"tls-bind" mapstructure:"tls-bind" env:"SERVER_TLS_BIND"`
// TLS certificate file path
TLSCertFile string `toml:"tls-cert-file" mapstructure:"tls-cert-file" env:"SERVER_TLS_CERT_FILE"`
// TLS key file path
TLSKeyFile string `toml:"tls-key-file" mapstructure:"tls-key-file" env:"SERVER_TLS_KEY_FILE"`
// Maximum duration before timing out read of the request
ReadTimeout time.Duration `toml:"read-timeout" mapstructure:"read-timeout" env:"SERVER_READ_TIMEOUT"`
// Maximum duration before timing out write of the response
WriteTimeout time.Duration `toml:"write-timeout" mapstructure:"write-timeout" env:"SERVER_WRITE_TIMEOUT"`
// Maximum duration before timing out idle keep-alive connection
IdleTimeout time.Duration `toml:"idle-timeout" mapstructure:"idle-timeout" env:"SERVER_IDLE_TIMEOUT"`
// Use manifest.json assets mapping
AssetsManifest bool `toml:"assets-manifest" mapstructure:"assets-manifest" env:"SERVER_ASSETS_MANIFEST"`
// Disable browsers caching asset files by setting response headers
AssetsNoCache bool `toml:"assets-no-cache" mapstructure:"assets-no-cache" env:"SERVER_ASSETS_NO_CACHE"`
// RenderRecompile enables recompilation of the template on every render call.
// This should be used in development mode so no server restart is required
// on template file changes.
RenderRecompile bool `toml:"render-recompile" mapstructure:"render-recompile" env:"SERVER_RENDER_RECOMPILE"`
// Use the development mode sessions storer opposed to production mode storer
// defined in app/sessions.go -- Usually a cookie storer for dev
// and disk storer for prod.
SessionsDevStorer bool `toml:"sessions-dev-storer" mapstructure:"sessions-dev-storer" env:"SERVER_SESSIONS_DEV_STORER"`
// PublicPath defaults to "public" but can be set to something else
// by the {{.AppEnvName}}_SERVER_PUBLIC_PATH environment variable.
// This is set by the "abcweb dev" command to instruct the app to
// load assets from a /tmp folder instead of the local public folder.
PublicPath string `toml:"public-path" mapstructure:"public-path" env:"SERVER_PUBLIC_PATH"`
}
// DBConfig holds the Postgres database config for the app loaded through
// environment variables, or the config.toml file.
type DBConfig struct {
// The Postgres database name
DBName string `toml:"dbname" mapstructure:"dbname" env:"DB_DBNAME"`
Host string `toml:"host" mapstructure:"host" env:"DB_HOST"`
Port int `toml:"port" mapstructure:"port" env:"DB_PORT"`
User string `toml:"user" mapstructure:"user" env:"DB_USER"`
Pass string `toml:"pass" mapstructure:"pass" env:"DB_PASS"`
SSLMode string `toml:"sslmode" mapstructure:"sslmode" env:"DB_SSLMODE"`
// Throw an error when the app starts if the database is not
// using the latest migration
EnforceMigration bool `toml:"enforce-migration" mapstructure:"enforce-migration" env:"DB_ENFORCE_MIGRATION"`
}
// Bind your passed in config flags to a new viper
// instance, retrieves the active environment section of your config file using
// that viper instance, and then loads your server and db config into
// the passed in cfg struct and validates the db config is set appropriately.
func (c *Config) Bind(flags *pflag.FlagSet, cfg interface{}) (*viper.Viper, error) {
v, err := c.NewSubViper(flags, cfg)
if err != nil {
return v, err
}
if err := UnmarshalAppConfig(cfg, v); err != nil {
return v, err
}
val := reflect.Indirect(reflect.ValueOf(cfg))
// Check if there's a DBConfig object in the cfg struct.
// If found, then validate all fields on it are set appropriately.
for i := 0; i < val.NumField(); i++ {
dbCfg, ok := val.Field(i).Interface().(DBConfig)
if !ok {
continue
}
if err := ValidateDBConfig(dbCfg); err != nil {
return v, err
}
break
}
return v, nil
}
// NewSubViper returns a viper instance activated against the active environment
// configuration subsection and initialized with the config.toml
// configuration file and the environment variable prefix.
// It also takes in the configuration struct so that it can generate the env
// mappings.
func (c *Config) NewSubViper(flags *pflag.FlagSet, cfg interface{}) (*viper.Viper, error) {
v := viper.New()
if flags != nil {
if err := v.BindPFlags(flags); err != nil {
return nil, err
}
}
if err := c.ConfigureViper(v); err != nil {
return nil, err
}
// Use the env from the config if it's not explicitly set
env := c.LoadEnv
if env == "" |
v = v.Sub(env)
if v == nil {
return nil, fmt.Errorf("cannot find env section named %s", env)
}
mappings, err := GetTagMappings(cfg)
if err != nil {
return nil, errors.Wrap(err, "unable to get tag mappings for config struct")
}
if c.EnvPrefix != "" {
for _, m := range mappings {
v.BindEnv(m.chain, strings.Join([]string{c.EnvPrefix, m.env}, "_"))
}
} else {
for _, m := range mappings {
v.BindEnv(m.chain, m.env)
}
}
if v == nil {
return nil, fmt.Errorf("unable to load environment %q from %q", env, c.File)
}
if flags != nil {
if err := v.BindPFlags(flags); err != nil {
return nil, err
}
}
v.Set("env", env)
return v, nil
}
// ConfigureViper sets the viper object to use the passed in config toml file
// and also configures the configuration environment variables.
func (c *Config) ConfigureViper(v *viper.Viper) error {
v.SetConfigType("toml")
v.SetConfigFile(c.File)
v.SetEnvPrefix(c.EnvPrefix)
v.SetEnvKeyReplacer(strings.NewReplacer("-", "_"))
if err := v.ReadInConfig(); err != nil {
return err
}
v.AutomaticEnv()
return nil
}
// UnmarshalAppConfig unmarshals the viper's configured config file
// into the passed in cfg object containing an AppConfig
func UnmarshalAppConfig(cfg interface{}, v *viper.Viper) error {
	if err := v.Unmarshal(cfg); err != nil {
		return err
	}

	val := reflect.Indirect(reflect.ValueOf(cfg))

	// if cfg has an embedded AppConfig then we need to unmarshal
	// directly into that and overwrite it in the parent struct,
	// since its another layer of indirection and viper
	// can't handle it magically.
	for i := 0; i < val.NumField(); i++ {
		appCfg, ok := val.Field(i).Interface().(AppConfig)
		if !ok {
			continue
		}
		// BUG FIX: this error was previously ignored, letting a malformed
		// config pass through silently.
		if err := v.Unmarshal(&appCfg); err != nil {
			return err
		}
		val.Field(i).Set(reflect.ValueOf(appCfg))
		// overwrite val to point to the AppConfig so DBConfig can be set below.
		val = val.Field(i)
		break
	}

	// Find the DBConfig field (if any) and apply Postgres defaults:
	// port 5432 and sslmode "require".
	for i := 0; i < val.NumField(); i++ {
		dbCfg, ok := val.Field(i).Interface().(DBConfig)
		if !ok {
			continue
		}
		if dbCfg.Port == 0 {
			dbCfg.Port = 5432
		}
		if dbCfg.SSLMode == "" {
			dbCfg.SSLMode = "require"
		}
		val.Field(i).Set(reflect.ValueOf(dbCfg))
		// Finished working on the db cfg struct, so break out
		break
	}

	return nil
}
// ValidateDBConfig returns an error if any of the required db config
// fields are not set to their appropriate values.
func ValidateDBConfig(cfg DBConfig) error {
	// user, host, dbname and sslmode must be non-empty and port non-zero;
	// vala aggregates all failures into a single error. Returning the
	// Check() result directly replaces a redundant nil check.
	return vala.BeginValidation().Validate(
		vala.StringNotEmpty(cfg.User, "user"),
		vala.StringNotEmpty(cfg.Host, "host"),
		vala.Not(vala.Equals(cfg.Port, 0, "port")),
		vala.StringNotEmpty(cfg.DBName, "dbname"),
		vala.StringNotEmpty(cfg.SSLMode, "sslmode"),
	).Check()
}
// Mapping represents a chain which is a list of nested object mapstructures
// joined together and seperated by dots (i.e. one.two.three), and the
// accompanying environment variable tag value for the last item in the chain
type Mapping struct {
chain string
env string
}
// Mappings is a slice of mapping
type Mappings []Mapping
func getTagMappingsRecursive(chain string, v reflect.Value) (Mappings, error) {
mappings := Mappings{}
for i := 0; i < v.NumField(); i++ {
cv := v.Field(i)
tag := v.Type().Field(i).Tag
ms := tag.Get("mapstructure")
env := tag.Get("env")
if cv.Kind() == reflect.Ptr {
nv := reflect.Indirect(cv)
// If it has no mapstructure set then fail gracefully,
// because it's probably not a field that should be read by viper.
// For example, a pointer to something that is late-initialized
// and isn't loaded by Bind or present in the config file.
if !nv.IsValid() && ms == "" {
continue
} else if !nv.IsValid() {
return nil, fmt.Errorf("cannot access non-initialized pointer %#v", cv)
}
// Only indirect struct types, if they're valid
if nv.Kind() == reflect.Struct {
cv = nv
}
}
// nc = newchain
var nc string
if chain != "" {
nc = strings.Join([]string{chain, ms}, ".")
} else {
nc = ms
}
switch cv.Kind() {
case reflect.Struct:
m, err := getTagMappingsRecursive(nc, cv)
if err != nil {
return nil, err
}
mappings = append(mappings, m...)
default:
if env != "" && ms != "" {
mappings = append(mappings, Mapping{chain: nc, env: env})
}
}
}
return mappings, nil
}
// GetTagMappings returns the viper .BindEnv mappings for an entire config
// struct.
func GetTagMappings(cfg interface{}) (Mappings, error) {
return getTagMappingsRecursive("", reflect.Indirect(reflect.ValueOf(cfg)))
}
// NewFlagSet creates the set of flags specific to the server and db config
// and the root level config (like --version, --env)
func NewFlagSet() *pflag.FlagSet {
flags := &pflag.FlagSet{}
flags.AddFlagSet(NewRootFlagSet())
flags.AddFlagSet(NewServerFlagSet())
flags.AddFlagSet(NewDBFlagSet())
return flags
}
// NewRootFlagSet returns a list of top level flags (flags that arent contained
// inside an environment section in the config)
func NewRootFlagSet() *pflag.FlagSet {
flags := &pflag.FlagSet{}
// root level flags
flags.BoolP("version", "", false, "Display the build version hash")
flags.StringP("env", "e", "prod", "The config files environment to load")
return flags
}
// NewServerFlagSet returns a list of flags contained within the [server]
// section of a config
func NewServerFlagSet() *pflag.FlagSet {
flags := &pflag.FlagSet{}
// server subsection flags
flags.BoolP("server.live-reload", "", false, "Enable or disable LiveReload")
flags.BoolP("server.prod-logger", "", true, "Use the production logger, JSON and log level warn")
flags.StringP("server.bind", "", "", `HTTP bind address, eg: ":80"`)
flags.StringP("server.tls-bind", "", "", `HTTPS bind address, eg: ":443"`)
flags.StringP("server.tls-cert-file", "", "", "TLS certificate file path")
flags.StringP("server.tls-key-file", "", "", "TLS key file path")
flags.StringP("server.public-path", "", "public", "The path to the public folder containing assets")
flags.DurationP("server.read-timeout", "", time.Second*10, "Maximum duration before timing out read of the request")
flags.DurationP("server.write-timeout", "", time.Second*15, "Maximum duration before timing out write of the response")
flags.DurationP("server.idle-timeout", "", time.Second*120, "Maximum duration before timing out idle keep-alive connection")
// manifest.json is created as a part of the gulp production "build" task,
// it maps fingerprinted asset names to regular asset names, for example:
// {"js/main.css": "js/e2a3ff9-main.css"}.
// This should only be set to true if doing asset fingerprinting.
flags.BoolP("server.assets-manifest", "", true, "Use manifest.json for mapping asset names to fingerprinted assets")
// This should be used in development mode to prevent browser caching of assets
flags.BoolP("server.assets-no-cache", "", false, "Disable browsers caching asset files by setting response headers")
// This should be used in development mode to avoid having to reload the
// server on every template file modification.
flags.BoolP("server.render-recompile", "", false, "Enable recompilation of the template on each render")
// Defined in app/sessions.go -- Usually cookie storer for dev and disk storer for prod.
flags.BoolP("server.sessions-dev-storer", "", false, "Use the development mode sessions storer (defined in app/sessions.go)")
return flags
}
// NewDBFlagSet returns a list of flags contained within the [db] section
// of a config
func NewDBFlagSet() *pflag.FlagSet {
flags := &pflag.FlagSet{}
// db subsection flags
flags.StringP("db.dbname", "", "", "The database name to connect to")
flags.StringP("db.host", "", "", "The database hostname, e.g localhost")
flags.IntP("db.port", "", 0, "The database port")
flags.StringP("db.user", "", "", "The database username")
flags.StringP("db.pass", "", "", "The database password")
flags.StringP("db.sslmode", "", "", "The database sslmode")
flags.BoolP("db.enforce-migrations", "", true, "Throw error on app start if database is not using latest migration")
return flags
}
| {
env = v.GetString("env")
} | conditional_block |
config.go | package abcconfig
import (
"fmt"
"reflect"
"strings"
"time"
"github.com/friendsofgo/errors"
"github.com/kat-co/vala"
"github.com/spf13/pflag"
"github.com/spf13/viper"
)
// Config object used to initialize configuration
type Config struct {
// The config file path, overwritten in tests to point to a tmp file
File string
// Specify which environment to load, empty string means pull the
// env from the configuration file, cmdline and env vars.
LoadEnv string
// Prefix the environment variables with this name so that the config
// variables don't conflict with other abcweb apps
EnvPrefix string
}
// NewConfig creates a new Config object used to initialize configuration
func NewConfig(envPrefix string) *Config {
return &Config{
File: "config.toml",
EnvPrefix: envPrefix,
}
}
// AppConfig struct includes the necessary abcweb config components.
// If you'd rather use your own struct so that you can add new values
// to your configuration you can do so, but make sure you include
// *ServerConfig objects and *DBConfig objects like below (if desired).
//
// If you do not wish to use a database then you can exclude the DBConfig
// struct in your own struct, but if using this AppConfig struct then
// DBConfig MUST be initialized and database configuration must be present.
type AppConfig struct {
// The active environment section
Env string `toml:"env" mapstructure:"env" env:"ENV"`
Server ServerConfig `toml:"server" mapstructure:"server"`
DB DBConfig `toml:"db" mapstructure:"db"`
}
// ServerConfig is config for the app loaded through environment variables,
// command line, or the config.toml file.
type ServerConfig struct {
// LiveReload enabled or disabled
LiveReload bool `toml:"live-reload" mapstructure:"live-reload" env:"SERVER_LIVE_RELOAD"`
// Use the production logger (JSON and log level warn) or the
// development logger (console and log level info)
ProdLogger bool `toml:"prod-logger" mapstructure:"prod-logger" env:"SERVER_PROD_LOGGER"`
// http bind address. ":<port>" for all interfaces
Bind string `toml:"bind" mapstructure:"bind" env:"SERVER_BIND"`
// https bind address. ":<port>" for all interfaces
TLSBind string `toml:"tls-bind" mapstructure:"tls-bind" env:"SERVER_TLS_BIND"`
// TLS certificate file path
TLSCertFile string `toml:"tls-cert-file" mapstructure:"tls-cert-file" env:"SERVER_TLS_CERT_FILE"`
// TLS key file path
TLSKeyFile string `toml:"tls-key-file" mapstructure:"tls-key-file" env:"SERVER_TLS_KEY_FILE"`
// Maximum duration before timing out read of the request
ReadTimeout time.Duration `toml:"read-timeout" mapstructure:"read-timeout" env:"SERVER_READ_TIMEOUT"`
// Maximum duration before timing out write of the response
WriteTimeout time.Duration `toml:"write-timeout" mapstructure:"write-timeout" env:"SERVER_WRITE_TIMEOUT"`
// Maximum duration before timing out idle keep-alive connection
IdleTimeout time.Duration `toml:"idle-timeout" mapstructure:"idle-timeout" env:"SERVER_IDLE_TIMEOUT"`
// Use manifest.json assets mapping
AssetsManifest bool `toml:"assets-manifest" mapstructure:"assets-manifest" env:"SERVER_ASSETS_MANIFEST"`
// Disable browsers caching asset files by setting response headers
AssetsNoCache bool `toml:"assets-no-cache" mapstructure:"assets-no-cache" env:"SERVER_ASSETS_NO_CACHE"`
// RenderRecompile enables recompilation of the template on every render call.
// This should be used in development mode so no server restart is required
// on template file changes.
RenderRecompile bool `toml:"render-recompile" mapstructure:"render-recompile" env:"SERVER_RENDER_RECOMPILE"`
// Use the development mode sessions storer opposed to production mode storer
// defined in app/sessions.go -- Usually a cookie storer for dev
// and disk storer for prod.
SessionsDevStorer bool `toml:"sessions-dev-storer" mapstructure:"sessions-dev-storer" env:"SERVER_SESSIONS_DEV_STORER"`
// PublicPath defaults to "public" but can be set to something else
// by the {{.AppEnvName}}_SERVER_PUBLIC_PATH environment variable.
// This is set by the "abcweb dev" command to instruct the app to
// load assets from a /tmp folder instead of the local public folder.
PublicPath string `toml:"public-path" mapstructure:"public-path" env:"SERVER_PUBLIC_PATH"`
}
// DBConfig holds the Postgres database config for the app loaded through
// environment variables, or the config.toml file.
type DBConfig struct {
	// The Postgres database name
	DBName string `toml:"dbname" mapstructure:"dbname" env:"DB_DBNAME"`
	// Database host, e.g. "localhost".
	Host string `toml:"host" mapstructure:"host" env:"DB_HOST"`
	// Port left at zero is defaulted to 5432 by UnmarshalAppConfig.
	Port int `toml:"port" mapstructure:"port" env:"DB_PORT"`
	User string `toml:"user" mapstructure:"user" env:"DB_USER"`
	Pass string `toml:"pass" mapstructure:"pass" env:"DB_PASS"`
	// SSLMode left empty is defaulted to "require" by UnmarshalAppConfig.
	SSLMode string `toml:"sslmode" mapstructure:"sslmode" env:"DB_SSLMODE"`
	// Throw an error when the app starts if the database is not
	// using the latest migration
	// NOTE(review): the pflag key is "db.enforce-migrations" (plural) while
	// this mapstructure tag is "enforce-migration" (singular); the flag value
	// will not bind to this field — confirm and align the two names.
	EnforceMigration bool `toml:"enforce-migration" mapstructure:"enforce-migration" env:"DB_ENFORCE_MIGRATION"`
}
// Bind your passed in config flags to a new viper
// instance, retrieves the active environment section of your config file using
// that viper instance, and then loads your server and db config into
// the passed in cfg struct and validates the db config is set appropriately.
func (c *Config) Bind(flags *pflag.FlagSet, cfg interface{}) (*viper.Viper, error) {
	v, err := c.NewSubViper(flags, cfg)
	if err != nil {
		return v, err
	}

	if err = UnmarshalAppConfig(cfg, v); err != nil {
		return v, err
	}

	// Scan the struct for a DBConfig field; when one is present its
	// required values are validated. Only the first match is checked.
	target := reflect.Indirect(reflect.ValueOf(cfg))
	for idx := 0; idx < target.NumField(); idx++ {
		dbConf, isDB := target.Field(idx).Interface().(DBConfig)
		if !isDB {
			continue
		}
		if vErr := ValidateDBConfig(dbConf); vErr != nil {
			return v, vErr
		}
		break
	}

	return v, nil
}
// NewSubViper returns a viper instance activated against the active environment
// configuration subsection and initialized with the config.toml
// configuration file and the environment variable prefix.
// It also takes in the configuration struct so that it can generate the env
// mappings.
func (c *Config) NewSubViper(flags *pflag.FlagSet, cfg interface{}) (*viper.Viper, error) {
	v := viper.New()

	// Bind flags before reading the config so the top-level "env" flag
	// participates in the lookup below.
	if flags != nil {
		if err := v.BindPFlags(flags); err != nil {
			return nil, err
		}
	}

	if err := c.ConfigureViper(v); err != nil {
		return nil, err
	}

	// Use the env from the config if it's not explicitly set
	env := c.LoadEnv
	if env == "" {
		env = v.GetString("env")
	}

	// Narrow the viper down to the active environment section, e.g. [prod].
	v = v.Sub(env)
	if v == nil {
		return nil, fmt.Errorf("cannot find env section named %s", env)
	}

	mappings, err := GetTagMappings(cfg)
	if err != nil {
		return nil, errors.Wrap(err, "unable to get tag mappings for config struct")
	}

	// Bind each config key to its environment variable, applying the
	// EnvPrefix when one is configured.
	if c.EnvPrefix != "" {
		for _, m := range mappings {
			v.BindEnv(m.chain, strings.Join([]string{c.EnvPrefix, m.env}, "_"))
		}
	} else {
		for _, m := range mappings {
			v.BindEnv(m.chain, m.env)
		}
	}

	// BUG FIX: a second "if v == nil" check that previously lived here was
	// unreachable — v is validated immediately after Sub(env) above and is
	// never reassigned afterwards — so it has been removed.

	// Re-bind flags on the sub-viper so flag values override file values
	// within the active section.
	if flags != nil {
		if err := v.BindPFlags(flags); err != nil {
			return nil, err
		}
	}

	v.Set("env", env)

	return v, nil
}
// ConfigureViper sets the viper object to use the passed in config toml file
// and also configures the configuration environment variables.
func (c *Config) ConfigureViper(v *viper.Viper) error {
	// Point viper at the TOML file named by c.File and wire up environment
	// variables: keys are prefixed with c.EnvPrefix and dashes in key names
	// map to underscores in variable names.
	v.SetConfigType("toml")
	v.SetConfigFile(c.File)
	v.SetEnvPrefix(c.EnvPrefix)
	v.SetEnvKeyReplacer(strings.NewReplacer("-", "_"))

	// A missing or unparsable config file is fatal.
	if readErr := v.ReadInConfig(); readErr != nil {
		return readErr
	}

	v.AutomaticEnv()
	return nil
}
// UnmarshalAppConfig unmarshals the viper's configured config file
// into the passed in cfg object containing an AppConfig
func UnmarshalAppConfig(cfg interface{}, v *viper.Viper) error {
	if err := v.Unmarshal(cfg); err != nil {
		return err
	}

	val := reflect.Indirect(reflect.ValueOf(cfg))

	// if cfg has an embedded AppConfig then we need to unmarshal
	// directly into that and overwrite it in the parent struct,
	// since its another layer of indirection and viper
	// can't handle it magically.
	for i := 0; i < val.NumField(); i++ {
		appCfg, ok := val.Field(i).Interface().(AppConfig)
		if !ok {
			continue
		}
		// BUG FIX: this error was previously ignored, letting a malformed
		// config pass through silently.
		if err := v.Unmarshal(&appCfg); err != nil {
			return err
		}
		val.Field(i).Set(reflect.ValueOf(appCfg))
		// overwrite val to point to the AppConfig so DBConfig can be set below.
		val = val.Field(i)
		break
	}

	// Find the DBConfig field (if any) and apply Postgres defaults:
	// port 5432 and sslmode "require".
	for i := 0; i < val.NumField(); i++ {
		dbCfg, ok := val.Field(i).Interface().(DBConfig)
		if !ok {
			continue
		}
		if dbCfg.Port == 0 {
			dbCfg.Port = 5432
		}
		if dbCfg.SSLMode == "" {
			dbCfg.SSLMode = "require"
		}
		val.Field(i).Set(reflect.ValueOf(dbCfg))
		// Finished working on the db cfg struct, so break out
		break
	}

	return nil
}
// ValidateDBConfig returns an error if any of the required db config
// fields are not set to their appropriate values.
func ValidateDBConfig(cfg DBConfig) error {
	// user, host, dbname and sslmode must be non-empty and port non-zero;
	// vala aggregates all failures into a single error. Returning the
	// Check() result directly replaces a redundant nil check.
	return vala.BeginValidation().Validate(
		vala.StringNotEmpty(cfg.User, "user"),
		vala.StringNotEmpty(cfg.Host, "host"),
		vala.Not(vala.Equals(cfg.Port, 0, "port")),
		vala.StringNotEmpty(cfg.DBName, "dbname"),
		vala.StringNotEmpty(cfg.SSLMode, "sslmode"),
	).Check()
}
// Mapping represents a chain which is a list of nested object mapstructures
// joined together and separated by dots (i.e. one.two.three), and the
// accompanying environment variable tag value for the last item in the chain
type Mapping struct {
	// chain is the dot-joined viper key, e.g. "server.tls-bind".
	chain string
	// env is the value of the field's `env` struct tag, e.g. "SERVER_TLS_BIND".
	env string
}

// Mappings is a slice of mapping
type Mappings []Mapping
// getTagMappingsRecursive walks struct value v with reflection and collects a
// Mapping (dot-joined viper key + env var name) for every leaf field carrying
// both a `mapstructure` and an `env` tag. chain is the mapstructure path
// accumulated from the enclosing structs ("" at the top level).
func getTagMappingsRecursive(chain string, v reflect.Value) (Mappings, error) {
	mappings := Mappings{}
	for i := 0; i < v.NumField(); i++ {
		cv := v.Field(i)
		tag := v.Type().Field(i).Tag
		ms := tag.Get("mapstructure")
		env := tag.Get("env")
		if cv.Kind() == reflect.Ptr {
			nv := reflect.Indirect(cv)
			// If it has no mapstructure set then fail gracefully,
			// because it's probably not a field that should be read by viper.
			// For example, a pointer to something that is late-initialized
			// and isn't loaded by Bind or present in the config file.
			if !nv.IsValid() && ms == "" {
				continue
			} else if !nv.IsValid() {
				return nil, fmt.Errorf("cannot access non-initialized pointer %#v", cv)
			}
			// Only indirect struct types, if they're valid
			if nv.Kind() == reflect.Struct {
				cv = nv
			}
		}
		// nc = newchain: extend the dot-separated mapstructure path with
		// this field's tag.
		var nc string
		if chain != "" {
			nc = strings.Join([]string{chain, ms}, ".")
		} else {
			nc = ms
		}
		switch cv.Kind() {
		case reflect.Struct:
			// Nested config section: recurse with the extended chain.
			m, err := getTagMappingsRecursive(nc, cv)
			if err != nil {
				return nil, err
			}
			mappings = append(mappings, m...)
		default:
			// Leaf field: record it only when both tags are present.
			if env != "" && ms != "" {
				mappings = append(mappings, Mapping{chain: nc, env: env})
			}
		}
	}
	return mappings, nil
}
// GetTagMappings returns the viper .BindEnv mappings for an entire config
// struct.
func GetTagMappings(cfg interface{}) (Mappings, error) {
	// Dereference cfg (usually a pointer to a struct) before walking it.
	root := reflect.Indirect(reflect.ValueOf(cfg))
	return getTagMappingsRecursive("", root)
}
// NewFlagSet creates the set of flags specific to the server and db config
// and the root level config (like --version, --env)
func NewFlagSet() *pflag.FlagSet {
	combined := &pflag.FlagSet{}
	// Merge the root, [server] and [db] flag sets into one.
	for _, fs := range []*pflag.FlagSet{
		NewRootFlagSet(),
		NewServerFlagSet(),
		NewDBFlagSet(),
	} {
		combined.AddFlagSet(fs)
	}
	return combined
}
// NewRootFlagSet returns a list of top level flags (flags that arent contained
// inside an environment section in the config)
func NewRootFlagSet() *pflag.FlagSet |
// NewServerFlagSet returns a list of flags contained within the [server]
// section of a config
func NewServerFlagSet() *pflag.FlagSet {
	flags := &pflag.FlagSet{}
	// server subsection flags; each key is a ServerConfig mapstructure tag
	// prefixed with "server." so BindPFlags lines up with the struct fields.
	flags.BoolP("server.live-reload", "", false, "Enable or disable LiveReload")
	flags.BoolP("server.prod-logger", "", true, "Use the production logger, JSON and log level warn")
	flags.StringP("server.bind", "", "", `HTTP bind address, eg: ":80"`)
	flags.StringP("server.tls-bind", "", "", `HTTPS bind address, eg: ":443"`)
	flags.StringP("server.tls-cert-file", "", "", "TLS certificate file path")
	flags.StringP("server.tls-key-file", "", "", "TLS key file path")
	flags.StringP("server.public-path", "", "public", "The path to the public folder containing assets")
	flags.DurationP("server.read-timeout", "", time.Second*10, "Maximum duration before timing out read of the request")
	flags.DurationP("server.write-timeout", "", time.Second*15, "Maximum duration before timing out write of the response")
	flags.DurationP("server.idle-timeout", "", time.Second*120, "Maximum duration before timing out idle keep-alive connection")
	// manifest.json is created as a part of the gulp production "build" task,
	// it maps fingerprinted asset names to regular asset names, for example:
	// {"js/main.css": "js/e2a3ff9-main.css"}.
	// This should only be set to true if doing asset fingerprinting.
	flags.BoolP("server.assets-manifest", "", true, "Use manifest.json for mapping asset names to fingerprinted assets")
	// This should be used in development mode to prevent browser caching of assets
	flags.BoolP("server.assets-no-cache", "", false, "Disable browsers caching asset files by setting response headers")
	// This should be used in development mode to avoid having to reload the
	// server on every template file modification.
	flags.BoolP("server.render-recompile", "", false, "Enable recompilation of the template on each render")
	// Defined in app/sessions.go -- Usually cookie storer for dev and disk storer for prod.
	flags.BoolP("server.sessions-dev-storer", "", false, "Use the development mode sessions storer (defined in app/sessions.go)")
	return flags
}
// NewDBFlagSet returns a list of flags contained within the [db] section
// of a config
func NewDBFlagSet() *pflag.FlagSet {
	flags := &pflag.FlagSet{}
	// db subsection flags; keys mirror the DBConfig mapstructure tags
	// prefixed with "db.".
	flags.StringP("db.dbname", "", "", "The database name to connect to")
	flags.StringP("db.host", "", "", "The database hostname, e.g localhost")
	flags.IntP("db.port", "", 0, "The database port")
	flags.StringP("db.user", "", "", "The database username")
	flags.StringP("db.pass", "", "", "The database password")
	flags.StringP("db.sslmode", "", "", "The database sslmode")
	// NOTE(review): this key is "db.enforce-migrations" (plural) while the
	// DBConfig mapstructure tag is "enforce-migration" (singular), so the
	// flag value will not unmarshal into DBConfig.EnforceMigration —
	// confirm and align the two names.
	flags.BoolP("db.enforce-migrations", "", true, "Throw error on app start if database is not using latest migration")
	return flags
}
| {
flags := &pflag.FlagSet{}
// root level flags
flags.BoolP("version", "", false, "Display the build version hash")
flags.StringP("env", "e", "prod", "The config files environment to load")
return flags
} | identifier_body |
show_alignment.py | #!/Users/alexfinck/anaconda3/bin/python
# -*- coding: utf-8 -*-
"""This script extends an alignment consisting of upper-seq and
lower-seq with a quality line shown between upper-seq and lower-seq on
a terminal or console having at least 80 columns and configured to use
a fixed/monospaced font.
The script takes as main optional argument whether or not an input is
dna or proteins; this is an authoritative argument as the script does
not try to guess in place of the user; default is dna. The script
returns a message to the console consisting of three sections. For
each run the user is informed about prerequisites to a successful
usage in a first section. Then the input is parsed for consistency and
a corresponding section is printed to the console. The last section
contains the input alignment accompanied by a quality line in the
form of 3 lines repeatedly displayed and cut at default 60 columns
until the length of the alignment is reached.
"""
# "Skelett" des Programs zum Zeigen eines Alignments mit einer
# Qualitaetszeile Haltet euch bitte an diese Struktur - dort wo das
# "pass" steht muss euer Quelltext kommen. Das pass muss dazu
# geloescht werden.
# Autor: Alex Finck
# Datum der letzten Aenderung: 09.07.2020
#
# usage examples from the command line:
# > python show_alignment.py "AACTG_GTCAT" "AGTCAA_CTGA"
# > python show_alignment.py -iprotein "ACTG_GTCA" "GTCAA_CTG"
from Bio.SubsMat.MatrixInfo import pam30
import argparse
class ShowAlignment:
def __init__(self, aligntIsDna=True):
# Liste die angibt wie gut ein Match ist. Siehe Aufgabenzettel -
self.OUTPUT_WIDTH_IN_COLS = 60
self.qualitaetsListe = ["AA", "GG", "CC", "TT", "CT", "TC", "AG", "GA",
"CA", "AC", "CG", "GC", "TA", "AT", "TG", "GT"]
self.qualitaetsListeProteins = self.__qualitaetsListeProteins()
# beware: the following variable has coupling with the above
# _qualitaetsListeProteins function
self.PROTEIN_ALPHABET = 'ABCDEFGHIKLMNPQRSTVWXYZ_'
self.DNA_ALPHABET = "ACTG_"
self.INPUT_GAP_ZEICHEN = "_"
self.DNA_EXAKTER_MATCH = range(0, 4)
self.DNA_GUTER_MATCH = range(4, 8)
self.DNA_KEIN_GUTER_MATCH = range(8, 16)
# beware: the following 3 variables have coupling with the above
# _qualitaetsListeProteins function
self.AA_EXAKTER_MATCH = range(0, 19)
self.AA_GUTER_MATCH = range(19, 60)
self.AA_KEIN_GUTER_MATCH = range(60, 529)
self.EXAKTER_MATCH_ZEICHEN = "|"
self.GUTER_MATCH_ZEICHEN = ":"
self.KEIN_GUTER_MATCH_ZEICHEN = "."
self.QUAL_GAP_ZEICHEN = " "
# flags, the values of which are given back to the user
self.VALID_DNA_OR_PROTEIN = "valid dna|protein"
self.INVALID_PROTEIN = "invalid protein"
self.INVALID_DNA = "invalid dna"
# deductive and authoritative flag that gives to the script the prior
# information about
self.aligntIsDna = True
    # string[] __qualitaetsListeProteins()
    def __qualitaetsListeProteins(self):
        """Private function building and returning a quality list analog to
        the quality list for dna nucleotides, but based upon the PAM30
        Matrix; associated quality ranges are defined in AA_EXAKTER_MATCH,
        AA_GUTER_MATCH, AA_KEIN_GUTER_MATCH and correspond for
        AA_GUTER_MATCH to remaining positive scores after removal of exact
        matches and for AA_KEIN_GUTER_MATCH to negative scores,
        respectively
        """
        rv = []
        pam30_sortierbar = {}
        # Build a sortable dict keyed "<score>;<pair>". pam30 stores each
        # unordered residue pair once, so the mirrored pair (B, A) is added
        # explicitly whenever the two residues differ.
        for key in pam30.keys():
            pam30_sortierbar[str(pam30[key]) + ";" + ''.join(key)] = pam30[key]
            if key[0] != key[1]:
                pam30_sortierbar[
                    str(pam30[key]) + ";" + ''.join((key[1], key[0]))
                ] = pam30[key]
        # Sort the keys by their numeric score prefix, highest score first.
        sorted_keys = list(pam30_sortierbar.keys())
        sorted_keys.sort(key=lambda k: int(k.split(";")[0]), reverse=True)
        # debugging kept for historical reasons
        # for key in iter(sorted_keys):
        # print(key.split(";")[1] + " has score " + str(pam30_sortierbar[key]))
        # Strip the score prefix again; rv holds two-letter residue pairs
        # ordered from best to worst match.
        for key in iter(sorted_keys):
            rv.append(key.split(";")[1])
        return(rv)
# string getQuality(string obereZeile, string untereZeile)
def getQuality(self, obereZeile, untereZeile):
"""Function that returns in the form of a string a quality of an
alignment consisting of two input sequences of dna or proteins.
The quality depnds on the prior input of the user given by the
aligntIsDna Flag. Quality for dna sequence pairs further depends
upon the list 'qualitaetsListe' and for amino acid sequences upon
the list 'qualitaetsListeProteins'.
"""
qualitaetsZeile = ""
if self.aligntIsDna:
_exakter_match_list = self.DNA_EXAKTER_MATCH
_guter_match_list = self.DNA_GUTER_MATCH
_kein_guter_match_list = self.DNA_KEIN_GUTER_MATCH
_qualitaetsListe = self.qualitaetsListe
else:
_exakter_match_list = self.AA_EXAKTER_MATCH
_guter_match_list = self.AA_GUTER_MATCH
_kein_guter_match_list = self.AA_KEIN_GUTER_MATCH
_qualitaetsListe = self.qualitaetsListeProteins
for i in range(len(obereZeile)):
if (
obereZeile[i] == self.INPUT_GAP_ZEICHEN or
untereZeile[i] == self.INPUT_GAP_ZEICHEN
):
qualitaetsZeile += self.QUAL_GAP_ZEICHEN
else:
currentResiduePair = str.upper(obereZeile[i] + untereZeile[i])
# print(currentResiduePair)
indexOfPair = _qualitaetsListe.index(currentResiduePair)
if indexOfPair in _exakter_match_list:
|
if indexOfPair in _guter_match_list:
qualitaetsZeile += self.GUTER_MATCH_ZEICHEN
if indexOfPair in _kein_guter_match_list:
qualitaetsZeile += self.KEIN_GUTER_MATCH_ZEICHEN
return(qualitaetsZeile)
# bool|ValueError showAlignment(cls, string zeile1, string zeile2)
def showAlignment(self, zeile1, zeile2):
"""Function that processes an existing alignment of dna or proteins
into a console output projection to a quality space determined by
the function getQuality and typically consisting of zeile1 on the
top, zeile2 at the bottom and a quality string in between. The
console output is also separated in two sections. The first
section is giving a feedback to the user about the consistency of
the input alignment. In case of a consistent alignment and user
choice (between dna and protein) a second section is displayed
showing the alignment together with its quality within 60 columns
of a properly (fixed fonts, more than 60 columns) configured
console. In case of a succesful output to the console the function
returns True. In case the consistency of the input is falsified,
the first section is gracefully given back to the user, but
processing of the input is interrupted by a ValueError exception.
"""
if (self.inputCheckpoint(zeile1, zeile2)):
# get the quality
quality_zeile = self.getQuality(zeile1, zeile2)
start_index = 0
cutter_index = self.OUTPUT_WIDTH_IN_COLS
while (start_index < len(quality_zeile)):
print(zeile1[start_index:cutter_index])
print(quality_zeile[start_index:cutter_index])
print(zeile2[start_index:cutter_index])
start_index = cutter_index
targeted_end_index = cutter_index + self.OUTPUT_WIDTH_IN_COLS
if targeted_end_index <= len(quality_zeile):
cutter_index = targeted_end_index
else:
cutter_index = len(quality_zeile)
return True
# {residueIndex : int, residue : char, recognizedAlphabet : string} getValidityOfResiduesInSequence(string seq)
def getValidityOfResiduesInSequence(self, seq):
"""Function returning the consistency of an individual input sequence
as a dictionary containing in the inconsistent case the residue
location and value of the first inconsistency and values
confirming the validity of the input sequence otherwise.
"""
seqList = list(seq)
aSpotted_Index = -1
aSpotted_residue = ""
if self.aligntIsDna:
_alphabet = self.DNA_ALPHABET
else:
_alphabet = self.PROTEIN_ALPHABET
# iterate over the sequence given the prior knowldege of the user
for i in range(len(seqList)):
residue = seqList[i]
if str.upper(residue) not in list(_alphabet):
aSpotted_Index = i
aSpotted_residue = residue
break
rv = {
"residueIndex": aSpotted_Index,
"residue": aSpotted_residue,
"recognizedAlphabet": self.VALID_DNA_OR_PROTEIN
}
if (aSpotted_residue != ""):
if self.aligntIsDna:
rv["recognizedAlphabet"] = self.INVALID_DNA
else:
rv["recognizedAlphabet"] = self.INVALID_PROTEIN
return(rv)
# bool|ValueError inputCheckpoint(string obereZeile, string untereZeile)
def inputCheckpoint(self, obereZeile, untereZeile):
"""Function checking the consistency of an alignment and generating
output of the first section of showAlignment in its behalf. If an
inconsistency is detected information about reasons for stopping
further processing is given back to the user and a ValueError is
raised. In case no inconsistency is found a summary report is also
generated and the function returns True. The function accepts 1)
only equal length for obereZeile, untereZeile 2) only the input
alphabets + INPUT_GAP_ZEICHEN ("_")
"""
rv = True
# 1) only equal length for obereZeile, untereZeile
if (len(obereZeile) != len(untereZeile)):
print("============================================================")
print("input sequences do not have the same length")
print("============================================================")
raise ValueError("Input sequences of different lengths")
# 2) only the input alphabets + INPUT_GAP_ZEICHEN ("_")
validityInObereZeile = self.getValidityOfResiduesInSequence(obereZeile)
validityInUntereZeile = self.getValidityOfResiduesInSequence(untereZeile)
if (
validityInObereZeile["recognizedAlphabet"] == self.VALID_DNA_OR_PROTEIN and
validityInUntereZeile["recognizedAlphabet"] == self.VALID_DNA_OR_PROTEIN
):
print("============================================================")
print("input is recognized as: " + self.VALID_DNA_OR_PROTEIN)
_input_type = "dna"
if not self.aligntIsDna:
_input_type = "protein"
print("input is now further processed as: " + _input_type)
print("============================================================")
else:
print("============================================================")
if (
validityInObereZeile["recognizedAlphabet"] in
[self.INVALID_DNA, self.INVALID_PROTEIN]
):
print(
"upper sequence is recognized as: " +
validityInObereZeile["recognizedAlphabet"]
)
print(
"character number {} with value '{}' could not be parsed".
format(
validityInObereZeile["residueIndex"] + 1,
validityInObereZeile["residue"]
)
)
if (
validityInUntereZeile["recognizedAlphabet"] in
[self.INVALID_DNA, self.INVALID_PROTEIN]
):
print(
"lower sequence is recognized as: " +
validityInUntereZeile["recognizedAlphabet"]
)
print(
"character number {} with value '{}' could not be parsed".
format(
validityInUntereZeile["residueIndex"] + 1,
validityInUntereZeile["residue"]
)
)
print("============================================================")
raise ValueError("Input outside of chosen alphabet.")
return(rv)
# None informUserAboutPrerequisites()
@staticmethod
def informUserAboutPrerequisites():
"""A pure side-effect function informing the user about proper use of
the script since no extra-care is taken by the script in order to
enforce a proper console configuration
"""
print("============================================================")
print("make sure your terminal is set to use fixed/monospaced fonts")
print("and displays a minimum of 60 columns")
print("============================================================")
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
metavar='upper_seq', type=str, nargs=1, dest='upper_seq',
help='a first sequence of an input alignment displayed' +
' on the top of the quality line'
)
parser.add_argument(
metavar='lower_seq', type=str, nargs=1, dest='lower_seq',
help='a second sequence of an input ' +
'alignment displayed below the quality line'
)
parser.add_argument(
'-i', '--input_type', default="dna", type=str,
help='dna|protein defaults to dna'
)
args = parser.parse_args()
#
# print(args.input_type)
# print(args.upper_seq)
# print(args.lower_seq)
obereZeile = args.upper_seq[0]
untereZeile = args.lower_seq[0]
if (args.input_type == "dna"):
aligntIsDna = True
elif (args.input_type == "protein"):
aligntIsDna = False
ShowAlignment.informUserAboutPrerequisites()
a = ShowAlignment(aligntIsDna)
a.showAlignment(obereZeile, untereZeile)
| qualitaetsZeile += self.EXAKTER_MATCH_ZEICHEN | conditional_block |
show_alignment.py | #!/Users/alexfinck/anaconda3/bin/python
# -*- coding: utf-8 -*-
"""This script extends an alignment consisting of upper-seq and
lower-seq with a quality line shown between upper-seq and lower-seq on
a terminal or console having at least 80 columns and configured to use
a fixed/monospaced font.
The script takes as main optional argument whether or not an input is
dna or proteins; this is an authoritative argument as the script does
not try to guess in place of the user; default is dna. The script
returns a message to the console consisting in three sections. For
each run the user is informed about pre-requisites to a succesful
usage in a first section. Then the input is parsed for consistency and
a corresponding section is printed to the console. The last section
contains the input alignement accompanied by a quality line in the
form of 3 lines repeatedly displayed and cut at default 60 columns
until the length of the alignment is reached.
"""
# "Skelett" des Programs zum Zeigen eines Alignments mit einer
# Qualitaetszeile Haltet euch bitte an diese Struktur - dort wo das
# "pass" steht muss euer Quelltext kommen. Das pass muss dazu
# geloescht werden.
# Autor: Alex Finck
# Datum der letzten Aenderung: 09.07.2020
#
# usage examples from the command line:
# > python show_alignment.py "AACTG_GTCAT" "AGTCAA_CTGA"
# > python show_alignment.py -iprotein "ACTG_GTCA" "GTCAA_CTG"
from Bio.SubsMat.MatrixInfo import pam30
import argparse
class ShowAlignment:
def __init__(self, aligntIsDna=True):
# Liste die angibt wie gut ein Match ist. Siehe Aufgabenzettel -
self.OUTPUT_WIDTH_IN_COLS = 60
self.qualitaetsListe = ["AA", "GG", "CC", "TT", "CT", "TC", "AG", "GA",
"CA", "AC", "CG", "GC", "TA", "AT", "TG", "GT"]
self.qualitaetsListeProteins = self.__qualitaetsListeProteins()
# beware: the following variable has coupling with the above
# _qualitaetsListeProteins function
self.PROTEIN_ALPHABET = 'ABCDEFGHIKLMNPQRSTVWXYZ_'
self.DNA_ALPHABET = "ACTG_"
self.INPUT_GAP_ZEICHEN = "_"
self.DNA_EXAKTER_MATCH = range(0, 4)
self.DNA_GUTER_MATCH = range(4, 8)
self.DNA_KEIN_GUTER_MATCH = range(8, 16)
# beware: the following 3 variables have coupling with the above
# _qualitaetsListeProteins function
self.AA_EXAKTER_MATCH = range(0, 19)
self.AA_GUTER_MATCH = range(19, 60)
self.AA_KEIN_GUTER_MATCH = range(60, 529)
self.EXAKTER_MATCH_ZEICHEN = "|"
self.GUTER_MATCH_ZEICHEN = ":"
self.KEIN_GUTER_MATCH_ZEICHEN = "."
self.QUAL_GAP_ZEICHEN = " "
# flags, the values of which are given back to the user
self.VALID_DNA_OR_PROTEIN = "valid dna|protein"
self.INVALID_PROTEIN = "invalid protein"
self.INVALID_DNA = "invalid dna"
# deductive and authoritative flag that gives to the script the prior
# information about
self.aligntIsDna = True
# string[] __qualitaetsListeProteins()
def __qualitaetsListeProteins(self):
"""Private function building and returning a quality list analog to
the quality list for dna nucleotides, but based upon the PAM30
Matrix; associated quaity ranges are defined in AA_EXAKTER_MATCH,
AA_GUTER_MATCH, AA_KEIN_GUTER_MATCH and correspond for
AA_GUTER_MATCH to remaining positve scores after removal of exact
matches and for AA_KEIN_GUTER_MATCH to negative scores,
respectively
"""
rv = []
pam30_sortierbar = {}
for key in pam30.keys():
pam30_sortierbar[str(pam30[key]) + ";" + ''.join(key)] = pam30[key]
if key[0] != key[1]:
pam30_sortierbar[
str(pam30[key]) + ";" + ''.join((key[1], key[0]))
] = pam30[key]
sorted_keys = list(pam30_sortierbar.keys())
sorted_keys.sort(key=lambda k: int(k.split(";")[0]), reverse=True)
# debugging kept for historical reasons
# for key in iter(sorted_keys):
# print(key.split(";")[1] + " has score " + str(pam30_sortierbar[key]))
for key in iter(sorted_keys):
rv.append(key.split(";")[1])
return(rv)
# string getQuality(string obereZeile, string untereZeile)
def getQuality(self, obereZeile, untereZeile):
"""Function that returns in the form of a string a quality of an
alignment consisting of two input sequences of dna or proteins.
The quality depnds on the prior input of the user given by the
aligntIsDna Flag. Quality for dna sequence pairs further depends
upon the list 'qualitaetsListe' and for amino acid sequences upon
the list 'qualitaetsListeProteins'.
"""
qualitaetsZeile = ""
if self.aligntIsDna:
_exakter_match_list = self.DNA_EXAKTER_MATCH
_guter_match_list = self.DNA_GUTER_MATCH
_kein_guter_match_list = self.DNA_KEIN_GUTER_MATCH
_qualitaetsListe = self.qualitaetsListe
else:
_exakter_match_list = self.AA_EXAKTER_MATCH
_guter_match_list = self.AA_GUTER_MATCH
_kein_guter_match_list = self.AA_KEIN_GUTER_MATCH
_qualitaetsListe = self.qualitaetsListeProteins
for i in range(len(obereZeile)):
if (
obereZeile[i] == self.INPUT_GAP_ZEICHEN or
untereZeile[i] == self.INPUT_GAP_ZEICHEN
):
qualitaetsZeile += self.QUAL_GAP_ZEICHEN
else:
currentResiduePair = str.upper(obereZeile[i] + untereZeile[i])
# print(currentResiduePair)
indexOfPair = _qualitaetsListe.index(currentResiduePair)
if indexOfPair in _exakter_match_list:
qualitaetsZeile += self.EXAKTER_MATCH_ZEICHEN
if indexOfPair in _guter_match_list:
qualitaetsZeile += self.GUTER_MATCH_ZEICHEN
if indexOfPair in _kein_guter_match_list:
qualitaetsZeile += self.KEIN_GUTER_MATCH_ZEICHEN
return(qualitaetsZeile)
# bool|ValueError showAlignment(cls, string zeile1, string zeile2)
def showAlignment(self, zeile1, zeile2):
"""Function that processes an existing alignment of dna or proteins
into a console output projection to a quality space determined by
the function getQuality and typically consisting of zeile1 on the
top, zeile2 at the bottom and a quality string in between. The
console output is also separated in two sections. The first
section is giving a feedback to the user about the consistency of
the input alignment. In case of a consistent alignment and user
choice (between dna and protein) a second section is displayed
showing the alignment together with its quality within 60 columns
of a properly (fixed fonts, more than 60 columns) configured
console. In case of a succesful output to the console the function
returns True. In case the consistency of the input is falsified,
the first section is gracefully given back to the user, but
processing of the input is interrupted by a ValueError exception.
"""
if (self.inputCheckpoint(zeile1, zeile2)):
# get the quality
quality_zeile = self.getQuality(zeile1, zeile2)
start_index = 0
cutter_index = self.OUTPUT_WIDTH_IN_COLS
while (start_index < len(quality_zeile)):
print(zeile1[start_index:cutter_index])
print(quality_zeile[start_index:cutter_index])
print(zeile2[start_index:cutter_index])
start_index = cutter_index
targeted_end_index = cutter_index + self.OUTPUT_WIDTH_IN_COLS
if targeted_end_index <= len(quality_zeile):
cutter_index = targeted_end_index
else:
cutter_index = len(quality_zeile)
return True
# {residueIndex : int, residue : char, recognizedAlphabet : string} getValidityOfResiduesInSequence(string seq)
def getValidityOfResiduesInSequence(self, seq):
"""Function returning the consistency of an individual input sequence
as a dictionary containing in the inconsistent case the residue
location and value of the first inconsistency and values
confirming the validity of the input sequence otherwise.
"""
seqList = list(seq)
aSpotted_Index = -1
aSpotted_residue = ""
if self.aligntIsDna:
_alphabet = self.DNA_ALPHABET
else:
_alphabet = self.PROTEIN_ALPHABET
# iterate over the sequence given the prior knowldege of the user
for i in range(len(seqList)):
residue = seqList[i]
if str.upper(residue) not in list(_alphabet):
aSpotted_Index = i
aSpotted_residue = residue
break
rv = {
"residueIndex": aSpotted_Index,
"residue": aSpotted_residue,
"recognizedAlphabet": self.VALID_DNA_OR_PROTEIN
}
if (aSpotted_residue != ""):
if self.aligntIsDna:
rv["recognizedAlphabet"] = self.INVALID_DNA
else:
rv["recognizedAlphabet"] = self.INVALID_PROTEIN
return(rv)
# bool|ValueError inputCheckpoint(string obereZeile, string untereZeile)
def inputCheckpoint(self, obereZeile, untereZeile):
|
# None informUserAboutPrerequisites()
@staticmethod
def informUserAboutPrerequisites():
"""A pure side-effect function informing the user about proper use of
the script since no extra-care is taken by the script in order to
enforce a proper console configuration
"""
print("============================================================")
print("make sure your terminal is set to use fixed/monospaced fonts")
print("and displays a minimum of 60 columns")
print("============================================================")
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
metavar='upper_seq', type=str, nargs=1, dest='upper_seq',
help='a first sequence of an input alignment displayed' +
' on the top of the quality line'
)
parser.add_argument(
metavar='lower_seq', type=str, nargs=1, dest='lower_seq',
help='a second sequence of an input ' +
'alignment displayed below the quality line'
)
parser.add_argument(
'-i', '--input_type', default="dna", type=str,
help='dna|protein defaults to dna'
)
args = parser.parse_args()
#
# print(args.input_type)
# print(args.upper_seq)
# print(args.lower_seq)
obereZeile = args.upper_seq[0]
untereZeile = args.lower_seq[0]
if (args.input_type == "dna"):
aligntIsDna = True
elif (args.input_type == "protein"):
aligntIsDna = False
ShowAlignment.informUserAboutPrerequisites()
a = ShowAlignment(aligntIsDna)
a.showAlignment(obereZeile, untereZeile)
| """Function checking the consistency of an alignment and generating
output of the first section of showAlignment in its behalf. If an
inconsistency is detected information about reasons for stopping
further processing is given back to the user and a ValueError is
raised. In case no inconsistency is found a summary report is also
generated and the function returns True. The function accepts 1)
only equal length for obereZeile, untereZeile 2) only the input
alphabets + INPUT_GAP_ZEICHEN ("_")
"""
rv = True
# 1) only equal length for obereZeile, untereZeile
if (len(obereZeile) != len(untereZeile)):
print("============================================================")
print("input sequences do not have the same length")
print("============================================================")
raise ValueError("Input sequences of different lengths")
# 2) only the input alphabets + INPUT_GAP_ZEICHEN ("_")
validityInObereZeile = self.getValidityOfResiduesInSequence(obereZeile)
validityInUntereZeile = self.getValidityOfResiduesInSequence(untereZeile)
if (
validityInObereZeile["recognizedAlphabet"] == self.VALID_DNA_OR_PROTEIN and
validityInUntereZeile["recognizedAlphabet"] == self.VALID_DNA_OR_PROTEIN
):
print("============================================================")
print("input is recognized as: " + self.VALID_DNA_OR_PROTEIN)
_input_type = "dna"
if not self.aligntIsDna:
_input_type = "protein"
print("input is now further processed as: " + _input_type)
print("============================================================")
else:
print("============================================================")
if (
validityInObereZeile["recognizedAlphabet"] in
[self.INVALID_DNA, self.INVALID_PROTEIN]
):
print(
"upper sequence is recognized as: " +
validityInObereZeile["recognizedAlphabet"]
)
print(
"character number {} with value '{}' could not be parsed".
format(
validityInObereZeile["residueIndex"] + 1,
validityInObereZeile["residue"]
)
)
if (
validityInUntereZeile["recognizedAlphabet"] in
[self.INVALID_DNA, self.INVALID_PROTEIN]
):
print(
"lower sequence is recognized as: " +
validityInUntereZeile["recognizedAlphabet"]
)
print(
"character number {} with value '{}' could not be parsed".
format(
validityInUntereZeile["residueIndex"] + 1,
validityInUntereZeile["residue"]
)
)
print("============================================================")
raise ValueError("Input outside of chosen alphabet.")
return(rv) | identifier_body |
show_alignment.py | #!/Users/alexfinck/anaconda3/bin/python
# -*- coding: utf-8 -*-
"""This script extends an alignment consisting of upper-seq and
lower-seq with a quality line shown between upper-seq and lower-seq on
a terminal or console having at least 80 columns and configured to use
a fixed/monospaced font.
The script takes as main optional argument whether or not an input is
dna or proteins; this is an authoritative argument as the script does
not try to guess in place of the user; default is dna. The script
returns a message to the console consisting in three sections. For
each run the user is informed about pre-requisites to a succesful
usage in a first section. Then the input is parsed for consistency and
a corresponding section is printed to the console. The last section
contains the input alignement accompanied by a quality line in the
form of 3 lines repeatedly displayed and cut at default 60 columns
until the length of the alignment is reached.
"""
# "Skelett" des Programs zum Zeigen eines Alignments mit einer
# Qualitaetszeile Haltet euch bitte an diese Struktur - dort wo das
# "pass" steht muss euer Quelltext kommen. Das pass muss dazu
# geloescht werden.
# Autor: Alex Finck
# Datum der letzten Aenderung: 09.07.2020
#
# usage examples from the command line:
# > python show_alignment.py "AACTG_GTCAT" "AGTCAA_CTGA"
# > python show_alignment.py -iprotein "ACTG_GTCA" "GTCAA_CTG"
from Bio.SubsMat.MatrixInfo import pam30
import argparse
class ShowAlignment:
def __init__(self, aligntIsDna=True):
# Liste die angibt wie gut ein Match ist. Siehe Aufgabenzettel -
self.OUTPUT_WIDTH_IN_COLS = 60
self.qualitaetsListe = ["AA", "GG", "CC", "TT", "CT", "TC", "AG", "GA",
"CA", "AC", "CG", "GC", "TA", "AT", "TG", "GT"]
self.qualitaetsListeProteins = self.__qualitaetsListeProteins()
# beware: the following variable has coupling with the above
# _qualitaetsListeProteins function
self.PROTEIN_ALPHABET = 'ABCDEFGHIKLMNPQRSTVWXYZ_'
self.DNA_ALPHABET = "ACTG_"
self.INPUT_GAP_ZEICHEN = "_"
self.DNA_EXAKTER_MATCH = range(0, 4)
self.DNA_GUTER_MATCH = range(4, 8)
self.DNA_KEIN_GUTER_MATCH = range(8, 16)
# beware: the following 3 variables have coupling with the above
# _qualitaetsListeProteins function
self.AA_EXAKTER_MATCH = range(0, 19)
self.AA_GUTER_MATCH = range(19, 60)
self.AA_KEIN_GUTER_MATCH = range(60, 529)
self.EXAKTER_MATCH_ZEICHEN = "|"
self.GUTER_MATCH_ZEICHEN = ":"
self.KEIN_GUTER_MATCH_ZEICHEN = "."
self.QUAL_GAP_ZEICHEN = " "
# flags, the values of which are given back to the user
self.VALID_DNA_OR_PROTEIN = "valid dna|protein"
self.INVALID_PROTEIN = "invalid protein"
self.INVALID_DNA = "invalid dna"
# deductive and authoritative flag that gives to the script the prior
# information about
self.aligntIsDna = True
# string[] __qualitaetsListeProteins()
def | (self):
"""Private function building and returning a quality list analog to
the quality list for dna nucleotides, but based upon the PAM30
Matrix; associated quaity ranges are defined in AA_EXAKTER_MATCH,
AA_GUTER_MATCH, AA_KEIN_GUTER_MATCH and correspond for
AA_GUTER_MATCH to remaining positve scores after removal of exact
matches and for AA_KEIN_GUTER_MATCH to negative scores,
respectively
"""
rv = []
pam30_sortierbar = {}
for key in pam30.keys():
pam30_sortierbar[str(pam30[key]) + ";" + ''.join(key)] = pam30[key]
if key[0] != key[1]:
pam30_sortierbar[
str(pam30[key]) + ";" + ''.join((key[1], key[0]))
] = pam30[key]
sorted_keys = list(pam30_sortierbar.keys())
sorted_keys.sort(key=lambda k: int(k.split(";")[0]), reverse=True)
# debugging kept for historical reasons
# for key in iter(sorted_keys):
# print(key.split(";")[1] + " has score " + str(pam30_sortierbar[key]))
for key in iter(sorted_keys):
rv.append(key.split(";")[1])
return(rv)
# string getQuality(string obereZeile, string untereZeile)
def getQuality(self, obereZeile, untereZeile):
"""Function that returns in the form of a string a quality of an
alignment consisting of two input sequences of dna or proteins.
The quality depnds on the prior input of the user given by the
aligntIsDna Flag. Quality for dna sequence pairs further depends
upon the list 'qualitaetsListe' and for amino acid sequences upon
the list 'qualitaetsListeProteins'.
"""
qualitaetsZeile = ""
if self.aligntIsDna:
_exakter_match_list = self.DNA_EXAKTER_MATCH
_guter_match_list = self.DNA_GUTER_MATCH
_kein_guter_match_list = self.DNA_KEIN_GUTER_MATCH
_qualitaetsListe = self.qualitaetsListe
else:
_exakter_match_list = self.AA_EXAKTER_MATCH
_guter_match_list = self.AA_GUTER_MATCH
_kein_guter_match_list = self.AA_KEIN_GUTER_MATCH
_qualitaetsListe = self.qualitaetsListeProteins
for i in range(len(obereZeile)):
if (
obereZeile[i] == self.INPUT_GAP_ZEICHEN or
untereZeile[i] == self.INPUT_GAP_ZEICHEN
):
qualitaetsZeile += self.QUAL_GAP_ZEICHEN
else:
currentResiduePair = str.upper(obereZeile[i] + untereZeile[i])
# print(currentResiduePair)
indexOfPair = _qualitaetsListe.index(currentResiduePair)
if indexOfPair in _exakter_match_list:
qualitaetsZeile += self.EXAKTER_MATCH_ZEICHEN
if indexOfPair in _guter_match_list:
qualitaetsZeile += self.GUTER_MATCH_ZEICHEN
if indexOfPair in _kein_guter_match_list:
qualitaetsZeile += self.KEIN_GUTER_MATCH_ZEICHEN
return(qualitaetsZeile)
# bool|ValueError showAlignment(cls, string zeile1, string zeile2)
def showAlignment(self, zeile1, zeile2):
"""Function that processes an existing alignment of dna or proteins
into a console output projection to a quality space determined by
the function getQuality and typically consisting of zeile1 on the
top, zeile2 at the bottom and a quality string in between. The
console output is also separated in two sections. The first
section is giving a feedback to the user about the consistency of
the input alignment. In case of a consistent alignment and user
choice (between dna and protein) a second section is displayed
showing the alignment together with its quality within 60 columns
of a properly (fixed fonts, more than 60 columns) configured
console. In case of a succesful output to the console the function
returns True. In case the consistency of the input is falsified,
the first section is gracefully given back to the user, but
processing of the input is interrupted by a ValueError exception.
"""
if (self.inputCheckpoint(zeile1, zeile2)):
# get the quality
quality_zeile = self.getQuality(zeile1, zeile2)
start_index = 0
cutter_index = self.OUTPUT_WIDTH_IN_COLS
while (start_index < len(quality_zeile)):
print(zeile1[start_index:cutter_index])
print(quality_zeile[start_index:cutter_index])
print(zeile2[start_index:cutter_index])
start_index = cutter_index
targeted_end_index = cutter_index + self.OUTPUT_WIDTH_IN_COLS
if targeted_end_index <= len(quality_zeile):
cutter_index = targeted_end_index
else:
cutter_index = len(quality_zeile)
return True
# {residueIndex : int, residue : char, recognizedAlphabet : string} getValidityOfResiduesInSequence(string seq)
def getValidityOfResiduesInSequence(self, seq):
"""Function returning the consistency of an individual input sequence
as a dictionary containing in the inconsistent case the residue
location and value of the first inconsistency and values
confirming the validity of the input sequence otherwise.
"""
seqList = list(seq)
aSpotted_Index = -1
aSpotted_residue = ""
if self.aligntIsDna:
_alphabet = self.DNA_ALPHABET
else:
_alphabet = self.PROTEIN_ALPHABET
# iterate over the sequence given the prior knowldege of the user
for i in range(len(seqList)):
residue = seqList[i]
if str.upper(residue) not in list(_alphabet):
aSpotted_Index = i
aSpotted_residue = residue
break
rv = {
"residueIndex": aSpotted_Index,
"residue": aSpotted_residue,
"recognizedAlphabet": self.VALID_DNA_OR_PROTEIN
}
if (aSpotted_residue != ""):
if self.aligntIsDna:
rv["recognizedAlphabet"] = self.INVALID_DNA
else:
rv["recognizedAlphabet"] = self.INVALID_PROTEIN
return(rv)
# bool|ValueError inputCheckpoint(string obereZeile, string untereZeile)
def inputCheckpoint(self, obereZeile, untereZeile):
"""Function checking the consistency of an alignment and generating
output of the first section of showAlignment in its behalf. If an
inconsistency is detected information about reasons for stopping
further processing is given back to the user and a ValueError is
raised. In case no inconsistency is found a summary report is also
generated and the function returns True. The function accepts 1)
only equal length for obereZeile, untereZeile 2) only the input
alphabets + INPUT_GAP_ZEICHEN ("_")
"""
rv = True
# 1) only equal length for obereZeile, untereZeile
if (len(obereZeile) != len(untereZeile)):
print("============================================================")
print("input sequences do not have the same length")
print("============================================================")
raise ValueError("Input sequences of different lengths")
# 2) only the input alphabets + INPUT_GAP_ZEICHEN ("_")
validityInObereZeile = self.getValidityOfResiduesInSequence(obereZeile)
validityInUntereZeile = self.getValidityOfResiduesInSequence(untereZeile)
if (
validityInObereZeile["recognizedAlphabet"] == self.VALID_DNA_OR_PROTEIN and
validityInUntereZeile["recognizedAlphabet"] == self.VALID_DNA_OR_PROTEIN
):
print("============================================================")
print("input is recognized as: " + self.VALID_DNA_OR_PROTEIN)
_input_type = "dna"
if not self.aligntIsDna:
_input_type = "protein"
print("input is now further processed as: " + _input_type)
print("============================================================")
else:
print("============================================================")
if (
validityInObereZeile["recognizedAlphabet"] in
[self.INVALID_DNA, self.INVALID_PROTEIN]
):
print(
"upper sequence is recognized as: " +
validityInObereZeile["recognizedAlphabet"]
)
print(
"character number {} with value '{}' could not be parsed".
format(
validityInObereZeile["residueIndex"] + 1,
validityInObereZeile["residue"]
)
)
if (
validityInUntereZeile["recognizedAlphabet"] in
[self.INVALID_DNA, self.INVALID_PROTEIN]
):
print(
"lower sequence is recognized as: " +
validityInUntereZeile["recognizedAlphabet"]
)
print(
"character number {} with value '{}' could not be parsed".
format(
validityInUntereZeile["residueIndex"] + 1,
validityInUntereZeile["residue"]
)
)
print("============================================================")
raise ValueError("Input outside of chosen alphabet.")
return(rv)
# None informUserAboutPrerequisites()
@staticmethod
def informUserAboutPrerequisites():
"""A pure side-effect function informing the user about proper use of
the script since no extra-care is taken by the script in order to
enforce a proper console configuration
"""
print("============================================================")
print("make sure your terminal is set to use fixed/monospaced fonts")
print("and displays a minimum of 60 columns")
print("============================================================")
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
metavar='upper_seq', type=str, nargs=1, dest='upper_seq',
help='a first sequence of an input alignment displayed' +
' on the top of the quality line'
)
parser.add_argument(
metavar='lower_seq', type=str, nargs=1, dest='lower_seq',
help='a second sequence of an input ' +
'alignment displayed below the quality line'
)
parser.add_argument(
'-i', '--input_type', default="dna", type=str,
help='dna|protein defaults to dna'
)
args = parser.parse_args()
#
# print(args.input_type)
# print(args.upper_seq)
# print(args.lower_seq)
obereZeile = args.upper_seq[0]
untereZeile = args.lower_seq[0]
if (args.input_type == "dna"):
aligntIsDna = True
elif (args.input_type == "protein"):
aligntIsDna = False
ShowAlignment.informUserAboutPrerequisites()
a = ShowAlignment(aligntIsDna)
a.showAlignment(obereZeile, untereZeile)
| __qualitaetsListeProteins | identifier_name |
show_alignment.py | #!/Users/alexfinck/anaconda3/bin/python
# -*- coding: utf-8 -*-
"""This script extends an alignment consisting of upper-seq and
lower-seq with a quality line shown between upper-seq and lower-seq on
a terminal or console having at least 80 columns and configured to use
a fixed/monospaced font.
The script takes as main optional argument whether or not an input is
dna or proteins; this is an authoritative argument as the script does
not try to guess in place of the user; default is dna. The script
returns a message to the console consisting in three sections. For
each run the user is informed about pre-requisites to a succesful
usage in a first section. Then the input is parsed for consistency and
a corresponding section is printed to the console. The last section
contains the input alignement accompanied by a quality line in the
form of 3 lines repeatedly displayed and cut at default 60 columns | # "Skelett" des Programs zum Zeigen eines Alignments mit einer
# Qualitaetszeile Haltet euch bitte an diese Struktur - dort wo das
# "pass" steht muss euer Quelltext kommen. Das pass muss dazu
# geloescht werden.
# Autor: Alex Finck
# Datum der letzten Aenderung: 09.07.2020
#
# usage examples from the command line:
# > python show_alignment.py "AACTG_GTCAT" "AGTCAA_CTGA"
# > python show_alignment.py -iprotein "ACTG_GTCA" "GTCAA_CTG"
from Bio.SubsMat.MatrixInfo import pam30
import argparse
class ShowAlignment:
def __init__(self, aligntIsDna=True):
# Liste die angibt wie gut ein Match ist. Siehe Aufgabenzettel -
self.OUTPUT_WIDTH_IN_COLS = 60
self.qualitaetsListe = ["AA", "GG", "CC", "TT", "CT", "TC", "AG", "GA",
"CA", "AC", "CG", "GC", "TA", "AT", "TG", "GT"]
self.qualitaetsListeProteins = self.__qualitaetsListeProteins()
# beware: the following variable has coupling with the above
# _qualitaetsListeProteins function
self.PROTEIN_ALPHABET = 'ABCDEFGHIKLMNPQRSTVWXYZ_'
self.DNA_ALPHABET = "ACTG_"
self.INPUT_GAP_ZEICHEN = "_"
self.DNA_EXAKTER_MATCH = range(0, 4)
self.DNA_GUTER_MATCH = range(4, 8)
self.DNA_KEIN_GUTER_MATCH = range(8, 16)
# beware: the following 3 variables have coupling with the above
# _qualitaetsListeProteins function
self.AA_EXAKTER_MATCH = range(0, 19)
self.AA_GUTER_MATCH = range(19, 60)
self.AA_KEIN_GUTER_MATCH = range(60, 529)
self.EXAKTER_MATCH_ZEICHEN = "|"
self.GUTER_MATCH_ZEICHEN = ":"
self.KEIN_GUTER_MATCH_ZEICHEN = "."
self.QUAL_GAP_ZEICHEN = " "
# flags, the values of which are given back to the user
self.VALID_DNA_OR_PROTEIN = "valid dna|protein"
self.INVALID_PROTEIN = "invalid protein"
self.INVALID_DNA = "invalid dna"
# deductive and authoritative flag that gives to the script the prior
# information about
self.aligntIsDna = True
# string[] __qualitaetsListeProteins()
def __qualitaetsListeProteins(self):
"""Private function building and returning a quality list analog to
the quality list for dna nucleotides, but based upon the PAM30
Matrix; associated quaity ranges are defined in AA_EXAKTER_MATCH,
AA_GUTER_MATCH, AA_KEIN_GUTER_MATCH and correspond for
AA_GUTER_MATCH to remaining positve scores after removal of exact
matches and for AA_KEIN_GUTER_MATCH to negative scores,
respectively
"""
rv = []
pam30_sortierbar = {}
for key in pam30.keys():
pam30_sortierbar[str(pam30[key]) + ";" + ''.join(key)] = pam30[key]
if key[0] != key[1]:
pam30_sortierbar[
str(pam30[key]) + ";" + ''.join((key[1], key[0]))
] = pam30[key]
sorted_keys = list(pam30_sortierbar.keys())
sorted_keys.sort(key=lambda k: int(k.split(";")[0]), reverse=True)
# debugging kept for historical reasons
# for key in iter(sorted_keys):
# print(key.split(";")[1] + " has score " + str(pam30_sortierbar[key]))
for key in iter(sorted_keys):
rv.append(key.split(";")[1])
return(rv)
# string getQuality(string obereZeile, string untereZeile)
def getQuality(self, obereZeile, untereZeile):
"""Function that returns in the form of a string a quality of an
alignment consisting of two input sequences of dna or proteins.
The quality depnds on the prior input of the user given by the
aligntIsDna Flag. Quality for dna sequence pairs further depends
upon the list 'qualitaetsListe' and for amino acid sequences upon
the list 'qualitaetsListeProteins'.
"""
qualitaetsZeile = ""
if self.aligntIsDna:
_exakter_match_list = self.DNA_EXAKTER_MATCH
_guter_match_list = self.DNA_GUTER_MATCH
_kein_guter_match_list = self.DNA_KEIN_GUTER_MATCH
_qualitaetsListe = self.qualitaetsListe
else:
_exakter_match_list = self.AA_EXAKTER_MATCH
_guter_match_list = self.AA_GUTER_MATCH
_kein_guter_match_list = self.AA_KEIN_GUTER_MATCH
_qualitaetsListe = self.qualitaetsListeProteins
for i in range(len(obereZeile)):
if (
obereZeile[i] == self.INPUT_GAP_ZEICHEN or
untereZeile[i] == self.INPUT_GAP_ZEICHEN
):
qualitaetsZeile += self.QUAL_GAP_ZEICHEN
else:
currentResiduePair = str.upper(obereZeile[i] + untereZeile[i])
# print(currentResiduePair)
indexOfPair = _qualitaetsListe.index(currentResiduePair)
if indexOfPair in _exakter_match_list:
qualitaetsZeile += self.EXAKTER_MATCH_ZEICHEN
if indexOfPair in _guter_match_list:
qualitaetsZeile += self.GUTER_MATCH_ZEICHEN
if indexOfPair in _kein_guter_match_list:
qualitaetsZeile += self.KEIN_GUTER_MATCH_ZEICHEN
return(qualitaetsZeile)
# bool|ValueError showAlignment(cls, string zeile1, string zeile2)
def showAlignment(self, zeile1, zeile2):
"""Function that processes an existing alignment of dna or proteins
into a console output projection to a quality space determined by
the function getQuality and typically consisting of zeile1 on the
top, zeile2 at the bottom and a quality string in between. The
console output is also separated in two sections. The first
section is giving a feedback to the user about the consistency of
the input alignment. In case of a consistent alignment and user
choice (between dna and protein) a second section is displayed
showing the alignment together with its quality within 60 columns
of a properly (fixed fonts, more than 60 columns) configured
console. In case of a succesful output to the console the function
returns True. In case the consistency of the input is falsified,
the first section is gracefully given back to the user, but
processing of the input is interrupted by a ValueError exception.
"""
if (self.inputCheckpoint(zeile1, zeile2)):
# get the quality
quality_zeile = self.getQuality(zeile1, zeile2)
start_index = 0
cutter_index = self.OUTPUT_WIDTH_IN_COLS
while (start_index < len(quality_zeile)):
print(zeile1[start_index:cutter_index])
print(quality_zeile[start_index:cutter_index])
print(zeile2[start_index:cutter_index])
start_index = cutter_index
targeted_end_index = cutter_index + self.OUTPUT_WIDTH_IN_COLS
if targeted_end_index <= len(quality_zeile):
cutter_index = targeted_end_index
else:
cutter_index = len(quality_zeile)
return True
# {residueIndex : int, residue : char, recognizedAlphabet : string} getValidityOfResiduesInSequence(string seq)
def getValidityOfResiduesInSequence(self, seq):
"""Function returning the consistency of an individual input sequence
as a dictionary containing in the inconsistent case the residue
location and value of the first inconsistency and values
confirming the validity of the input sequence otherwise.
"""
seqList = list(seq)
aSpotted_Index = -1
aSpotted_residue = ""
if self.aligntIsDna:
_alphabet = self.DNA_ALPHABET
else:
_alphabet = self.PROTEIN_ALPHABET
# iterate over the sequence given the prior knowldege of the user
for i in range(len(seqList)):
residue = seqList[i]
if str.upper(residue) not in list(_alphabet):
aSpotted_Index = i
aSpotted_residue = residue
break
rv = {
"residueIndex": aSpotted_Index,
"residue": aSpotted_residue,
"recognizedAlphabet": self.VALID_DNA_OR_PROTEIN
}
if (aSpotted_residue != ""):
if self.aligntIsDna:
rv["recognizedAlphabet"] = self.INVALID_DNA
else:
rv["recognizedAlphabet"] = self.INVALID_PROTEIN
return(rv)
# bool|ValueError inputCheckpoint(string obereZeile, string untereZeile)
def inputCheckpoint(self, obereZeile, untereZeile):
"""Function checking the consistency of an alignment and generating
output of the first section of showAlignment in its behalf. If an
inconsistency is detected information about reasons for stopping
further processing is given back to the user and a ValueError is
raised. In case no inconsistency is found a summary report is also
generated and the function returns True. The function accepts 1)
only equal length for obereZeile, untereZeile 2) only the input
alphabets + INPUT_GAP_ZEICHEN ("_")
"""
rv = True
# 1) only equal length for obereZeile, untereZeile
if (len(obereZeile) != len(untereZeile)):
print("============================================================")
print("input sequences do not have the same length")
print("============================================================")
raise ValueError("Input sequences of different lengths")
# 2) only the input alphabets + INPUT_GAP_ZEICHEN ("_")
validityInObereZeile = self.getValidityOfResiduesInSequence(obereZeile)
validityInUntereZeile = self.getValidityOfResiduesInSequence(untereZeile)
if (
validityInObereZeile["recognizedAlphabet"] == self.VALID_DNA_OR_PROTEIN and
validityInUntereZeile["recognizedAlphabet"] == self.VALID_DNA_OR_PROTEIN
):
print("============================================================")
print("input is recognized as: " + self.VALID_DNA_OR_PROTEIN)
_input_type = "dna"
if not self.aligntIsDna:
_input_type = "protein"
print("input is now further processed as: " + _input_type)
print("============================================================")
else:
print("============================================================")
if (
validityInObereZeile["recognizedAlphabet"] in
[self.INVALID_DNA, self.INVALID_PROTEIN]
):
print(
"upper sequence is recognized as: " +
validityInObereZeile["recognizedAlphabet"]
)
print(
"character number {} with value '{}' could not be parsed".
format(
validityInObereZeile["residueIndex"] + 1,
validityInObereZeile["residue"]
)
)
if (
validityInUntereZeile["recognizedAlphabet"] in
[self.INVALID_DNA, self.INVALID_PROTEIN]
):
print(
"lower sequence is recognized as: " +
validityInUntereZeile["recognizedAlphabet"]
)
print(
"character number {} with value '{}' could not be parsed".
format(
validityInUntereZeile["residueIndex"] + 1,
validityInUntereZeile["residue"]
)
)
print("============================================================")
raise ValueError("Input outside of chosen alphabet.")
return(rv)
# None informUserAboutPrerequisites()
@staticmethod
def informUserAboutPrerequisites():
"""A pure side-effect function informing the user about proper use of
the script since no extra-care is taken by the script in order to
enforce a proper console configuration
"""
print("============================================================")
print("make sure your terminal is set to use fixed/monospaced fonts")
print("and displays a minimum of 60 columns")
print("============================================================")
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
metavar='upper_seq', type=str, nargs=1, dest='upper_seq',
help='a first sequence of an input alignment displayed' +
' on the top of the quality line'
)
parser.add_argument(
metavar='lower_seq', type=str, nargs=1, dest='lower_seq',
help='a second sequence of an input ' +
'alignment displayed below the quality line'
)
parser.add_argument(
'-i', '--input_type', default="dna", type=str,
help='dna|protein defaults to dna'
)
args = parser.parse_args()
#
# print(args.input_type)
# print(args.upper_seq)
# print(args.lower_seq)
obereZeile = args.upper_seq[0]
untereZeile = args.lower_seq[0]
if (args.input_type == "dna"):
aligntIsDna = True
elif (args.input_type == "protein"):
aligntIsDna = False
ShowAlignment.informUserAboutPrerequisites()
a = ShowAlignment(aligntIsDna)
a.showAlignment(obereZeile, untereZeile) | until the length of the alignment is reached.
""" | random_line_split |
lib.rs | #![forbid(unsafe_code)]
#![warn(rust_2018_idioms)]
use anyhow::{anyhow, Context as _};
use cargo_metadata as cm;
use duct::cmd;
use indoc::indoc;
use itertools::Itertools as _;
use quote::quote;
use std::{
collections::{BTreeMap, HashMap, HashSet},
env, fs,
ops::Range,
path::{Path, PathBuf},
};
pub fn gen_doc_rust(crates_dir: Option<&Path>, manifest_path: Option<&Path>) -> anyhow::Result<()> {
let metadata = &cargo_metadata(manifest_path)?;
let resolve = metadata.resolve.as_ref().expect("should be present");
let resolve_root = resolve
.root
.as_ref()
.with_context(|| "this is a virtual manifest")?;
let extern_crate_names = {
let explicit_names_in_toml = metadata[resolve_root]
.dependencies
.iter()
.flat_map(|d| &d.rename)
.collect::<HashSet<_>>();
let cm::Node { deps, .. } = resolve
.nodes
.iter()
.find(|cm::Node { id, .. }| id == resolve_root)
.unwrap();
deps.iter()
.filter(|cm::NodeDep { dep_kinds, .. }| {
dep_kinds
.iter()
.any(|cm::DepKindInfo { kind, .. }| *kind != cm::DependencyKind::Build)
})
.flat_map(|cm::NodeDep { name, pkg, .. }| {
let extern_crate_name = if explicit_names_in_toml.contains(name) {
name
} else {
&metadata[pkg].lib_or_proc_macro()?.name
};
Some((pkg, extern_crate_name))
})
.collect::<HashMap<_, _>>()
};
let lib = metadata[resolve_root]
.lib_or_proc_macro()
.with_context(|| format!("missing `lib|proc-macro` target in `{}`", resolve_root))?;
let crates_dir = &crates_dir
.map(ToOwned::to_owned)
.unwrap_or_else(|| metadata.workspace_root.join("crates"));
let library_crates = metadata
.workspace_members
.iter()
.flat_map(|ws_member| {
let ws_member = &metadata[ws_member];
let target = ws_member.lib_or_proc_macro()?;
let src_path = dunce::canonicalize(&target.src_path).ok()?;
let path1 = ws_member.manifest_path.strip_prefix(crates_dir).ok()?;
let path2 = src_path.strip_prefix(crates_dir).ok()?;
let common_path = itertools::zip(path1, path2)
.filter(|(s1, s2)| s1 == s2)
.map(|(segment, _)| segment.to_str().expect("this is from a JSON"))
.collect();
Some((common_path, ws_member, target))
})
.collect::<Vec<(Vec<_>, _, _)>>();
let mut tree = Tree::default();
for (path, package, target) in &library_crates {
tree.insert(path, package, target);
}
let generated = &mut indoc! {r"
// This file is automatically generated by `cargo-online-judge-verification-helper-helper`.
//! Re-exports the library crates for rustdoc.
//!
//! This crate itself is not intended to be used directly.
"}
.to_owned();
tree.expand(&|id| &extern_crate_names[id], generated)?;
fs::write(&lib.src_path, apply_rustfmt(generated)?)?;
eprintln!("{:>12} {}", "Wrote", lib.src_path.display());
return Ok(());
enum Tree<'cm> {
Leaf(&'cm cm::PackageId, &'cm cm::Target),
Joint(BTreeMap<&'cm str, Self>),
}
impl<'cm> Tree<'cm> {
fn | (
&mut self,
path: &[&'cm str],
package: &'cm cm::Package,
target: &'cm cm::Target,
) {
match (self, path) {
(Self::Joint(joint), []) => {
joint.insert(&target.name, Self::Leaf(&package.id, target));
}
(Self::Joint(joint), [segment, path @ ..]) => {
joint
.entry(segment)
.or_default()
.insert(path, package, target);
}
_ => panic!(),
}
}
fn expand(
&self,
extern_crate_name: &impl Fn(&cm::PackageId) -> &'cm str,
out: &mut String,
) -> anyhow::Result<()> {
match self {
Self::Leaf(package_id, target) => {
let doc = target.read_crate_level_doc()?;
if !doc.is_empty() {
*out += "#![doc=";
*out += "e!(#doc).to_string();
*out += "]";
}
*out += "pub use ::";
*out += extern_crate_name(package_id);
*out += "::*;"
}
Self::Joint(joint) => {
for (segment, node) in joint {
*out += "pub mod ";
*out += segment;
*out += "{";
node.expand(extern_crate_name, out)?;
*out += "}";
}
}
}
Ok(())
}
}
impl Default for Tree<'_> {
fn default() -> Self {
Self::Joint(BTreeMap::new())
}
}
fn apply_rustfmt(code: &str) -> anyhow::Result<String> {
let rustfmt_exe = PathBuf::from(
env::var_os("CARGO").with_context(|| "missing `$CARGO` environment variable")?,
)
.with_file_name("rustfmt")
.with_extension(env::consts::EXE_EXTENSION);
let tempdir = tempfile::Builder::new()
.prefix("qryxip-competitive-programming-library-xtask-")
.tempdir()?;
let path = tempdir.path().join("lib.rs");
fs::write(&path, code)?;
cmd!(rustfmt_exe, "--edition", "2018", &path).run()?;
let code = fs::read_to_string(path)?;
tempdir.close()?;
Ok(code)
}
}
pub fn gen_doc_oj_verify(
md_dir: Option<&Path>,
manifest_path: Option<&Path>,
) -> anyhow::Result<()> {
let metadata = &cargo_metadata(manifest_path)?;
let library_crates = metadata
.workspace_members
.iter()
.flat_map(|ws_member| {
let ws_member = &metadata[ws_member];
let target = ws_member.lib_or_proc_macro()?;
Some((ws_member, target))
})
.collect::<Vec<_>>();
let md_dir = &md_dir
.map(ToOwned::to_owned)
.unwrap_or_else(|| metadata.workspace_root.join("md"));
fs::create_dir_all(md_dir)
.with_context(|| format!("could not create `{}`", md_dir.display()))?;
for (package, target) in library_crates {
let markdown = format!(
"---\n\
title: \"{} (<code>{}</code>)\"\n\
documentation_of: //{}\n\
---\n\
{}",
package.name,
target.name,
target
.src_path
.strip_prefix(&metadata.workspace_root)
.unwrap_or(&target.src_path)
.display(),
modify_doc_for_oj_verify(&target.read_crate_level_doc()?),
);
let markdown_path = &md_dir.join(&package.name).with_extension("md");
fs::write(markdown_path, markdown)
.with_context(|| format!("could not write `{}`", markdown_path.display()))?;
eprintln!("{:>12} {}", "Wrote", markdown_path.display());
}
return Ok(());
/// Inserts `rust` in code blocks without language specification.
fn modify_doc_for_oj_verify(doc: &str) -> String {
use pulldown_cmark::{CodeBlockKind, Event, Options, Parser, Tag};
let mut doc = match doc {
"" => "\n".to_owned(),
doc => doc
.lines()
.map(|s| s.strip_prefix(' ').unwrap_or(s).to_owned() + "\n")
.join(""),
};
#[allow(clippy::redundant_clone)]
for (_, Range { start, .. }) in Parser::new_ext(&doc.clone(), Options::all())
.into_offset_iter()
.filter(|(event, _)| {
matches!(
event,
Event::Start(Tag::CodeBlock(CodeBlockKind::Fenced(kind))) if kind.is_empty()
)
})
.collect::<Vec<_>>()
.into_iter()
.rev()
{
let mut pos = start;
while doc.as_bytes()[pos] == b'`' {
pos += 1;
}
doc.insert_str(pos, "rust");
}
doc
}
}
fn cargo_metadata(manifest_path: Option<&Path>) -> anyhow::Result<cm::Metadata> {
let mut cmd = cm::MetadataCommand::new();
if let Some(manifest_path) = manifest_path {
cmd.manifest_path(manifest_path);
}
cmd.exec().map_err(|err| match err {
cm::Error::CargoMetadata { stderr } => {
anyhow!("{}", stderr.trim_start_matches("error: ").trim_end())
}
err => anyhow::Error::msg(err),
})
}
trait PackageExt {
fn lib_or_proc_macro(&self) -> Option<&cm::Target>;
}
impl PackageExt for cm::Package {
fn lib_or_proc_macro(&self) -> Option<&cm::Target> {
self.targets.iter().find(|cm::Target { kind, .. }| {
[&["lib".to_owned()][..], &["proc-macro".to_owned()]].contains(&&**kind)
})
}
}
trait TargetExt {
fn read_crate_level_doc(&self) -> anyhow::Result<String>;
}
impl TargetExt for cm::Target {
fn read_crate_level_doc(&self) -> anyhow::Result<String> {
let syn::File { attrs, .. } = syn::parse_file(&fs::read_to_string(&self.src_path)?)?;
Ok(attrs
.iter()
.flat_map(syn::Attribute::parse_meta)
.flat_map(|meta| match meta {
syn::Meta::NameValue(syn::MetaNameValue {
path,
lit: syn::Lit::Str(lit_str),
..
}) if path.is_ident("doc") => Some(lit_str.value()),
_ => None,
})
.join("\n"))
}
}
| insert | identifier_name |
lib.rs | #![forbid(unsafe_code)]
#![warn(rust_2018_idioms)]
use anyhow::{anyhow, Context as _};
use cargo_metadata as cm;
use duct::cmd;
use indoc::indoc;
use itertools::Itertools as _;
use quote::quote;
use std::{
collections::{BTreeMap, HashMap, HashSet},
env, fs,
ops::Range,
path::{Path, PathBuf},
};
pub fn gen_doc_rust(crates_dir: Option<&Path>, manifest_path: Option<&Path>) -> anyhow::Result<()> {
let metadata = &cargo_metadata(manifest_path)?;
let resolve = metadata.resolve.as_ref().expect("should be present");
let resolve_root = resolve
.root
.as_ref()
.with_context(|| "this is a virtual manifest")?;
let extern_crate_names = {
let explicit_names_in_toml = metadata[resolve_root]
.dependencies
.iter()
.flat_map(|d| &d.rename)
.collect::<HashSet<_>>();
let cm::Node { deps, .. } = resolve
.nodes
.iter()
.find(|cm::Node { id, .. }| id == resolve_root)
.unwrap();
deps.iter()
.filter(|cm::NodeDep { dep_kinds, .. }| {
dep_kinds
.iter()
.any(|cm::DepKindInfo { kind, .. }| *kind != cm::DependencyKind::Build)
})
.flat_map(|cm::NodeDep { name, pkg, .. }| {
let extern_crate_name = if explicit_names_in_toml.contains(name) {
name
} else {
&metadata[pkg].lib_or_proc_macro()?.name
};
Some((pkg, extern_crate_name))
})
.collect::<HashMap<_, _>>()
};
let lib = metadata[resolve_root]
.lib_or_proc_macro()
.with_context(|| format!("missing `lib|proc-macro` target in `{}`", resolve_root))?;
let crates_dir = &crates_dir
.map(ToOwned::to_owned)
.unwrap_or_else(|| metadata.workspace_root.join("crates"));
let library_crates = metadata
.workspace_members
.iter()
.flat_map(|ws_member| {
let ws_member = &metadata[ws_member];
let target = ws_member.lib_or_proc_macro()?;
let src_path = dunce::canonicalize(&target.src_path).ok()?;
let path1 = ws_member.manifest_path.strip_prefix(crates_dir).ok()?;
let path2 = src_path.strip_prefix(crates_dir).ok()?;
let common_path = itertools::zip(path1, path2)
.filter(|(s1, s2)| s1 == s2)
.map(|(segment, _)| segment.to_str().expect("this is from a JSON"))
.collect();
Some((common_path, ws_member, target))
})
.collect::<Vec<(Vec<_>, _, _)>>();
let mut tree = Tree::default();
for (path, package, target) in &library_crates {
tree.insert(path, package, target);
}
let generated = &mut indoc! {r"
// This file is automatically generated by `cargo-online-judge-verification-helper-helper`.
//! Re-exports the library crates for rustdoc.
//!
//! This crate itself is not intended to be used directly.
"}
.to_owned();
tree.expand(&|id| &extern_crate_names[id], generated)?;
fs::write(&lib.src_path, apply_rustfmt(generated)?)?;
eprintln!("{:>12} {}", "Wrote", lib.src_path.display());
return Ok(());
enum Tree<'cm> {
Leaf(&'cm cm::PackageId, &'cm cm::Target),
Joint(BTreeMap<&'cm str, Self>),
}
impl<'cm> Tree<'cm> {
fn insert(
&mut self,
path: &[&'cm str],
package: &'cm cm::Package,
target: &'cm cm::Target,
) {
match (self, path) {
(Self::Joint(joint), []) => {
joint.insert(&target.name, Self::Leaf(&package.id, target));
}
(Self::Joint(joint), [segment, path @ ..]) => {
joint
.entry(segment)
.or_default()
.insert(path, package, target);
}
_ => panic!(),
}
}
fn expand(
&self,
extern_crate_name: &impl Fn(&cm::PackageId) -> &'cm str,
out: &mut String,
) -> anyhow::Result<()> {
match self {
Self::Leaf(package_id, target) => {
let doc = target.read_crate_level_doc()?;
if !doc.is_empty() {
*out += "#![doc=";
*out += "e!(#doc).to_string();
*out += "]";
}
*out += "pub use ::";
*out += extern_crate_name(package_id);
*out += "::*;"
}
Self::Joint(joint) => {
for (segment, node) in joint {
*out += "pub mod ";
*out += segment;
*out += "{";
node.expand(extern_crate_name, out)?;
*out += "}";
}
}
}
Ok(())
}
}
impl Default for Tree<'_> {
fn default() -> Self {
Self::Joint(BTreeMap::new())
}
} | env::var_os("CARGO").with_context(|| "missing `$CARGO` environment variable")?,
)
.with_file_name("rustfmt")
.with_extension(env::consts::EXE_EXTENSION);
let tempdir = tempfile::Builder::new()
.prefix("qryxip-competitive-programming-library-xtask-")
.tempdir()?;
let path = tempdir.path().join("lib.rs");
fs::write(&path, code)?;
cmd!(rustfmt_exe, "--edition", "2018", &path).run()?;
let code = fs::read_to_string(path)?;
tempdir.close()?;
Ok(code)
}
}
pub fn gen_doc_oj_verify(
md_dir: Option<&Path>,
manifest_path: Option<&Path>,
) -> anyhow::Result<()> {
let metadata = &cargo_metadata(manifest_path)?;
let library_crates = metadata
.workspace_members
.iter()
.flat_map(|ws_member| {
let ws_member = &metadata[ws_member];
let target = ws_member.lib_or_proc_macro()?;
Some((ws_member, target))
})
.collect::<Vec<_>>();
let md_dir = &md_dir
.map(ToOwned::to_owned)
.unwrap_or_else(|| metadata.workspace_root.join("md"));
fs::create_dir_all(md_dir)
.with_context(|| format!("could not create `{}`", md_dir.display()))?;
for (package, target) in library_crates {
let markdown = format!(
"---\n\
title: \"{} (<code>{}</code>)\"\n\
documentation_of: //{}\n\
---\n\
{}",
package.name,
target.name,
target
.src_path
.strip_prefix(&metadata.workspace_root)
.unwrap_or(&target.src_path)
.display(),
modify_doc_for_oj_verify(&target.read_crate_level_doc()?),
);
let markdown_path = &md_dir.join(&package.name).with_extension("md");
fs::write(markdown_path, markdown)
.with_context(|| format!("could not write `{}`", markdown_path.display()))?;
eprintln!("{:>12} {}", "Wrote", markdown_path.display());
}
return Ok(());
/// Inserts `rust` in code blocks without language specification.
fn modify_doc_for_oj_verify(doc: &str) -> String {
use pulldown_cmark::{CodeBlockKind, Event, Options, Parser, Tag};
let mut doc = match doc {
"" => "\n".to_owned(),
doc => doc
.lines()
.map(|s| s.strip_prefix(' ').unwrap_or(s).to_owned() + "\n")
.join(""),
};
#[allow(clippy::redundant_clone)]
for (_, Range { start, .. }) in Parser::new_ext(&doc.clone(), Options::all())
.into_offset_iter()
.filter(|(event, _)| {
matches!(
event,
Event::Start(Tag::CodeBlock(CodeBlockKind::Fenced(kind))) if kind.is_empty()
)
})
.collect::<Vec<_>>()
.into_iter()
.rev()
{
let mut pos = start;
while doc.as_bytes()[pos] == b'`' {
pos += 1;
}
doc.insert_str(pos, "rust");
}
doc
}
}
fn cargo_metadata(manifest_path: Option<&Path>) -> anyhow::Result<cm::Metadata> {
let mut cmd = cm::MetadataCommand::new();
if let Some(manifest_path) = manifest_path {
cmd.manifest_path(manifest_path);
}
cmd.exec().map_err(|err| match err {
cm::Error::CargoMetadata { stderr } => {
anyhow!("{}", stderr.trim_start_matches("error: ").trim_end())
}
err => anyhow::Error::msg(err),
})
}
trait PackageExt {
fn lib_or_proc_macro(&self) -> Option<&cm::Target>;
}
impl PackageExt for cm::Package {
fn lib_or_proc_macro(&self) -> Option<&cm::Target> {
self.targets.iter().find(|cm::Target { kind, .. }| {
[&["lib".to_owned()][..], &["proc-macro".to_owned()]].contains(&&**kind)
})
}
}
trait TargetExt {
fn read_crate_level_doc(&self) -> anyhow::Result<String>;
}
impl TargetExt for cm::Target {
fn read_crate_level_doc(&self) -> anyhow::Result<String> {
let syn::File { attrs, .. } = syn::parse_file(&fs::read_to_string(&self.src_path)?)?;
Ok(attrs
.iter()
.flat_map(syn::Attribute::parse_meta)
.flat_map(|meta| match meta {
syn::Meta::NameValue(syn::MetaNameValue {
path,
lit: syn::Lit::Str(lit_str),
..
}) if path.is_ident("doc") => Some(lit_str.value()),
_ => None,
})
.join("\n"))
}
} |
fn apply_rustfmt(code: &str) -> anyhow::Result<String> {
let rustfmt_exe = PathBuf::from( | random_line_split |
lib.rs | #![forbid(unsafe_code)]
#![warn(rust_2018_idioms)]
use anyhow::{anyhow, Context as _};
use cargo_metadata as cm;
use duct::cmd;
use indoc::indoc;
use itertools::Itertools as _;
use quote::quote;
use std::{
collections::{BTreeMap, HashMap, HashSet},
env, fs,
ops::Range,
path::{Path, PathBuf},
};
pub fn gen_doc_rust(crates_dir: Option<&Path>, manifest_path: Option<&Path>) -> anyhow::Result<()> {
let metadata = &cargo_metadata(manifest_path)?;
let resolve = metadata.resolve.as_ref().expect("should be present");
let resolve_root = resolve
.root
.as_ref()
.with_context(|| "this is a virtual manifest")?;
let extern_crate_names = {
let explicit_names_in_toml = metadata[resolve_root]
.dependencies
.iter()
.flat_map(|d| &d.rename)
.collect::<HashSet<_>>();
let cm::Node { deps, .. } = resolve
.nodes
.iter()
.find(|cm::Node { id, .. }| id == resolve_root)
.unwrap();
deps.iter()
.filter(|cm::NodeDep { dep_kinds, .. }| {
dep_kinds
.iter()
.any(|cm::DepKindInfo { kind, .. }| *kind != cm::DependencyKind::Build)
})
.flat_map(|cm::NodeDep { name, pkg, .. }| {
let extern_crate_name = if explicit_names_in_toml.contains(name) {
name
} else {
&metadata[pkg].lib_or_proc_macro()?.name
};
Some((pkg, extern_crate_name))
})
.collect::<HashMap<_, _>>()
};
let lib = metadata[resolve_root]
.lib_or_proc_macro()
.with_context(|| format!("missing `lib|proc-macro` target in `{}`", resolve_root))?;
let crates_dir = &crates_dir
.map(ToOwned::to_owned)
.unwrap_or_else(|| metadata.workspace_root.join("crates"));
let library_crates = metadata
.workspace_members
.iter()
.flat_map(|ws_member| {
let ws_member = &metadata[ws_member];
let target = ws_member.lib_or_proc_macro()?;
let src_path = dunce::canonicalize(&target.src_path).ok()?;
let path1 = ws_member.manifest_path.strip_prefix(crates_dir).ok()?;
let path2 = src_path.strip_prefix(crates_dir).ok()?;
let common_path = itertools::zip(path1, path2)
.filter(|(s1, s2)| s1 == s2)
.map(|(segment, _)| segment.to_str().expect("this is from a JSON"))
.collect();
Some((common_path, ws_member, target))
})
.collect::<Vec<(Vec<_>, _, _)>>();
let mut tree = Tree::default();
for (path, package, target) in &library_crates {
tree.insert(path, package, target);
}
let generated = &mut indoc! {r"
// This file is automatically generated by `cargo-online-judge-verification-helper-helper`.
//! Re-exports the library crates for rustdoc.
//!
//! This crate itself is not intended to be used directly.
"}
.to_owned();
tree.expand(&|id| &extern_crate_names[id], generated)?;
fs::write(&lib.src_path, apply_rustfmt(generated)?)?;
eprintln!("{:>12} {}", "Wrote", lib.src_path.display());
return Ok(());
enum Tree<'cm> {
Leaf(&'cm cm::PackageId, &'cm cm::Target),
Joint(BTreeMap<&'cm str, Self>),
}
impl<'cm> Tree<'cm> {
fn insert(
&mut self,
path: &[&'cm str],
package: &'cm cm::Package,
target: &'cm cm::Target,
) {
match (self, path) {
(Self::Joint(joint), []) => |
(Self::Joint(joint), [segment, path @ ..]) => {
joint
.entry(segment)
.or_default()
.insert(path, package, target);
}
_ => panic!(),
}
}
fn expand(
&self,
extern_crate_name: &impl Fn(&cm::PackageId) -> &'cm str,
out: &mut String,
) -> anyhow::Result<()> {
match self {
Self::Leaf(package_id, target) => {
let doc = target.read_crate_level_doc()?;
if !doc.is_empty() {
*out += "#![doc=";
*out += "e!(#doc).to_string();
*out += "]";
}
*out += "pub use ::";
*out += extern_crate_name(package_id);
*out += "::*;"
}
Self::Joint(joint) => {
for (segment, node) in joint {
*out += "pub mod ";
*out += segment;
*out += "{";
node.expand(extern_crate_name, out)?;
*out += "}";
}
}
}
Ok(())
}
}
impl Default for Tree<'_> {
fn default() -> Self {
Self::Joint(BTreeMap::new())
}
}
fn apply_rustfmt(code: &str) -> anyhow::Result<String> {
let rustfmt_exe = PathBuf::from(
env::var_os("CARGO").with_context(|| "missing `$CARGO` environment variable")?,
)
.with_file_name("rustfmt")
.with_extension(env::consts::EXE_EXTENSION);
let tempdir = tempfile::Builder::new()
.prefix("qryxip-competitive-programming-library-xtask-")
.tempdir()?;
let path = tempdir.path().join("lib.rs");
fs::write(&path, code)?;
cmd!(rustfmt_exe, "--edition", "2018", &path).run()?;
let code = fs::read_to_string(path)?;
tempdir.close()?;
Ok(code)
}
}
pub fn gen_doc_oj_verify(
md_dir: Option<&Path>,
manifest_path: Option<&Path>,
) -> anyhow::Result<()> {
let metadata = &cargo_metadata(manifest_path)?;
let library_crates = metadata
.workspace_members
.iter()
.flat_map(|ws_member| {
let ws_member = &metadata[ws_member];
let target = ws_member.lib_or_proc_macro()?;
Some((ws_member, target))
})
.collect::<Vec<_>>();
let md_dir = &md_dir
.map(ToOwned::to_owned)
.unwrap_or_else(|| metadata.workspace_root.join("md"));
fs::create_dir_all(md_dir)
.with_context(|| format!("could not create `{}`", md_dir.display()))?;
for (package, target) in library_crates {
let markdown = format!(
"---\n\
title: \"{} (<code>{}</code>)\"\n\
documentation_of: //{}\n\
---\n\
{}",
package.name,
target.name,
target
.src_path
.strip_prefix(&metadata.workspace_root)
.unwrap_or(&target.src_path)
.display(),
modify_doc_for_oj_verify(&target.read_crate_level_doc()?),
);
let markdown_path = &md_dir.join(&package.name).with_extension("md");
fs::write(markdown_path, markdown)
.with_context(|| format!("could not write `{}`", markdown_path.display()))?;
eprintln!("{:>12} {}", "Wrote", markdown_path.display());
}
return Ok(());
/// Inserts `rust` in code blocks without language specification.
fn modify_doc_for_oj_verify(doc: &str) -> String {
use pulldown_cmark::{CodeBlockKind, Event, Options, Parser, Tag};
let mut doc = match doc {
"" => "\n".to_owned(),
doc => doc
.lines()
.map(|s| s.strip_prefix(' ').unwrap_or(s).to_owned() + "\n")
.join(""),
};
#[allow(clippy::redundant_clone)]
for (_, Range { start, .. }) in Parser::new_ext(&doc.clone(), Options::all())
.into_offset_iter()
.filter(|(event, _)| {
matches!(
event,
Event::Start(Tag::CodeBlock(CodeBlockKind::Fenced(kind))) if kind.is_empty()
)
})
.collect::<Vec<_>>()
.into_iter()
.rev()
{
let mut pos = start;
while doc.as_bytes()[pos] == b'`' {
pos += 1;
}
doc.insert_str(pos, "rust");
}
doc
}
}
fn cargo_metadata(manifest_path: Option<&Path>) -> anyhow::Result<cm::Metadata> {
let mut cmd = cm::MetadataCommand::new();
if let Some(manifest_path) = manifest_path {
cmd.manifest_path(manifest_path);
}
cmd.exec().map_err(|err| match err {
cm::Error::CargoMetadata { stderr } => {
anyhow!("{}", stderr.trim_start_matches("error: ").trim_end())
}
err => anyhow::Error::msg(err),
})
}
trait PackageExt {
fn lib_or_proc_macro(&self) -> Option<&cm::Target>;
}
impl PackageExt for cm::Package {
fn lib_or_proc_macro(&self) -> Option<&cm::Target> {
self.targets.iter().find(|cm::Target { kind, .. }| {
[&["lib".to_owned()][..], &["proc-macro".to_owned()]].contains(&&**kind)
})
}
}
trait TargetExt {
fn read_crate_level_doc(&self) -> anyhow::Result<String>;
}
impl TargetExt for cm::Target {
fn read_crate_level_doc(&self) -> anyhow::Result<String> {
let syn::File { attrs, .. } = syn::parse_file(&fs::read_to_string(&self.src_path)?)?;
Ok(attrs
.iter()
.flat_map(syn::Attribute::parse_meta)
.flat_map(|meta| match meta {
syn::Meta::NameValue(syn::MetaNameValue {
path,
lit: syn::Lit::Str(lit_str),
..
}) if path.is_ident("doc") => Some(lit_str.value()),
_ => None,
})
.join("\n"))
}
}
| {
joint.insert(&target.name, Self::Leaf(&package.id, target));
} | conditional_block |
lib.rs | #![forbid(unsafe_code)]
#![warn(rust_2018_idioms)]
use anyhow::{anyhow, Context as _};
use cargo_metadata as cm;
use duct::cmd;
use indoc::indoc;
use itertools::Itertools as _;
use quote::quote;
use std::{
collections::{BTreeMap, HashMap, HashSet},
env, fs,
ops::Range,
path::{Path, PathBuf},
};
pub fn gen_doc_rust(crates_dir: Option<&Path>, manifest_path: Option<&Path>) -> anyhow::Result<()> {
let metadata = &cargo_metadata(manifest_path)?;
let resolve = metadata.resolve.as_ref().expect("should be present");
let resolve_root = resolve
.root
.as_ref()
.with_context(|| "this is a virtual manifest")?;
let extern_crate_names = {
let explicit_names_in_toml = metadata[resolve_root]
.dependencies
.iter()
.flat_map(|d| &d.rename)
.collect::<HashSet<_>>();
let cm::Node { deps, .. } = resolve
.nodes
.iter()
.find(|cm::Node { id, .. }| id == resolve_root)
.unwrap();
deps.iter()
.filter(|cm::NodeDep { dep_kinds, .. }| {
dep_kinds
.iter()
.any(|cm::DepKindInfo { kind, .. }| *kind != cm::DependencyKind::Build)
})
.flat_map(|cm::NodeDep { name, pkg, .. }| {
let extern_crate_name = if explicit_names_in_toml.contains(name) {
name
} else {
&metadata[pkg].lib_or_proc_macro()?.name
};
Some((pkg, extern_crate_name))
})
.collect::<HashMap<_, _>>()
};
let lib = metadata[resolve_root]
.lib_or_proc_macro()
.with_context(|| format!("missing `lib|proc-macro` target in `{}`", resolve_root))?;
let crates_dir = &crates_dir
.map(ToOwned::to_owned)
.unwrap_or_else(|| metadata.workspace_root.join("crates"));
let library_crates = metadata
.workspace_members
.iter()
.flat_map(|ws_member| {
let ws_member = &metadata[ws_member];
let target = ws_member.lib_or_proc_macro()?;
let src_path = dunce::canonicalize(&target.src_path).ok()?;
let path1 = ws_member.manifest_path.strip_prefix(crates_dir).ok()?;
let path2 = src_path.strip_prefix(crates_dir).ok()?;
let common_path = itertools::zip(path1, path2)
.filter(|(s1, s2)| s1 == s2)
.map(|(segment, _)| segment.to_str().expect("this is from a JSON"))
.collect();
Some((common_path, ws_member, target))
})
.collect::<Vec<(Vec<_>, _, _)>>();
let mut tree = Tree::default();
for (path, package, target) in &library_crates {
tree.insert(path, package, target);
}
let generated = &mut indoc! {r"
// This file is automatically generated by `cargo-online-judge-verification-helper-helper`.
//! Re-exports the library crates for rustdoc.
//!
//! This crate itself is not intended to be used directly.
"}
.to_owned();
tree.expand(&|id| &extern_crate_names[id], generated)?;
fs::write(&lib.src_path, apply_rustfmt(generated)?)?;
eprintln!("{:>12} {}", "Wrote", lib.src_path.display());
return Ok(());
enum Tree<'cm> {
Leaf(&'cm cm::PackageId, &'cm cm::Target),
Joint(BTreeMap<&'cm str, Self>),
}
impl<'cm> Tree<'cm> {
fn insert(
&mut self,
path: &[&'cm str],
package: &'cm cm::Package,
target: &'cm cm::Target,
) {
match (self, path) {
(Self::Joint(joint), []) => {
joint.insert(&target.name, Self::Leaf(&package.id, target));
}
(Self::Joint(joint), [segment, path @ ..]) => {
joint
.entry(segment)
.or_default()
.insert(path, package, target);
}
_ => panic!(),
}
}
fn expand(
&self,
extern_crate_name: &impl Fn(&cm::PackageId) -> &'cm str,
out: &mut String,
) -> anyhow::Result<()> {
match self {
Self::Leaf(package_id, target) => {
let doc = target.read_crate_level_doc()?;
if !doc.is_empty() {
*out += "#![doc=";
*out += "e!(#doc).to_string();
*out += "]";
}
*out += "pub use ::";
*out += extern_crate_name(package_id);
*out += "::*;"
}
Self::Joint(joint) => {
for (segment, node) in joint {
*out += "pub mod ";
*out += segment;
*out += "{";
node.expand(extern_crate_name, out)?;
*out += "}";
}
}
}
Ok(())
}
}
impl Default for Tree<'_> {
fn default() -> Self {
Self::Joint(BTreeMap::new())
}
}
fn apply_rustfmt(code: &str) -> anyhow::Result<String> {
let rustfmt_exe = PathBuf::from(
env::var_os("CARGO").with_context(|| "missing `$CARGO` environment variable")?,
)
.with_file_name("rustfmt")
.with_extension(env::consts::EXE_EXTENSION);
let tempdir = tempfile::Builder::new()
.prefix("qryxip-competitive-programming-library-xtask-")
.tempdir()?;
let path = tempdir.path().join("lib.rs");
fs::write(&path, code)?;
cmd!(rustfmt_exe, "--edition", "2018", &path).run()?;
let code = fs::read_to_string(path)?;
tempdir.close()?;
Ok(code)
}
}
pub fn gen_doc_oj_verify(
md_dir: Option<&Path>,
manifest_path: Option<&Path>,
) -> anyhow::Result<()> |
fn cargo_metadata(manifest_path: Option<&Path>) -> anyhow::Result<cm::Metadata> {
let mut cmd = cm::MetadataCommand::new();
if let Some(manifest_path) = manifest_path {
cmd.manifest_path(manifest_path);
}
cmd.exec().map_err(|err| match err {
cm::Error::CargoMetadata { stderr } => {
anyhow!("{}", stderr.trim_start_matches("error: ").trim_end())
}
err => anyhow::Error::msg(err),
})
}
trait PackageExt {
fn lib_or_proc_macro(&self) -> Option<&cm::Target>;
}
impl PackageExt for cm::Package {
fn lib_or_proc_macro(&self) -> Option<&cm::Target> {
self.targets.iter().find(|cm::Target { kind, .. }| {
[&["lib".to_owned()][..], &["proc-macro".to_owned()]].contains(&&**kind)
})
}
}
trait TargetExt {
fn read_crate_level_doc(&self) -> anyhow::Result<String>;
}
impl TargetExt for cm::Target {
fn read_crate_level_doc(&self) -> anyhow::Result<String> {
let syn::File { attrs, .. } = syn::parse_file(&fs::read_to_string(&self.src_path)?)?;
Ok(attrs
.iter()
.flat_map(syn::Attribute::parse_meta)
.flat_map(|meta| match meta {
syn::Meta::NameValue(syn::MetaNameValue {
path,
lit: syn::Lit::Str(lit_str),
..
}) if path.is_ident("doc") => Some(lit_str.value()),
_ => None,
})
.join("\n"))
}
}
| {
let metadata = &cargo_metadata(manifest_path)?;
let library_crates = metadata
.workspace_members
.iter()
.flat_map(|ws_member| {
let ws_member = &metadata[ws_member];
let target = ws_member.lib_or_proc_macro()?;
Some((ws_member, target))
})
.collect::<Vec<_>>();
let md_dir = &md_dir
.map(ToOwned::to_owned)
.unwrap_or_else(|| metadata.workspace_root.join("md"));
fs::create_dir_all(md_dir)
.with_context(|| format!("could not create `{}`", md_dir.display()))?;
for (package, target) in library_crates {
let markdown = format!(
"---\n\
title: \"{} (<code>{}</code>)\"\n\
documentation_of: //{}\n\
---\n\
{}",
package.name,
target.name,
target
.src_path
.strip_prefix(&metadata.workspace_root)
.unwrap_or(&target.src_path)
.display(),
modify_doc_for_oj_verify(&target.read_crate_level_doc()?),
);
let markdown_path = &md_dir.join(&package.name).with_extension("md");
fs::write(markdown_path, markdown)
.with_context(|| format!("could not write `{}`", markdown_path.display()))?;
eprintln!("{:>12} {}", "Wrote", markdown_path.display());
}
return Ok(());
/// Inserts `rust` in code blocks without language specification.
fn modify_doc_for_oj_verify(doc: &str) -> String {
use pulldown_cmark::{CodeBlockKind, Event, Options, Parser, Tag};
let mut doc = match doc {
"" => "\n".to_owned(),
doc => doc
.lines()
.map(|s| s.strip_prefix(' ').unwrap_or(s).to_owned() + "\n")
.join(""),
};
#[allow(clippy::redundant_clone)]
for (_, Range { start, .. }) in Parser::new_ext(&doc.clone(), Options::all())
.into_offset_iter()
.filter(|(event, _)| {
matches!(
event,
Event::Start(Tag::CodeBlock(CodeBlockKind::Fenced(kind))) if kind.is_empty()
)
})
.collect::<Vec<_>>()
.into_iter()
.rev()
{
let mut pos = start;
while doc.as_bytes()[pos] == b'`' {
pos += 1;
}
doc.insert_str(pos, "rust");
}
doc
}
} | identifier_body |
Travel.py |
# coding: utf-8
# ## Estimating Travel Time
#
#
# The objective of this document is proposing a prediction model for estimating the travel time of two
# specified locations at a given departure time. The main idea here is predicting the velocity of the trip. Given the distance between starting and ending point of the trip, it is possible to easily compute the Travel Time.
# According to the given data, different features including the time of the day, day of the week, month, travel distance, and distance to the center of the city (New York) are used.
# Different prediction models (Linear, GLM and Deep Neural Network) are compared, and the GLM is used for genrating the final results.
# ## Preparation
# Import required libraries
# In[136]:
import numpy as np
import pandas as pd
from geopy.distance import vincenty
from datetime import datetime
from datetime import timedelta
from datetime import time
import statsmodels.api as sm
from sklearn.datasets import load_boston
from sklearn.model_selection import train_test_split
from sklearn.cross_validation import KFold
from sklearn.preprocessing import MinMaxScaler
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_absolute_error, mean_squared_error
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers.normalization import BatchNormalization
get_ipython().magic('matplotlib inline')
# ## Reading data
# In[169]:
df_train = pd.read_csv('train.csv',index_col= 'row_id')
df_test = pd.read_csv('test.csv',index_col= 'row_id')
df_train.head()
# ## Feature engineering
#
# It is clear that the travel time of trip depends on the starting and ending point. In other words, the most uncertain component in the prediction of travel time is the velocity of the trip. Given the velocity and the distance, it is easy to compute the duration of the travel.
#
# Also, I observed all travels in both train and test dataset are happening around New York City. Therefore, the main component in determining the velocity of is the city traffic. We know that traffic is a time-dependent phenomenon which depends on the time of the day, the day of the week, and month of the year. In addition, the traffic is usually heavier in Manhattan (downtown of the city) in comparing to the other point of the city. Therefore, if the starting or ending point of the travel is close to the Manhattan we expect higher traffic comparing to the other neighborhoods. In visualization section, I provide enough evidence from the data set to support the aforementioned claims.
#
# According to this observation the following features are computted by using the raw data and added to the dataframe.
#
# * Distance between starting and ending computted by vincenty formula
# * The time of the day of travel (in sec far from the midnight)
# * The day of the week (Monday, Tuesday, etc). For this categorical data, six dummy variables are added to datafram
# * The month of the travel to capture seasnolity effect.
# * The sequare of distance
# * The velocity is used as the predication variable.
#
# In[156]:
def distance(row):
source = (row['start_lat'], row['start_lng'])
dest = ( row['end_lat'], row['end_lng'])
return vincenty(source,dest).miles
Manhattan = (40.7831, -73.9712)
def pickup_to_MH(row):
'''find the distance between pick up point and Manhattan center'''
source = (row['start_lat'], row['start_lng'])
return vincenty(source,Manhattan).miles
def dropoff_to_MH(row):
'''find the distance between dropoff point and Manhattan center'''
dest = ( row['end_lat'], row['end_lng'])
return vincenty(dest,Manhattan).miles
def day_of_week(ep):
return datetime.fromtimestamp(ep).strftime("%A")
def month(ep):
return datetime.fromtimestamp(ep).month
def time_of_day(ep):
ref = datetime(2015, 1, 1, 0, 0, 0)
sec = (datetime.fromtimestamp(ep)- ref).seconds
return min(sec, 86400- sec)
def year(ep):
return datetime.fromtimestamp(ep).year
def add_features(df_train_s):
# Add day of the week and the dummy variable
|
# Now, we can easily add all of the above features to both traing and test data set. Due to time limtation and calculation power I only used 10% of the traing data.
# In[24]:
np.random.seed(42)
df_train_s = df_train.sample(frac=0.01, replace=False)
df_train_s = add_features(df_train_s)
df_train_s['velocity'] = np.array(df_train_s['distance']/(df_train_s['duration']/3600))
# In[25]:
df_train_s.head()
# In[170]:
# adding the feature to test set.
df_test = add_features(df_test)
# ## Removing Outlires
# The following functions are used to compute these features. Considering the speed limit and the fact the usual trafic in New York, it is reseanable to assume that always the speed show not exceed 90 mph. Therefore, I remove the points with more than this number as the outlires. Also, I removed the data with less than .5 mph. Specificlly, there exists many samples with zero distance between starting and ending point which might happen becouse GPS problem.
# In[41]:
df_train_s = df_train_s[df_train_s['velocity']<90]
df_train_s = df_train_s[df_train_s['velocity']>.5]
# ## Data Visulazation
#
# First we look at the starting and ending point of the trips which happens in New York.
#
#
#
# In[30]:
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(12, 6))
ax = df_train_s.plot.scatter( 'start_lat','start_lng',
ax = axes[0],
title='Start point of travel')
ax.set(xlabel="latitude", ylabel='longitude')
ax = df_train_s.plot.scatter('end_lng','end_lat',
ax = axes[1],
title='Destination of the travel')
ax.set(xlabel="latitude", ylabel='longitude')
plt.show()
# Here are some statitcs about the volacity, distance of each trip and its duration. Also, we looked at the density function of the volacity. A log-normal or Gamma distribution are approprate candiatdes for this distribution.
# In[42]:
df_train_s[['distance', 'duration','velocity']].describe()
# In[43]:
df_train_s['velocity'].hist(bins=1000,normed=True)
# ### Corrolation matrix
# In[44]:
corr = df_train_s.corr()
# generate a mask for the lower triangle
mask = np.zeros_like(corr, dtype=np.bool)
mask[np.triu_indices_from(mask)] = True
# set up the matplotlib figure
f, ax = plt.subplots(figsize=(18, 18))
# generate a custom diverging colormap
cmap = sns.diverging_palette(220, 10, as_cmap=True)
# draw the heatmap with the mask and correct aspect ratio
sns.heatmap(corr, mask=mask, cmap=cmap, vmax=.3,
square=True,
linewidths=.5, cbar_kws={"shrink": .5}, ax=ax)
plt.show()
# In[53]:
df_train_s.plot.scatter( 'distance','velocity')
# In[48]:
### Seanility and time Effect on Velocity
gr= df_train_s[['velocity','month']].groupby(by='month')
gr.mean().plot.bar(yerr=gr.std())
# ## Data preprocessing
#
# Let's split our data to train and test set in fraction of $\frac{4}{1}$ to facilate comparing the results.
# This test set is differenet from the given test set.
# In[105]:
cl = list(set(df_train_s.keys())-{'velocity','duration','day'})
X = np.array(df_train_s[cl])
X1 = np.insert(X, 0, 1, axis=1)
y = np.array(df_train_s['velocity'])
X_train, X_test, y_train, y_test = train_test_split(X1, y, test_size=0.2, random_state=42)
dist_train = X_train[:,1]
dist_test = X_test[:,1]
# In[106]:
list(enumerate(cl))
dist_train.mean()
# ## Linear Model
# In[204]:
model_sk = LinearRegression()
model_sk.fit(X_train, y_train)
plt.figure(figsize=(12, 8))
plt.bar(np.arange(model_sk.coef_.shape[0]) - 0.4, model_sk.coef_)
plt.xticks(np.arange(model_sk.coef_.shape[0]), cl, rotation='vertical')
plt.xlim([-1, model_sk.coef_.shape[0]])
plt.title("Linear model coefficients")
plt.show()
# The folling chart also provide better understading. Excepet X12 (dummy for sunday) all the other variables are significant; the p-value is zero and null-hypothesis is rejected.
# In[205]:
linear_model = sm.OLS(y_train, X_train)
linear_results = linear_model.fit()
print(linear_results.summary())
# ## Generalized Linear Model
# I tried GLM with gamma fammaly.
# In[206]:
gamma_model = sm.GLM( y_train, X_train,family=sm.families.Gamma())
gamma_results = gamma_model.fit()
print(gamma_results.summary())
# ## Deep Neural Network (DNN)
#
# Here, I am useing a DNN as a prediction model. I am using the Keras package to train the network. Network includes 3 layers. Also, between each two layer a dropout layer is add. RELU and softmax are used as the activation functions. Here, I define the model.
#
# I normilized the data the input data to imporve the performance.
# In[195]:
DNN_model = Sequential()
DNN_model.add(Dense(100,input_dim=X_train.shape[1],init='uniform',activation='relu'))
DNN_model.add(Dropout(0.5))
DNN_model.add(Dense(50,init='uniform',activation='softmax'))
DNN_model.add(Dropout(0.5))
DNN_model.add(Dense(100,init='uniform',activation='relu'))
DNN_model.add(Dropout(0.5))
DNN_model.add(Dense(1,init='uniform',activation='relu'))
DNN_model.summary()
# ### Fitting the DNN
# In[196]:
mn = X1.mean(axis=0)
#model.compile(loss='mean_absolute_error',optimizer='adam',metrics='[accuracy]')
DNN_model.compile(loss='mean_absolute_error',optimizer='adam')
history = DNN_model.fit(X_train/mn,y_train,
validation_data=(X_test/mn, y_test),
epochs =100,
batch_size=100,
verbose=2)
# In[197]:
plt.figure(figsize=(10, 8))
plt.title("Dense model training", fontsize=12)
plt.plot(history.history["loss"], label="Train")
plt.plot(history.history["val_loss"], label="Test")
plt.grid("on")
plt.xlabel("Epoch", fontsize=12)
plt.ylabel("loss", fontsize=12)
plt.legend(loc="upper right")
# ## Evalution
#
# In this part, I compare the propsed models and choose the best one. I compare the results based on mean absolute
# error of predicted versus actual durations, and also mean absolute percentage error which is the percantge of the error. Note that here we compare based on duration as asked in the question and not the velocity.
#
# In[207]:
preds_test, preds_train = {}, {}
#Linear Model
preds_test['linear'] = linear_results.predict(X_test)
preds_train['linear'] = linear_results.predict(X_train)
#GLM (Gamma Model)
preds_test['GLM'] = gamma_results.predict(X_test)
preds_train['GLM'] = gamma_results.predict(X_train)
#Deep Learning
preds_test['DL'] = np.squeeze(DNN_model.predict(X_test/mn))
preds_train['DL'] = np.squeeze(DNN_model.predict(X_train/mn))
# The functions are used for evalution
# In[84]:
def mean_absolute_error(dist,y_true, y_pred ):
"""
Args:
dist(ndarray) : distance between pick up and drop off
y_true(ndarray) : true velocity
y_pred(ndarray) : the prediction value of velocity
"""
err = np.abs(dist/y_true - dist/y_pred)
err = err[np.isfinite(err)]
return np.mean(err) *3600
def mean_absolute_percentage_error(dist,y_true, y_pred ):
"""
Args:
dist(ndarray) : distance between pick up and drop off
y_true(ndarray) : true velocity
y_pred(ndarray) : the prediction value of velocity
"""
err = np.abs(y_true/y_pred - 1)
err = err[np.isfinite(err)]
return np.mean(err)*100
def evalute(dist,y_true,prediction):
MAE, MAPE= {}, {}
for kys, y_pred in prediction.items():
MAE[kys] = mean_absolute_error(dist,y_true, y_pred )
MAPE[kys] = mean_absolute_percentage_error(dist,y_true, y_pred )
return MAE, MAPE
# In[209]:
MAE_train, MAPE_train = evalute(dist_train,y_train, preds_train)
MAE_test, MAPE_test = evalute(dist_test,y_test, preds_test)
pd.DataFrame([MAE_test,MAE_train, MAPE_test, MAPE_train],
index= ['MAE_test', 'MAE_train', 'MAPE_test', 'MAPE_train'] ).transpose()
# In[201]:
dist_train.mean()
# ## Generate Prediction for Test Set
#
# By comparing the three models (linear, GLM, DNN), I choose GLM for generating the predication for the given test set.
# In[212]:
XX = np.array(df_test[cl])
XX = np.insert(XX, 0, 1, axis=1)
dist_x = XX[:,1]
#DNN_TD = dist_x/np.squeeze(DNN_model.predict(XX/mn))*3600
GLM_TD = dist_x/gamma_results.predict(XX)*3600
df_ans= pd.DataFrame(GLM_TD, columns =['duration'])
df_ans.index.name = 'row_id'
df_ans.to_csv('answer.csv')
df_ans= pd.DataFrame(TD, columns =['duration'])
# ## Extention and Further Idea
# Here, we only use the vincenty, but by conteccting to google API and fidning the real distance between start and end point the preditor defenitlly can be improved. Also, here I only used 10% of data points becouse of the limitation on runnig the DNN. By using GPU or running over the cloud we can use all the samples.
#
#
#
#
| DD = df_train_s['start_timestamp'].map(day_of_week)
df_train_s['day'] = DD
DD = pd.get_dummies( DD,prefix='day', drop_first=True)
df_train_s = pd.concat([df_train_s, DD],axis =1 )
# Month, time of the dat, df_train_s
df_train_s['month'] = df_train_s['start_timestamp'].map(month)
df_train_s['time_of_day'] = df_train_s['start_timestamp'].map(time_of_day)
# distance between start and end of the trip
df_train_s['distance'] = df_train_s.apply(lambda x :distance(x), axis=1 )
df_train_s['distance2'] = df_train_s['distance']**2
# distance between start, end, and center of Manhatan
df_train_s['pickup_MH'] = df_train_s.apply(pickup_to_MH, axis=1 )
df_train_s['dropoff_MH'] = df_train_s.apply(dropoff_to_MH, axis=1 )
return df_train_s | identifier_body |
Travel.py | # coding: utf-8
# ## Estimating Travel Time
#
#
# The objective of this document is proposing a prediction model for estimating the travel time of two
# specified locations at a given departure time. The main idea here is predicting the velocity of the trip. Given the distance between starting and ending point of the trip, it is possible to easily compute the Travel Time.
# According to the given data, different features including the time of the day, day of the week, month, travel distance, and distance to the center of the city (New York) are used.
# Different prediction models (Linear, GLM and Deep Neural Network) are compared, and the GLM is used for genrating the final results.
# ## Preparation
# Import required libraries
# In[136]:
import numpy as np
import pandas as pd
from geopy.distance import vincenty
from datetime import datetime
from datetime import timedelta
from datetime import time
import statsmodels.api as sm
from sklearn.datasets import load_boston
from sklearn.model_selection import train_test_split
from sklearn.cross_validation import KFold
from sklearn.preprocessing import MinMaxScaler
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_absolute_error, mean_squared_error
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers.normalization import BatchNormalization
get_ipython().magic('matplotlib inline')
# ## Reading data
# In[169]:
df_train = pd.read_csv('train.csv',index_col= 'row_id')
df_test = pd.read_csv('test.csv',index_col= 'row_id')
df_train.head()
# ## Feature engineering
#
# It is clear that the travel time of trip depends on the starting and ending point. In other words, the most uncertain component in the prediction of travel time is the velocity of the trip. Given the velocity and the distance, it is easy to compute the duration of the travel.
#
# Also, I observed all travels in both train and test dataset are happening around New York City. Therefore, the main component in determining the velocity of is the city traffic. We know that traffic is a time-dependent phenomenon which depends on the time of the day, the day of the week, and month of the year. In addition, the traffic is usually heavier in Manhattan (downtown of the city) in comparing to the other point of the city. Therefore, if the starting or ending point of the travel is close to the Manhattan we expect higher traffic comparing to the other neighborhoods. In visualization section, I provide enough evidence from the data set to support the aforementioned claims.
#
# According to this observation the following features are computted by using the raw data and added to the dataframe.
#
# * Distance between starting and ending computted by vincenty formula
# * The time of the day of travel (in sec far from the midnight)
# * The day of the week (Monday, Tuesday, etc). For this categorical data, six dummy variables are added to datafram
# * The month of the travel to capture seasnolity effect.
# * The sequare of distance
# * The velocity is used as the predication variable.
#
# In[156]:
def distance(row):
source = (row['start_lat'], row['start_lng'])
dest = ( row['end_lat'], row['end_lng'])
return vincenty(source,dest).miles
Manhattan = (40.7831, -73.9712)
def pickup_to_MH(row):
'''find the distance between pick up point and Manhattan center'''
source = (row['start_lat'], row['start_lng'])
return vincenty(source,Manhattan).miles
def dropoff_to_MH(row):
'''find the distance between dropoff point and Manhattan center'''
dest = ( row['end_lat'], row['end_lng'])
return vincenty(dest,Manhattan).miles
def day_of_week(ep):
return datetime.fromtimestamp(ep).strftime("%A")
def month(ep):
return datetime.fromtimestamp(ep).month
def time_of_day(ep):
ref = datetime(2015, 1, 1, 0, 0, 0)
sec = (datetime.fromtimestamp(ep)- ref).seconds
return min(sec, 86400- sec)
def year(ep):
return datetime.fromtimestamp(ep).year
def add_features(df_train_s):
# Add day of the week and the dummy variable
DD = df_train_s['start_timestamp'].map(day_of_week)
df_train_s['day'] = DD
DD = pd.get_dummies( DD,prefix='day', drop_first=True)
df_train_s = pd.concat([df_train_s, DD],axis =1 )
# Month, time of the dat, df_train_s
df_train_s['month'] = df_train_s['start_timestamp'].map(month)
df_train_s['time_of_day'] = df_train_s['start_timestamp'].map(time_of_day)
# distance between start and end of the trip
df_train_s['distance'] = df_train_s.apply(lambda x :distance(x), axis=1 )
df_train_s['distance2'] = df_train_s['distance']**2
# distance between start, end, and center of Manhatan
df_train_s['pickup_MH'] = df_train_s.apply(pickup_to_MH, axis=1 )
df_train_s['dropoff_MH'] = df_train_s.apply(dropoff_to_MH, axis=1 )
return df_train_s
# Now, we can easily add all of the above features to both traing and test data set. Due to time limtation and calculation power I only used 10% of the traing data.
# In[24]:
np.random.seed(42)
df_train_s = df_train.sample(frac=0.01, replace=False)
df_train_s = add_features(df_train_s)
df_train_s['velocity'] = np.array(df_train_s['distance']/(df_train_s['duration']/3600))
# In[25]:
df_train_s.head()
# In[170]:
# adding the feature to test set.
df_test = add_features(df_test)
# ## Removing Outlires
# The following functions are used to compute these features. Considering the speed limit and the fact the usual trafic in New York, it is reseanable to assume that always the speed show not exceed 90 mph. Therefore, I remove the points with more than this number as the outlires. Also, I removed the data with less than .5 mph. Specificlly, there exists many samples with zero distance between starting and ending point which might happen becouse GPS problem.
# In[41]:
df_train_s = df_train_s[df_train_s['velocity']<90]
df_train_s = df_train_s[df_train_s['velocity']>.5]
# ## Data Visulazation
#
# First we look at the starting and ending point of the trips which happens in New York.
#
#
#
# In[30]:
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(12, 6))
ax = df_train_s.plot.scatter( 'start_lat','start_lng',
ax = axes[0],
title='Start point of travel')
ax.set(xlabel="latitude", ylabel='longitude')
ax = df_train_s.plot.scatter('end_lng','end_lat',
ax = axes[1],
title='Destination of the travel')
ax.set(xlabel="latitude", ylabel='longitude')
plt.show()
# Here are some statitcs about the volacity, distance of each trip and its duration. Also, we looked at the density function of the volacity. A log-normal or Gamma distribution are approprate candiatdes for this distribution.
# In[42]:
df_train_s[['distance', 'duration','velocity']].describe()
# In[43]:
df_train_s['velocity'].hist(bins=1000,normed=True)
# ### Corrolation matrix
# In[44]:
corr = df_train_s.corr()
# generate a mask for the lower triangle
mask = np.zeros_like(corr, dtype=np.bool)
mask[np.triu_indices_from(mask)] = True
# set up the matplotlib figure
f, ax = plt.subplots(figsize=(18, 18))
# generate a custom diverging colormap
cmap = sns.diverging_palette(220, 10, as_cmap=True)
# draw the heatmap with the mask and correct aspect ratio
sns.heatmap(corr, mask=mask, cmap=cmap, vmax=.3,
square=True,
linewidths=.5, cbar_kws={"shrink": .5}, ax=ax)
plt.show()
# In[53]:
df_train_s.plot.scatter( 'distance','velocity')
# In[48]:
### Seanility and time Effect on Velocity
gr= df_train_s[['velocity','month']].groupby(by='month')
gr.mean().plot.bar(yerr=gr.std())
# ## Data preprocessing
#
# Let's split our data to train and test set in fraction of $\frac{4}{1}$ to facilate comparing the results.
# This test set is differenet from the given test set.
# In[105]:
cl = list(set(df_train_s.keys())-{'velocity','duration','day'})
X = np.array(df_train_s[cl])
X1 = np.insert(X, 0, 1, axis=1)
y = np.array(df_train_s['velocity'])
X_train, X_test, y_train, y_test = train_test_split(X1, y, test_size=0.2, random_state=42)
dist_train = X_train[:,1]
dist_test = X_test[:,1]
# In[106]:
list(enumerate(cl))
dist_train.mean()
|
# In[204]:
model_sk = LinearRegression()
model_sk.fit(X_train, y_train)
plt.figure(figsize=(12, 8))
plt.bar(np.arange(model_sk.coef_.shape[0]) - 0.4, model_sk.coef_)
plt.xticks(np.arange(model_sk.coef_.shape[0]), cl, rotation='vertical')
plt.xlim([-1, model_sk.coef_.shape[0]])
plt.title("Linear model coefficients")
plt.show()
# The folling chart also provide better understading. Excepet X12 (dummy for sunday) all the other variables are significant; the p-value is zero and null-hypothesis is rejected.
# In[205]:
linear_model = sm.OLS(y_train, X_train)
linear_results = linear_model.fit()
print(linear_results.summary())
# ## Generalized Linear Model
# I tried GLM with gamma fammaly.
# In[206]:
gamma_model = sm.GLM( y_train, X_train,family=sm.families.Gamma())
gamma_results = gamma_model.fit()
print(gamma_results.summary())
# ## Deep Neural Network (DNN)
#
# Here, I am useing a DNN as a prediction model. I am using the Keras package to train the network. Network includes 3 layers. Also, between each two layer a dropout layer is add. RELU and softmax are used as the activation functions. Here, I define the model.
#
# I normilized the data the input data to imporve the performance.
# In[195]:
DNN_model = Sequential()
DNN_model.add(Dense(100,input_dim=X_train.shape[1],init='uniform',activation='relu'))
DNN_model.add(Dropout(0.5))
DNN_model.add(Dense(50,init='uniform',activation='softmax'))
DNN_model.add(Dropout(0.5))
DNN_model.add(Dense(100,init='uniform',activation='relu'))
DNN_model.add(Dropout(0.5))
DNN_model.add(Dense(1,init='uniform',activation='relu'))
DNN_model.summary()
# ### Fitting the DNN
# In[196]:
mn = X1.mean(axis=0)
#model.compile(loss='mean_absolute_error',optimizer='adam',metrics='[accuracy]')
DNN_model.compile(loss='mean_absolute_error',optimizer='adam')
history = DNN_model.fit(X_train/mn,y_train,
validation_data=(X_test/mn, y_test),
epochs =100,
batch_size=100,
verbose=2)
# In[197]:
plt.figure(figsize=(10, 8))
plt.title("Dense model training", fontsize=12)
plt.plot(history.history["loss"], label="Train")
plt.plot(history.history["val_loss"], label="Test")
plt.grid("on")
plt.xlabel("Epoch", fontsize=12)
plt.ylabel("loss", fontsize=12)
plt.legend(loc="upper right")
# ## Evalution
#
# In this part, I compare the propsed models and choose the best one. I compare the results based on mean absolute
# error of predicted versus actual durations, and also mean absolute percentage error which is the percantge of the error. Note that here we compare based on duration as asked in the question and not the velocity.
#
# In[207]:
preds_test, preds_train = {}, {}
#Linear Model
preds_test['linear'] = linear_results.predict(X_test)
preds_train['linear'] = linear_results.predict(X_train)
#GLM (Gamma Model)
preds_test['GLM'] = gamma_results.predict(X_test)
preds_train['GLM'] = gamma_results.predict(X_train)
#Deep Learning
preds_test['DL'] = np.squeeze(DNN_model.predict(X_test/mn))
preds_train['DL'] = np.squeeze(DNN_model.predict(X_train/mn))
# The functions are used for evalution
# In[84]:
def mean_absolute_error(dist,y_true, y_pred ):
    """Mean absolute duration error, in seconds.

    Velocities are converted to trip durations (hours = dist / velocity)
    and the absolute gaps averaged; non-finite gaps (e.g. produced by a
    zero velocity) are discarded before averaging.

    Args:
        dist (ndarray): distance between pick up and drop off (miles).
        y_true (ndarray): true velocity.
        y_pred (ndarray): predicted velocity.

    Returns:
        float: mean absolute duration error, scaled from hours to seconds.
    """
    duration_gap = np.abs(dist / y_true - dist / y_pred)
    finite_gap = duration_gap[np.isfinite(duration_gap)]
    return 3600 * np.mean(finite_gap)
def mean_absolute_percentage_error(dist,y_true, y_pred ):
    """Mean absolute percentage error of the predicted velocities.

    Args:
        dist (ndarray): distance between pick up and drop off (unused here,
            kept for a call signature parallel to mean_absolute_error).
        y_true (ndarray): true velocity.
        y_pred (ndarray): predicted velocity.

    Returns:
        float: mean of |y_true/y_pred - 1| over finite entries, as a percent.
    """
    ratio_err = np.abs(y_true / y_pred - 1)
    ratio_err = ratio_err[np.isfinite(ratio_err)]
    return 100 * np.mean(ratio_err)
def evalute(dist,y_true,prediction):
    """Score every model's predictions with MAE (seconds) and MAPE (%).

    Args:
        dist (ndarray): distance between pick up and drop off.
        y_true (ndarray): true velocity.
        prediction (dict): model name -> predicted velocity array.

    Returns:
        tuple(dict, dict): (MAE per model, MAPE per model).
    """
    MAE = {name: mean_absolute_error(dist, y_true, pred)
           for name, pred in prediction.items()}
    MAPE = {name: mean_absolute_percentage_error(dist, y_true, pred)
            for name, pred in prediction.items()}
    return MAE, MAPE
# In[209]:
MAE_train, MAPE_train = evalute(dist_train,y_train, preds_train)
MAE_test, MAPE_test = evalute(dist_test,y_test, preds_test)
pd.DataFrame([MAE_test,MAE_train, MAPE_test, MAPE_train],
index= ['MAE_test', 'MAE_train', 'MAPE_test', 'MAPE_train'] ).transpose()
# In[201]:
dist_train.mean()
# ## Generate Prediction for Test Set
#
# By comparing the three models (linear, GLM, DNN), I choose GLM for generating the predication for the given test set.
# In[212]:
# Build the design matrix for the held-out test set with the same feature
# columns (cl) and a leading intercept column, matching training.
XX = np.array(df_test[cl])
XX = np.insert(XX, 0, 1, axis=1)
# NOTE(review): column 1 is assumed to be the trip distance, but `cl` is
# derived from a set() so its ordering is not stable across runs — verify
# with list(enumerate(cl)) before relying on this index.
dist_x = XX[:,1]
#DNN_TD = dist_x/np.squeeze(DNN_model.predict(XX/mn))*3600
# Predicted duration in seconds: distance / predicted velocity (mph) * 3600.
GLM_TD = dist_x/gamma_results.predict(XX)*3600
df_ans= pd.DataFrame(GLM_TD, columns =['duration'])
df_ans.index.name = 'row_id'
df_ans.to_csv('answer.csv')
# Bug fix: the original rebuilt df_ans from an undefined name `TD` after the
# CSV was written (a NameError); the stray leftover line has been removed.
# ## Extention and Further Idea
# Here, we only use the vincenty distance, but by connecting to the Google API and finding the real road distance between the start and end points the predictor can definitely be improved. Also, here I only used 10% of the data points because of the limitation on running the DNN. By using a GPU or running over the cloud we can use all the samples.
#
#
#
# |
# ## Linear Model | random_line_split |
Travel.py |
# coding: utf-8
# ## Estimating Travel Time
#
#
# The objective of this document is proposing a prediction model for estimating the travel time of two
# specified locations at a given departure time. The main idea here is predicting the velocity of the trip. Given the distance between starting and ending point of the trip, it is possible to easily compute the Travel Time.
# According to the given data, different features including the time of the day, day of the week, month, travel distance, and distance to the center of the city (New York) are used.
# Different prediction models (Linear, GLM and Deep Neural Network) are compared, and the GLM is used for genrating the final results.
# ## Preparation
# Import required libraries
# In[136]:
import numpy as np
import pandas as pd
from geopy.distance import vincenty
from datetime import datetime
from datetime import timedelta
from datetime import time
import statsmodels.api as sm
from sklearn.datasets import load_boston
from sklearn.model_selection import train_test_split
from sklearn.cross_validation import KFold
from sklearn.preprocessing import MinMaxScaler
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_absolute_error, mean_squared_error
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers.normalization import BatchNormalization
get_ipython().magic('matplotlib inline')
# ## Reading data
# In[169]:
df_train = pd.read_csv('train.csv',index_col= 'row_id')
df_test = pd.read_csv('test.csv',index_col= 'row_id')
df_train.head()
# ## Feature engineering
#
# It is clear that the travel time of trip depends on the starting and ending point. In other words, the most uncertain component in the prediction of travel time is the velocity of the trip. Given the velocity and the distance, it is easy to compute the duration of the travel.
#
# Also, I observed all travels in both train and test dataset are happening around New York City. Therefore, the main component in determining the velocity of is the city traffic. We know that traffic is a time-dependent phenomenon which depends on the time of the day, the day of the week, and month of the year. In addition, the traffic is usually heavier in Manhattan (downtown of the city) in comparing to the other point of the city. Therefore, if the starting or ending point of the travel is close to the Manhattan we expect higher traffic comparing to the other neighborhoods. In visualization section, I provide enough evidence from the data set to support the aforementioned claims.
#
# According to this observation the following features are computted by using the raw data and added to the dataframe.
#
# * Distance between starting and ending computted by vincenty formula
# * The time of the day of travel (in sec far from the midnight)
# * The day of the week (Monday, Tuesday, etc). For this categorical data, six dummy variables are added to datafram
# * The month of the travel to capture seasnolity effect.
# * The sequare of distance
# * The velocity is used as the predication variable.
#
# In[156]:
def distance(row):
    """Vincenty distance in miles between the trip's start and end points,
    read from the row's start/end lat-lng columns."""
    source = (row['start_lat'], row['start_lng'])
    dest = ( row['end_lat'], row['end_lng'])
    return vincenty(source,dest).miles
# Approximate center of Manhattan (lat, lng), used as a traffic proxy.
Manhattan = (40.7831, -73.9712)
def pickup_to_MH(row):
    '''Vincenty distance (miles) between the pick up point and the Manhattan center.'''
    source = (row['start_lat'], row['start_lng'])
    return vincenty(source,Manhattan).miles
def dropoff_to_MH(row):
    '''Vincenty distance (miles) between the dropoff point and the Manhattan center.'''
    dest = ( row['end_lat'], row['end_lng'])
    return vincenty(dest,Manhattan).miles
def day_of_week(ep):
    # Weekday name ("Monday", ...) of the epoch timestamp, in local time.
    return datetime.fromtimestamp(ep).strftime("%A")
def month(ep):
    # Calendar month (1-12) of the timestamp, used to capture seasonality.
    return datetime.fromtimestamp(ep).month
def time_of_day(ep):
    # Seconds between the timestamp and the nearest midnight, computed as an
    # offset from a fixed 2015-01-01 reference; timedelta.seconds normalizes
    # the offset into [0, 86400) before the min() folds it to the closer
    # midnight.
    # NOTE(review): under DST transitions this can drift an hour from true
    # clock-time-since-midnight — confirm intended.
    ref = datetime(2015, 1, 1, 0, 0, 0)
    sec = (datetime.fromtimestamp(ep)- ref).seconds
    return min(sec, 86400- sec)
def year(ep):
    # Calendar year of the timestamp (not referenced by add_features below).
    return datetime.fromtimestamp(ep).year
def add_features(df_train_s):
    """Derive model features on a trips dataframe and return it.

    Adds: day-of-week name plus dummy columns, month, time_of_day,
    vincenty trip distance and its square, and distances of the pick-up
    and drop-off points to the Manhattan center.
    """
    # Day-of-week name, kept as a column and expanded into dummy variables
    # (drop_first avoids the dummy-variable trap).
    DD = df_train_s['start_timestamp'].map(day_of_week)
    df_train_s['day'] = DD
    DD = pd.get_dummies( DD,prefix='day', drop_first=True)
    df_train_s = pd.concat([df_train_s, DD],axis =1 )
    # Month and time-of-day features from the start timestamp.
    df_train_s['month'] = df_train_s['start_timestamp'].map(month)
    df_train_s['time_of_day'] = df_train_s['start_timestamp'].map(time_of_day)
    # Trip distance (vincenty, miles) and its squared term.
    df_train_s['distance'] = df_train_s.apply(lambda x :distance(x), axis=1 )
    df_train_s['distance2'] = df_train_s['distance']**2
    # Distance of each endpoint to the Manhattan center (traffic proxy).
    df_train_s['pickup_MH'] = df_train_s.apply(pickup_to_MH, axis=1 )
    df_train_s['dropoff_MH'] = df_train_s.apply(dropoff_to_MH, axis=1 )
    return df_train_s
# Now, we can easily add all of the above features to both traing and test data set. Due to time limtation and calculation power I only used 10% of the traing data.
# In[24]:
np.random.seed(42)
df_train_s = df_train.sample(frac=0.01, replace=False)
df_train_s = add_features(df_train_s)
df_train_s['velocity'] = np.array(df_train_s['distance']/(df_train_s['duration']/3600))
# In[25]:
df_train_s.head()
# In[170]:
# adding the feature to test set.
df_test = add_features(df_test)
# ## Removing Outlires
# The following functions are used to compute these features. Considering the speed limit and the fact the usual trafic in New York, it is reseanable to assume that always the speed show not exceed 90 mph. Therefore, I remove the points with more than this number as the outlires. Also, I removed the data with less than .5 mph. Specificlly, there exists many samples with zero distance between starting and ending point which might happen becouse GPS problem.
# In[41]:
df_train_s = df_train_s[df_train_s['velocity']<90]
df_train_s = df_train_s[df_train_s['velocity']>.5]
# ## Data Visulazation
#
# First we look at the starting and ending point of the trips which happens in New York.
#
#
#
# In[30]:
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(12, 6))
ax = df_train_s.plot.scatter( 'start_lat','start_lng',
ax = axes[0],
title='Start point of travel')
ax.set(xlabel="latitude", ylabel='longitude')
ax = df_train_s.plot.scatter('end_lng','end_lat',
ax = axes[1],
title='Destination of the travel')
ax.set(xlabel="latitude", ylabel='longitude')
plt.show()
# Here are some statitcs about the volacity, distance of each trip and its duration. Also, we looked at the density function of the volacity. A log-normal or Gamma distribution are approprate candiatdes for this distribution.
# In[42]:
df_train_s[['distance', 'duration','velocity']].describe()
# In[43]:
df_train_s['velocity'].hist(bins=1000,normed=True)
# ### Corrolation matrix
# In[44]:
corr = df_train_s.corr()
# generate a mask for the lower triangle
mask = np.zeros_like(corr, dtype=np.bool)
mask[np.triu_indices_from(mask)] = True
# set up the matplotlib figure
f, ax = plt.subplots(figsize=(18, 18))
# generate a custom diverging colormap
cmap = sns.diverging_palette(220, 10, as_cmap=True)
# draw the heatmap with the mask and correct aspect ratio
sns.heatmap(corr, mask=mask, cmap=cmap, vmax=.3,
square=True,
linewidths=.5, cbar_kws={"shrink": .5}, ax=ax)
plt.show()
# In[53]:
df_train_s.plot.scatter( 'distance','velocity')
# In[48]:
### Seanility and time Effect on Velocity
gr= df_train_s[['velocity','month']].groupby(by='month')
gr.mean().plot.bar(yerr=gr.std())
# ## Data preprocessing
#
# Let's split our data to train and test set in fraction of $\frac{4}{1}$ to facilate comparing the results.
# This test set is differenet from the given test set.
# In[105]:
cl = list(set(df_train_s.keys())-{'velocity','duration','day'})
X = np.array(df_train_s[cl])
X1 = np.insert(X, 0, 1, axis=1)
y = np.array(df_train_s['velocity'])
X_train, X_test, y_train, y_test = train_test_split(X1, y, test_size=0.2, random_state=42)
dist_train = X_train[:,1]
dist_test = X_test[:,1]
# In[106]:
list(enumerate(cl))
dist_train.mean()
# ## Linear Model
# In[204]:
model_sk = LinearRegression()
model_sk.fit(X_train, y_train)
plt.figure(figsize=(12, 8))
plt.bar(np.arange(model_sk.coef_.shape[0]) - 0.4, model_sk.coef_)
plt.xticks(np.arange(model_sk.coef_.shape[0]), cl, rotation='vertical')
plt.xlim([-1, model_sk.coef_.shape[0]])
plt.title("Linear model coefficients")
plt.show()
# The folling chart also provide better understading. Excepet X12 (dummy for sunday) all the other variables are significant; the p-value is zero and null-hypothesis is rejected.
# In[205]:
linear_model = sm.OLS(y_train, X_train)
linear_results = linear_model.fit()
print(linear_results.summary())
# ## Generalized Linear Model
# I tried GLM with gamma fammaly.
# In[206]:
gamma_model = sm.GLM( y_train, X_train,family=sm.families.Gamma())
gamma_results = gamma_model.fit()
print(gamma_results.summary())
# ## Deep Neural Network (DNN)
#
# Here, I am useing a DNN as a prediction model. I am using the Keras package to train the network. Network includes 3 layers. Also, between each two layer a dropout layer is add. RELU and softmax are used as the activation functions. Here, I define the model.
#
# I normilized the data the input data to imporve the performance.
# In[195]:
DNN_model = Sequential()
DNN_model.add(Dense(100,input_dim=X_train.shape[1],init='uniform',activation='relu'))
DNN_model.add(Dropout(0.5))
DNN_model.add(Dense(50,init='uniform',activation='softmax'))
DNN_model.add(Dropout(0.5))
DNN_model.add(Dense(100,init='uniform',activation='relu'))
DNN_model.add(Dropout(0.5))
DNN_model.add(Dense(1,init='uniform',activation='relu'))
DNN_model.summary()
# ### Fitting the DNN
# In[196]:
mn = X1.mean(axis=0)
#model.compile(loss='mean_absolute_error',optimizer='adam',metrics='[accuracy]')
DNN_model.compile(loss='mean_absolute_error',optimizer='adam')
history = DNN_model.fit(X_train/mn,y_train,
validation_data=(X_test/mn, y_test),
epochs =100,
batch_size=100,
verbose=2)
# In[197]:
plt.figure(figsize=(10, 8))
plt.title("Dense model training", fontsize=12)
plt.plot(history.history["loss"], label="Train")
plt.plot(history.history["val_loss"], label="Test")
plt.grid("on")
plt.xlabel("Epoch", fontsize=12)
plt.ylabel("loss", fontsize=12)
plt.legend(loc="upper right")
# ## Evalution
#
# In this part, I compare the propsed models and choose the best one. I compare the results based on mean absolute
# error of predicted versus actual durations, and also mean absolute percentage error which is the percantge of the error. Note that here we compare based on duration as asked in the question and not the velocity.
#
# In[207]:
preds_test, preds_train = {}, {}
#Linear Model
preds_test['linear'] = linear_results.predict(X_test)
preds_train['linear'] = linear_results.predict(X_train)
#GLM (Gamma Model)
preds_test['GLM'] = gamma_results.predict(X_test)
preds_train['GLM'] = gamma_results.predict(X_train)
#Deep Learning
preds_test['DL'] = np.squeeze(DNN_model.predict(X_test/mn))
preds_train['DL'] = np.squeeze(DNN_model.predict(X_train/mn))
# The functions are used for evalution
# In[84]:
def mean_absolute_error(dist,y_true, y_pred ):
    """
    Mean absolute duration error in seconds: velocities are converted to
    durations via dist/velocity (hours) and the absolute gaps averaged.
    Shadows sklearn.metrics.mean_absolute_error imported earlier in the file.

    Args:
        dist(ndarray) : distance between pick up and drop off
        y_true(ndarray) : true velocity
        y_pred(ndarray) : the prediction value of velocity
    """
    err = np.abs(dist/y_true - dist/y_pred)
    # Division by a zero velocity yields inf/nan; keep finite entries only.
    err = err[np.isfinite(err)]
    # Hours -> seconds.
    return np.mean(err) *3600
def mean_absolute_percentage_error(dist,y_true, y_pred ):
    """
    Mean absolute percentage error of the predicted velocities, as a percent.
    The `dist` argument is unused; it keeps the signature parallel to
    mean_absolute_error.

    Args:
        dist(ndarray) : distance between pick up and drop off
        y_true(ndarray) : true velocity
        y_pred(ndarray) : the prediction value of velocity
    """
    err = np.abs(y_true/y_pred - 1)
    # Drop inf/nan entries produced by zero predictions.
    err = err[np.isfinite(err)]
    return np.mean(err)*100
def | (dist,y_true,prediction):
MAE, MAPE= {}, {}
for kys, y_pred in prediction.items():
MAE[kys] = mean_absolute_error(dist,y_true, y_pred )
MAPE[kys] = mean_absolute_percentage_error(dist,y_true, y_pred )
return MAE, MAPE
# In[209]:
MAE_train, MAPE_train = evalute(dist_train,y_train, preds_train)
MAE_test, MAPE_test = evalute(dist_test,y_test, preds_test)
pd.DataFrame([MAE_test,MAE_train, MAPE_test, MAPE_train],
index= ['MAE_test', 'MAE_train', 'MAPE_test', 'MAPE_train'] ).transpose()
# In[201]:
dist_train.mean()
# ## Generate Prediction for Test Set
#
# By comparing the three models (linear, GLM, DNN), I choose GLM for generating the predication for the given test set.
# In[212]:
# Build the design matrix for the held-out test set with the same feature
# columns (cl) and a leading intercept column, matching training.
XX = np.array(df_test[cl])
XX = np.insert(XX, 0, 1, axis=1)
# NOTE(review): column 1 is assumed to be the trip distance, but `cl` is
# derived from a set() so its ordering is not stable across runs — verify
# with list(enumerate(cl)) before relying on this index.
dist_x = XX[:,1]
#DNN_TD = dist_x/np.squeeze(DNN_model.predict(XX/mn))*3600
# Predicted duration in seconds: distance / predicted velocity (mph) * 3600.
GLM_TD = dist_x/gamma_results.predict(XX)*3600
df_ans= pd.DataFrame(GLM_TD, columns =['duration'])
df_ans.index.name = 'row_id'
df_ans.to_csv('answer.csv')
# Bug fix: the original rebuilt df_ans from an undefined name `TD` after the
# CSV was written (a NameError); the stray leftover line has been removed.
# ## Extention and Further Idea
# Here, we only use the vincenty, but by conteccting to google API and fidning the real distance between start and end point the preditor defenitlly can be improved. Also, here I only used 10% of data points becouse of the limitation on runnig the DNN. By using GPU or running over the cloud we can use all the samples.
#
#
#
#
| evalute | identifier_name |
Travel.py |
# coding: utf-8
# ## Estimating Travel Time
#
#
# The objective of this document is proposing a prediction model for estimating the travel time of two
# specified locations at a given departure time. The main idea here is predicting the velocity of the trip. Given the distance between starting and ending point of the trip, it is possible to easily compute the Travel Time.
# According to the given data, different features including the time of the day, day of the week, month, travel distance, and distance to the center of the city (New York) are used.
# Different prediction models (Linear, GLM and Deep Neural Network) are compared, and the GLM is used for genrating the final results.
# ## Preparation
# Import required libraries
# In[136]:
import numpy as np
import pandas as pd
from geopy.distance import vincenty
from datetime import datetime
from datetime import timedelta
from datetime import time
import statsmodels.api as sm
from sklearn.datasets import load_boston
from sklearn.model_selection import train_test_split
from sklearn.cross_validation import KFold
from sklearn.preprocessing import MinMaxScaler
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_absolute_error, mean_squared_error
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers.normalization import BatchNormalization
get_ipython().magic('matplotlib inline')
# ## Reading data
# In[169]:
df_train = pd.read_csv('train.csv',index_col= 'row_id')
df_test = pd.read_csv('test.csv',index_col= 'row_id')
df_train.head()
# ## Feature engineering
#
# It is clear that the travel time of trip depends on the starting and ending point. In other words, the most uncertain component in the prediction of travel time is the velocity of the trip. Given the velocity and the distance, it is easy to compute the duration of the travel.
#
# Also, I observed all travels in both train and test dataset are happening around New York City. Therefore, the main component in determining the velocity of is the city traffic. We know that traffic is a time-dependent phenomenon which depends on the time of the day, the day of the week, and month of the year. In addition, the traffic is usually heavier in Manhattan (downtown of the city) in comparing to the other point of the city. Therefore, if the starting or ending point of the travel is close to the Manhattan we expect higher traffic comparing to the other neighborhoods. In visualization section, I provide enough evidence from the data set to support the aforementioned claims.
#
# According to this observation the following features are computted by using the raw data and added to the dataframe.
#
# * Distance between starting and ending computted by vincenty formula
# * The time of the day of travel (in sec far from the midnight)
# * The day of the week (Monday, Tuesday, etc). For this categorical data, six dummy variables are added to datafram
# * The month of the travel to capture seasnolity effect.
# * The sequare of distance
# * The velocity is used as the predication variable.
#
# In[156]:
def distance(row):
    """Vincenty (geodesic) distance in miles between the trip's pick-up
    and drop-off points, read from the row's lat/lng columns."""
    pickup = (row['start_lat'], row['start_lng'])
    dropoff = (row['end_lat'], row['end_lng'])
    return vincenty(pickup, dropoff).miles
# Approximate center of Manhattan (lat, lng), used as a traffic proxy.
Manhattan = (40.7831, -73.9712)
def pickup_to_MH(row):
    '''Vincenty distance (miles) between the pick up point and the Manhattan center.'''
    source = (row['start_lat'], row['start_lng'])
    return vincenty(source,Manhattan).miles
def dropoff_to_MH(row):
    '''Vincenty distance (miles) between the dropoff point and the Manhattan center.'''
    dest = ( row['end_lat'], row['end_lng'])
    return vincenty(dest,Manhattan).miles
def day_of_week(ep):
return datetime.fromtimestamp(ep).strftime("%A")
def month(ep):
    """Calendar month (1-12) of a POSIX timestamp, in local time."""
    local_dt = datetime.fromtimestamp(ep)
    return local_dt.month
def time_of_day(ep):
ref = datetime(2015, 1, 1, 0, 0, 0)
sec = (datetime.fromtimestamp(ep)- ref).seconds
return min(sec, 86400- sec)
def year(ep):
    """Calendar year of a POSIX timestamp, in local time."""
    local_dt = datetime.fromtimestamp(ep)
    return local_dt.year
def add_features(df_train_s):
    """Derive model features on a trips dataframe and return it.

    Adds: day-of-week name plus dummy columns, month, time_of_day,
    vincenty trip distance and its square, and distances of the pick-up
    and drop-off points to the Manhattan center.
    """
    # Day-of-week name, kept as a column and expanded into dummy variables
    # (drop_first avoids the dummy-variable trap).
    DD = df_train_s['start_timestamp'].map(day_of_week)
    df_train_s['day'] = DD
    DD = pd.get_dummies( DD,prefix='day', drop_first=True)
    df_train_s = pd.concat([df_train_s, DD],axis =1 )
    # Month and time-of-day features from the start timestamp.
    df_train_s['month'] = df_train_s['start_timestamp'].map(month)
    df_train_s['time_of_day'] = df_train_s['start_timestamp'].map(time_of_day)
    # Trip distance (vincenty, miles) and its squared term.
    df_train_s['distance'] = df_train_s.apply(lambda x :distance(x), axis=1 )
    df_train_s['distance2'] = df_train_s['distance']**2
    # Distance of each endpoint to the Manhattan center (traffic proxy).
    df_train_s['pickup_MH'] = df_train_s.apply(pickup_to_MH, axis=1 )
    df_train_s['dropoff_MH'] = df_train_s.apply(dropoff_to_MH, axis=1 )
    return df_train_s
# Now, we can easily add all of the above features to both traing and test data set. Due to time limtation and calculation power I only used 10% of the traing data.
# In[24]:
np.random.seed(42)
df_train_s = df_train.sample(frac=0.01, replace=False)
df_train_s = add_features(df_train_s)
df_train_s['velocity'] = np.array(df_train_s['distance']/(df_train_s['duration']/3600))
# In[25]:
df_train_s.head()
# In[170]:
# adding the feature to test set.
df_test = add_features(df_test)
# ## Removing Outlires
# The following functions are used to compute these features. Considering the speed limit and the fact the usual trafic in New York, it is reseanable to assume that always the speed show not exceed 90 mph. Therefore, I remove the points with more than this number as the outlires. Also, I removed the data with less than .5 mph. Specificlly, there exists many samples with zero distance between starting and ending point which might happen becouse GPS problem.
# In[41]:
df_train_s = df_train_s[df_train_s['velocity']<90]
df_train_s = df_train_s[df_train_s['velocity']>.5]
# ## Data Visulazation
#
# First we look at the starting and ending point of the trips which happens in New York.
#
#
#
# In[30]:
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(12, 6))
ax = df_train_s.plot.scatter( 'start_lat','start_lng',
ax = axes[0],
title='Start point of travel')
ax.set(xlabel="latitude", ylabel='longitude')
ax = df_train_s.plot.scatter('end_lng','end_lat',
ax = axes[1],
title='Destination of the travel')
ax.set(xlabel="latitude", ylabel='longitude')
plt.show()
# Here are some statitcs about the volacity, distance of each trip and its duration. Also, we looked at the density function of the volacity. A log-normal or Gamma distribution are approprate candiatdes for this distribution.
# In[42]:
df_train_s[['distance', 'duration','velocity']].describe()
# In[43]:
df_train_s['velocity'].hist(bins=1000,normed=True)
# ### Corrolation matrix
# In[44]:
corr = df_train_s.corr()
# generate a mask for the lower triangle
mask = np.zeros_like(corr, dtype=np.bool)
mask[np.triu_indices_from(mask)] = True
# set up the matplotlib figure
f, ax = plt.subplots(figsize=(18, 18))
# generate a custom diverging colormap
cmap = sns.diverging_palette(220, 10, as_cmap=True)
# draw the heatmap with the mask and correct aspect ratio
sns.heatmap(corr, mask=mask, cmap=cmap, vmax=.3,
square=True,
linewidths=.5, cbar_kws={"shrink": .5}, ax=ax)
plt.show()
# In[53]:
df_train_s.plot.scatter( 'distance','velocity')
# In[48]:
### Seanility and time Effect on Velocity
gr= df_train_s[['velocity','month']].groupby(by='month')
gr.mean().plot.bar(yerr=gr.std())
# ## Data preprocessing
#
# Let's split our data to train and test set in fraction of $\frac{4}{1}$ to facilate comparing the results.
# This test set is differenet from the given test set.
# In[105]:
cl = list(set(df_train_s.keys())-{'velocity','duration','day'})
X = np.array(df_train_s[cl])
X1 = np.insert(X, 0, 1, axis=1)
y = np.array(df_train_s['velocity'])
X_train, X_test, y_train, y_test = train_test_split(X1, y, test_size=0.2, random_state=42)
dist_train = X_train[:,1]
dist_test = X_test[:,1]
# In[106]:
list(enumerate(cl))
dist_train.mean()
# ## Linear Model
# In[204]:
model_sk = LinearRegression()
model_sk.fit(X_train, y_train)
plt.figure(figsize=(12, 8))
plt.bar(np.arange(model_sk.coef_.shape[0]) - 0.4, model_sk.coef_)
plt.xticks(np.arange(model_sk.coef_.shape[0]), cl, rotation='vertical')
plt.xlim([-1, model_sk.coef_.shape[0]])
plt.title("Linear model coefficients")
plt.show()
# The folling chart also provide better understading. Excepet X12 (dummy for sunday) all the other variables are significant; the p-value is zero and null-hypothesis is rejected.
# In[205]:
linear_model = sm.OLS(y_train, X_train)
linear_results = linear_model.fit()
print(linear_results.summary())
# ## Generalized Linear Model
# I tried GLM with gamma fammaly.
# In[206]:
gamma_model = sm.GLM( y_train, X_train,family=sm.families.Gamma())
gamma_results = gamma_model.fit()
print(gamma_results.summary())
# ## Deep Neural Network (DNN)
#
# Here, I am useing a DNN as a prediction model. I am using the Keras package to train the network. Network includes 3 layers. Also, between each two layer a dropout layer is add. RELU and softmax are used as the activation functions. Here, I define the model.
#
# I normilized the data the input data to imporve the performance.
# In[195]:
DNN_model = Sequential()
DNN_model.add(Dense(100,input_dim=X_train.shape[1],init='uniform',activation='relu'))
DNN_model.add(Dropout(0.5))
DNN_model.add(Dense(50,init='uniform',activation='softmax'))
DNN_model.add(Dropout(0.5))
DNN_model.add(Dense(100,init='uniform',activation='relu'))
DNN_model.add(Dropout(0.5))
DNN_model.add(Dense(1,init='uniform',activation='relu'))
DNN_model.summary()
# ### Fitting the DNN
# In[196]:
mn = X1.mean(axis=0)
#model.compile(loss='mean_absolute_error',optimizer='adam',metrics='[accuracy]')
DNN_model.compile(loss='mean_absolute_error',optimizer='adam')
history = DNN_model.fit(X_train/mn,y_train,
validation_data=(X_test/mn, y_test),
epochs =100,
batch_size=100,
verbose=2)
# In[197]:
plt.figure(figsize=(10, 8))
plt.title("Dense model training", fontsize=12)
plt.plot(history.history["loss"], label="Train")
plt.plot(history.history["val_loss"], label="Test")
plt.grid("on")
plt.xlabel("Epoch", fontsize=12)
plt.ylabel("loss", fontsize=12)
plt.legend(loc="upper right")
# ## Evalution
#
# In this part, I compare the propsed models and choose the best one. I compare the results based on mean absolute
# error of predicted versus actual durations, and also mean absolute percentage error which is the percantge of the error. Note that here we compare based on duration as asked in the question and not the velocity.
#
# In[207]:
preds_test, preds_train = {}, {}
#Linear Model
preds_test['linear'] = linear_results.predict(X_test)
preds_train['linear'] = linear_results.predict(X_train)
#GLM (Gamma Model)
preds_test['GLM'] = gamma_results.predict(X_test)
preds_train['GLM'] = gamma_results.predict(X_train)
#Deep Learning
preds_test['DL'] = np.squeeze(DNN_model.predict(X_test/mn))
preds_train['DL'] = np.squeeze(DNN_model.predict(X_train/mn))
# The functions are used for evalution
# In[84]:
def mean_absolute_error(dist,y_true, y_pred ):
    """
    Mean absolute duration error in seconds: velocities are converted to
    durations via dist/velocity (hours) and the absolute gaps averaged.
    Shadows sklearn.metrics.mean_absolute_error imported earlier in the file.

    Args:
        dist(ndarray) : distance between pick up and drop off
        y_true(ndarray) : true velocity
        y_pred(ndarray) : the prediction value of velocity
    """
    err = np.abs(dist/y_true - dist/y_pred)
    # Division by a zero velocity yields inf/nan; keep finite entries only.
    err = err[np.isfinite(err)]
    # Hours -> seconds.
    return np.mean(err) *3600
def mean_absolute_percentage_error(dist,y_true, y_pred ):
    """
    Mean absolute percentage error of the predicted velocities, as a percent.
    The `dist` argument is unused; it keeps the signature parallel to
    mean_absolute_error.

    Args:
        dist(ndarray) : distance between pick up and drop off
        y_true(ndarray) : true velocity
        y_pred(ndarray) : the prediction value of velocity
    """
    err = np.abs(y_true/y_pred - 1)
    # Drop inf/nan entries produced by zero predictions.
    err = err[np.isfinite(err)]
    return np.mean(err)*100
def evalute(dist,y_true,prediction):
MAE, MAPE= {}, {}
for kys, y_pred in prediction.items():
|
return MAE, MAPE
# In[209]:
MAE_train, MAPE_train = evalute(dist_train,y_train, preds_train)
MAE_test, MAPE_test = evalute(dist_test,y_test, preds_test)
pd.DataFrame([MAE_test,MAE_train, MAPE_test, MAPE_train],
index= ['MAE_test', 'MAE_train', 'MAPE_test', 'MAPE_train'] ).transpose()
# In[201]:
dist_train.mean()
# ## Generate Prediction for Test Set
#
# By comparing the three models (linear, GLM, DNN), I choose GLM for generating the predication for the given test set.
# In[212]:
# Build the design matrix for the held-out test set with the same feature
# columns (cl) and a leading intercept column, matching training.
XX = np.array(df_test[cl])
XX = np.insert(XX, 0, 1, axis=1)
# NOTE(review): column 1 is assumed to be the trip distance, but `cl` is
# derived from a set() so its ordering is not stable across runs — verify
# with list(enumerate(cl)) before relying on this index.
dist_x = XX[:,1]
#DNN_TD = dist_x/np.squeeze(DNN_model.predict(XX/mn))*3600
# Predicted duration in seconds: distance / predicted velocity (mph) * 3600.
GLM_TD = dist_x/gamma_results.predict(XX)*3600
df_ans= pd.DataFrame(GLM_TD, columns =['duration'])
df_ans.index.name = 'row_id'
df_ans.to_csv('answer.csv')
# Bug fix: the original rebuilt df_ans from an undefined name `TD` after the
# CSV was written (a NameError); the stray leftover line has been removed.
# ## Extention and Further Idea
# Here, we only use the vincenty, but by conteccting to google API and fidning the real distance between start and end point the preditor defenitlly can be improved. Also, here I only used 10% of data points becouse of the limitation on runnig the DNN. By using GPU or running over the cloud we can use all the samples.
#
#
#
#
| MAE[kys] = mean_absolute_error(dist,y_true, y_pred )
MAPE[kys] = mean_absolute_percentage_error(dist,y_true, y_pred ) | conditional_block |
scripts.js | window.onload = function(){
var progressDivElement = document.getElementById('currentProgress');
var manifest = [
{src: "assets/fruit.json", id: "sheet1", type: "spritesheet"},
{src: "assets/Game-Break.mp3", id: "soundBreak", type: "sound"},
{src: "assets/Game-Shot.mp3", id: "soundShot",type: "sound"},
{src: "assets/Game-Death.mp3", id: "soundDeath",type: "sound"}
];
//Create loader
var loader = new createjs.LoadQueue(true, "./");
loader.on("fileload", handleFileLoad);
loader.on("progress",handleOverallProgress);
loader.on("complete",handleComplete);
loader.installPlugin(createjs.Sound);
loader.loadManifest(manifest);
var assets = []; //To store the asset files from manifest
var AudioContext; //For audio context
var audioCtx;
var loadingCompleted = false; //Only allow game start after loading is complete
var appleArray = []; //power up for longer paddle
var bananaArray = []; //power up for second ball
var spriteSheet;
function handleFileLoad(event)
{
    // Record each loaded manifest entry; handleComplete later scans this
    // list to pull out the assets it needs by id.
    console.log("File loaded");
    assets.push(event);
}
function handleOverallProgress(event)
{
    // Mirror the loader's 0..1 progress onto the progress bar's width.
    var percentWidth = (loader.progress * 100) + "%";
    console.log('TOTAL: ' + loader.progress);
    progressDivElement.style.width = percentWidth;
}
function handleComplete()
{
    console.log('Loaded all files in the manifest.');
    // Walk every recorded load event and pick out needed assets by
    // their manifest id (currently only the sprite sheet).
    for (var i = 0; i < assets.length; i++)
    {
        var event = assets[i];
        var result = event.result;
        switch (event.item.id)
        {
            case 'sheet1':
                spriteSheet = result;
                break;
        }
    }
    // Gate game start on this flag (checked in the stagemousedown handler).
    loadingCompleted = true;
}
//define paddle and bricks width and height here
const PADDLE_WIDTH = 100;
const PADDLE_HEIGHT = 20;
const BRICKS_WIDTH = 60;
const BRICKS_HEIGHT = 30;
const BALL_RADIUS = 8;
const FULL_X_SPEED = 7;
var stage;
var paddle;
var ball;
var ball2; //second ball
var bricks = [];
//configure game levels
var levelArray = [
{rowCount:1, rowbreak:false, timelimit:90, leftSpaceStartIndex:3, leftSpaceEndIndex:3, rightSpaceStartIndex:10, rightSpaceEndIndex:10},
{rowCount:5, rowbreak:false, timelimit:120, leftSpaceStartIndex:2, leftSpaceEndIndex:5, rightSpaceStartIndex:8, rightSpaceEndIndex:11},
{rowCount:7, rowbreak:false, timelimit:200, leftSpaceStartIndex:0, leftSpaceEndIndex:5, rightSpaceStartIndex:7, rightSpaceEndIndex:13},
{rowCount:11, rowbreak:true, timelimit:30, leftSpaceStartIndex:0, leftSpaceEndIndex:5, rightSpaceStartIndex:8, rightSpaceEndIndex:13},
]
var currentGameLevel = 1; //make level 1 as default for the game
var score = 0;
var lives = 3;
var scoreText;
var gameStarted = false;
//set keyboard left and right movements' initial state
var keyboardMoveLeft = false;
var keyboardMoveRight = false;
//use local storage to store data in between sessions, we add a variable to store the scores data.
var highScore = 0;
var timer;
var timerLength = 100;
var timerText;
//check if this browser supports local storage
if(typeof(Storage) !== "undefined") {
if(localStorage.highScore==undefined) //if its not in the localstorage, then set it to 0
{
localStorage.highScore = 0;
}
highScore = localStorage.highScore; //if it does, add it to the declared variable
} else {
highScore = 0;
}
stage = new createjs.Stage("testCanvas");
createjs.Touch.enable(stage);
createjs.Ticker.setFPS(60);
//createjs.Ticker.addEventListener("tick", tick);
createjs.Ticker.addEventListener("tick", stage);
createjs.Ticker.on("tick", tick);
createScoreText();
createTimerText();
addToScore(0);
createPaddle(); //create paddle before creating ball
createBall();
createBrickGrid();
//keyboard handlers
window.onkeyup = keyUpHandler;
window.onkeydown = keyDownHandler;
//if the user clicks the mouse and when the game is in the pause state, start the game.
stage.on("stagemousedown", function(event)
{
if(loadingCompleted){
startLevel();
}else{
alert("Please wait until loading complete");
}
});
stage.on("stagemousemove", function (event) //move the paddle only if the mouse is moved.
{
if(loadingCompleted){
paddle.x = stage.mouseX;
}
});
//enlarge the stage for mobile users to finger move the paddle
stage.canvas.height = window.innerHeight;
function startLevel()
{
// Transition from the paused state into active play: resume audio,
// reset the paddle to its normal size, give the ball its initial
// velocity and start the one-second countdown timer.
// Does nothing while a round is already running.
if(!gameStarted)
{
console.log("Start Game");
// Browsers block audio until a user gesture; this runs from a
// click/keypress handler, so the context can be created/resumed here.
AudioContext = window.AudioContext || window.webkitAudioContext;
audioCtx = new AudioContext();
if (audioCtx.state !== 'running') {
audioCtx.resume();
}
// Undo any leftover "long paddle" power-up from a previous life.
paddle.scaleX = 1.0;
paddle.width = 100;
gameStarted = true;
ball.xSpeed = 5;
ball.ySpeed = 5;
ball.up = true;
ball.right = true;
timerLength = levelArray[currentGameLevel-1].timelimit; //timer length for each level
timer = setInterval(countDownOnGame, 1000);
}
}
function countDownOnGame() |
function keyDownHandler(e)
{
    // Keyboard controls: arrow keys steer the paddle, space launches the
    // ball. Input is ignored until the asset loader has finished.
    var key = e.key;
    if (key == "Left" || key == "ArrowLeft") {
        if (loadingCompleted) {
            keyboardMoveLeft = true;
        }
    }
    if (key == "Right" || key == "ArrowRight") {
        if (loadingCompleted) {
            keyboardMoveRight = true;
        }
    }
    if (key == " ") {
        if (loadingCompleted) {
            startLevel();
        } else {
            alert("Please wait until loading complete");
        }
    }
}
function keyUpHandler(e)
{
    // Stop paddle movement as soon as an arrow key is released.
    var released = e.key;
    if (released == "Left" || released == "ArrowLeft") {
        keyboardMoveLeft = false;
    }
    if (released == "Right" || released == "ArrowRight") {
        keyboardMoveRight = false;
    }
}
function addToScore(points)
{
    // Add points to the running score, keep the session best in sync,
    // and refresh the status line.
    console.log("score added");
    score += points;
    highScore = (score > highScore) ? score : highScore;
    updateStatusLine();
}
function updateStatusLine()
{
    // Render score / lives / high score into the status text element.
    var line = "Score: "+score + " / Lives: "+lives+" / High Score: "+highScore;
    scoreText.text = line;
}
function createScoreText()
{
// Build the score/lives/high-score status line near the bottom of the
// canvas and register it with the stage.
scoreText = new createjs.Text("Score: 0", "16px Arial", "#000000");
// Refresh immediately so the persisted high score shows from the start.
addToScore(0);
scoreText.x = stage.canvas.width/2 - 150;
scoreText.y = stage.canvas.height - 36;
stage.addChild(scoreText);
}
function updateTimerLine() //for display timer on the page
{
    // Show the remaining seconds for the current level.
    timerText.text = "Timer: " + timerLength;
}
function createTimerText(){
    // Create the countdown display just below the score line.
    var label = new createjs.Text("Timer: 90", "16px Arial", "#000000");
    label.x = stage.canvas.width/2 - 100;
    label.y = stage.canvas.height - 15;
    timerText = label;
    stage.addChild(timerText);
}
function loseLife()
{
    // Handle losing a life (both balls gone, or the timer expired):
    // remove the balls, stop the countdown, clear falling power-ups,
    // decrement lives and park a fresh stationary ball on the paddle.
    // When lives reach zero the score resets and the grid is rebuilt.
    console.log("Lost a life");
    stage.removeChild(ball);
    ball = null;
    stage.removeChild(ball2);
    ball2 = null;
    createjs.Sound.play("soundDeath");
    // BUGFIX: the timer was created with setInterval, so stop it with
    // clearInterval (clearTimeout only worked because browsers share the
    // timer-ID pool — not something to rely on).
    clearInterval(timer);
    timerLength = levelArray[currentGameLevel-1].timelimit;
    updateTimerLine();
    lives--;
    updateStatusLine();
    // New stationary ball centred on the paddle until the next launch.
    createBall();
    ball.xSpeed = 0;
    ball.ySpeed = 0;
    ball.x = paddle.x;
    ball.y = paddle.y - PADDLE_HEIGHT/2 - BALL_RADIUS;
    gameStarted = false; //switch to game pause state again
    // BUGFIX: persist the best score, not the current score — writing
    // `score` here could overwrite a higher stored high score.
    localStorage.highScore = highScore;
    // Remove any power-ups that were still falling.
    for(var i=0;i<appleArray.length;i++){
        stage.removeChild(appleArray[i]);
    }
    appleArray = [];
    for(var i=0;i<bananaArray.length;i++){
        stage.removeChild(bananaArray[i]);
    }
    bananaArray = [];
    // No lives left: reset score/lives and rebuild the bricks.
    if(lives==0)
    {
        if(highScore<score)
        {
            highScore = score;
            localStorage.highScore = score;
        }
        lives = 3; //reset the lives
        score = 0; //reset the score
        createBrickGrid(); //reset bricks
    }
    updateStatusLine();
}
function tick(event) //custom tick function
{
// Per-frame game loop: moves the paddle, both balls and falling
// power-ups; resolves brick and paddle collisions; bounces off walls;
// checks win/lose conditions; and finally redraws the stage.
// stage.update(); //update the stage manually
//move paddle based on left and right key
if(keyboardMoveLeft)
{
console.log("Keyboard- Left");
paddle.x-=5;
}
if(keyboardMoveRight)
{
console.log("Keyboard- Right");
paddle.x+=5;
}
// one fix to make sure paddle not moving through the walls of stage
if(paddle.x+PADDLE_WIDTH/2>stage.canvas.width)
{
paddle.x = stage.canvas.width - PADDLE_WIDTH/2;
}
if(paddle.x-PADDLE_WIDTH/2<0)
{
paddle.x = PADDLE_WIDTH/2;
}
//make sure ball is in the middle surface of paddle and no action taken when its paused in every tick
if(!gameStarted)
{
ball.x = paddle.x;
ball.y = paddle.y - PADDLE_HEIGHT/2 - BALL_RADIUS;
stage.update();
return;
}
// Advance the primary ball one step along its direction flags.
if(ball != null){
if(ball.up) // y axis value starts 0 at the top, so have ball's x and y value removed by 1 in each tick.
{
ball.y -= ball.ySpeed;
}
else
{
ball.y += ball.ySpeed; //if its going down ,move to the bottom of the screen
}
if(ball.right)//if its going right,move to the right.
{
ball.x += ball.xSpeed;
}
else
{
ball.x -= ball.xSpeed;
}
}
// Advance the optional second (power-up) ball the same way.
if(ball2 != null){
if(ball2.up) // y axis value starts 0 at the top, so have ball's x and y value removed by 1 in each tick.
{
ball2.y -= ball2.ySpeed;
}
else
{
ball2.y += ball2.ySpeed; //if its going down ,move to the bottom of the screen
}
if(ball2.right)//if its going right,move to the right.
{
ball2.x += ball2.xSpeed;
}
else
{
ball2.x -= ball2.xSpeed;
}
}
//move the power up down
for(var i=0;i<appleArray.length;i++){
var apple = appleArray[i];
apple.y++;
}
for(var i=0;i<bananaArray.length;i++){
var banana = bananaArray[i];
banana.y++;
}
//check if each brick in the array collides with the ball
for(var i=0;i<bricks.length;i++)
{
if(ball != null && checkCollision(ball,bricks[i]))
{
addToScore(100);
createjs.Sound.play("soundBreak");//add sound when hit happens
console.log("Brick hit / New Score: "+score);
var brickColor = bricks[i].name;
//Destroy the brick only if this is blue brick, otherwise lower the brick color by 1
if(brickColor == "blue"){
destroyBrick(bricks[i]); //if there is collision, destroy the brick
bricks.splice(i,1); //remove the brick from array
i--; //minus array element index to get the following brick to move to the disappeared bricks position
}else{
if(brickColor == "black"){
brickColor = "orange";
//drop apple power up only when the paddle is at original length
if(paddle.scaleX == 1){
dropApple(bricks[i]);
}
}
else if(brickColor == "orange"){
brickColor = "yellow";
//drop banana power up only when the second ball does not exist
if(ball2 == null){
dropBanana(bricks[i]);
}
}
else if(brickColor == "yellow"){
brickColor = "blue";
}
bricks[i].name = brickColor;
bricks[i].graphics.beginFill(brickColor);
bricks[i].graphics.drawRect(0, 0, BRICKS_WIDTH, BRICKS_HEIGHT);
bricks[i].graphics.endFill(); //complete the drawing of the shape
}
}
}
// Repeat the brick-collision pass for the second ball.
for(var i=0;i<bricks.length;i++)
{
if(ball2!=null && checkCollision(ball2,bricks[i]))
{
addToScore(100);
createjs.Sound.play("soundBreak");
console.log("Brick hit / New Score: "+score);
var brickColor = bricks[i].name;
//Destroy the brick only if this is blue brick, otherwise lower the brick color by 1
if(brickColor == "blue"){
destroyBrick(bricks[i]); //if there is collision, destroy the brick
bricks.splice(i,1); //remove the brick from array
i--; //minus array element index to get the following brick to move to the disappeared bricks position
}else{
if(brickColor == "black"){
brickColor = "orange";
//drop apple power up only when the paddle is at original length
if(paddle.scaleX == 1){ //when the paddle length is not increased yet
dropApple(bricks[i]);
}
}
else if(brickColor == "orange"){
brickColor = "yellow";
//drop banana power up only when the second ball does not exist
if(ball2 == null){ //when there is no second ball
dropBanana(bricks[i]);
}
}
else if(brickColor == "yellow"){
brickColor = "blue";
}
bricks[i].name = brickColor;
bricks[i].graphics.beginFill(brickColor);
bricks[i].graphics.drawRect(0, 0, BRICKS_WIDTH, BRICKS_HEIGHT);
bricks[i].graphics.endFill(); //complete the drawing of the shape
}
}
}
//If no bricks left, you win
if(bricks.length == 0){
stage.update();
createjs.Sound.play("soundDeath");
// NOTE(review): timer comes from setInterval; clearInterval is the
// matching call — confirm before changing.
clearTimeout(timer);
gameStarted = false;
alert("You won!");
window.location.href = "win.html";
}
//if the red ball exists
if(ball != null){
if(checkCollision(ball,paddle))
{
createjs.Sound.play("soundShot");
newBallXSpeedAfterCollision(ball,paddle);
}
//Check if we've reached the walls
if(ball.x+BALL_RADIUS>=stage.canvas.width) //if the ball's right part hits the right side of the screen
{
ball.x = stage.canvas.width-BALL_RADIUS;//to solve the problem of ball passing by a few pixels of entire width of stage
ball.right = false; //we should change the direction of ball to left
}
if(ball.x-BALL_RADIUS<=0) //ball hits the left side of wall
{
ball.x = BALL_RADIUS;
ball.right = true; //move it to the right
}
if(ball.y-BALL_RADIUS<=0) //arrive the top
{
ball.y = BALL_RADIUS;
ball.up = false; //move it down
}
if(ball.y+BALL_RADIUS>=stage.canvas.height)
{
stage.removeChild(ball);
ball = null;
//loseLife();
}else{
ball.lastX = ball.x; //set ball's last x value to current x value
ball.lastY = ball.y;
}
}
if(ball2 != null){
if(checkCollision(ball2,paddle))
{
createjs.Sound.play("soundShot");
newBallXSpeedAfterCollision(ball2,paddle);
}
if(ball2.x+BALL_RADIUS>=stage.canvas.width) //if the ball's right part hits the right side of the screen
{
ball2.x = stage.canvas.width-BALL_RADIUS;//to solve the problem of ball passing by a few pixels of entire width of stage
ball2.right = false; //we should change the direction of ball to left
}
if(ball2.x-BALL_RADIUS<=0) //ball hits the left side of wall
{
ball2.x = BALL_RADIUS;
ball2.right = true; //move it to the right
}
if(ball2.y-BALL_RADIUS<=0) //arrive the top
{
ball2.y = BALL_RADIUS;
ball2.up = false; //move it down
}
if(ball2.y+BALL_RADIUS>=stage.canvas.height)
{
stage.removeChild(ball2);
ball2 = null;
}else{
ball2.lastX = ball2.x; //set ball's last x value to current x value
ball2.lastY = ball2.y;
}
}
// Both balls fell below the paddle this frame: the life is lost.
if(ball == null && ball2 == null){
loseLife();
}
//Check if the paddle catch the power up (apple)
for(var i = 0; i < appleArray.length ; i++){
var apple = appleArray[i];
if(checkCollisionForPowerUps(apple,paddle)){
//remove the apple
stage.removeChild(apple);
appleArray.splice(i,1);
//extend the paddle width
paddle.width = 150;
paddle.scaleX = 1.5;
stage.update();
}
}
//Check if the paddle hit the power up (banana)
for(var i = 0; i < bananaArray.length ; i++){
var banana = bananaArray[i];
if(checkCollisionForPowerUps(banana,paddle)){
//remove the banana
stage.removeChild(banana);
bananaArray.splice(i,1);
//shot another ball when there is not a second ball on the screen
if(ball2 == null){
ball2 = new createjs.Shape();
ball2.graphics.beginFill("Green").drawCircle(0,0, BALL_RADIUS); //circle radius is 8px
ball2.x = paddle.x;
ball2.y = paddle.y - PADDLE_HEIGHT/2 - BALL_RADIUS;
ball2.xSpeed = 3;
ball2.ySpeed = 3;
ball2.up = true;
ball2.right = true;
stage.addChild(ball2);
}
stage.update();
}
}
stage.update();
}
function checkCollisionForPowerUps(powerUpElement,hitElement)
{
    // Axis-aligned overlap test between a falling power-up and the paddle.
    // The power-up is treated as a 40px-wide box whose bottom edge sits
    // 50px below its registration point; it "hits" when that bottom edge
    // lies inside the hit element's rectangle.
    var bounds = hitElement.getBounds();
    var halfWidth = bounds.width / 2;
    var halfHeight = bounds.height / 2;
    var leftBorder = hitElement.x - halfWidth;
    var rightBorder = hitElement.x + halfWidth;
    var topBorder = hitElement.y - halfHeight;
    var bottomBorder = hitElement.y + halfHeight;
    var powerUpLeftBorder = powerUpElement.x - 20;
    var powerUpRightBorder = powerUpElement.x + 20;
    var powerUpBottomBorder = powerUpElement.y + 50;
    return (powerUpLeftBorder <= rightBorder) &&
           (powerUpRightBorder >= leftBorder) &&
           (powerUpBottomBorder <= bottomBorder) &&
           (powerUpBottomBorder >= topBorder);
}
function checkCollision(ballElement,hitElement)
{
// Swept AABB collision between a ball and a rectangular element (brick
// or paddle). Compares the ball's current box with its previous-frame
// box (lastX/lastY) to work out which side was crossed this tick,
// flips the matching direction flag, and snaps the ball just outside
// that side so it cannot tunnel into the rectangle.
// Returns true when the ball overlaps the element, false otherwise.
//for the hit element ;get bounds to get the rectangle of a general element
var leftBorder = (hitElement.x - hitElement.getBounds().width/2); //get the left border
var rightBorder = (hitElement.x + hitElement.getBounds().width/2);
var topBorder = (hitElement.y - hitElement.getBounds().height/2);
var bottomBorder = (hitElement.y + hitElement.getBounds().height/2);
// The ball's box on the previous frame, used to tell which border was
// crossed between ticks.
var previousBallLeftBorder = ballElement.lastX - BALL_RADIUS;
var previousBallRightBorder = ballElement.lastX + BALL_RADIUS;
var previousBallTopBorder = ballElement.lastY - BALL_RADIUS;
var previousBallBottomBorder = ballElement.lastY + BALL_RADIUS;
//current left,right top and bottom border of ball
var ballLeftBorder = ballElement.x - BALL_RADIUS;
var ballRightBorder = ballElement.x + BALL_RADIUS;
var ballTopBorder = ballElement.y - BALL_RADIUS;
var ballBottomBorder = ballElement.y + BALL_RADIUS;
// if the statement is true, the ball is inside of rectangle of the hit element
if((ballLeftBorder<=rightBorder) && (ballRightBorder >= leftBorder) && (ballTopBorder <= bottomBorder) && (ballBottomBorder >= topBorder))
{
if((ballTopBorder <= bottomBorder)&&(previousBallTopBorder > bottomBorder))
{
//the if statement above ensures that ball Hit from the bottom
ballElement.up = false;
ballElement.y = bottomBorder + BALL_RADIUS; //to make sure ball not entering the inside
}
if((ballBottomBorder >= topBorder)&&(previousBallBottomBorder<topBorder))
{
//Hit from the top
ballElement.up = true;
ballElement.y = topBorder - BALL_RADIUS;
}
if((ballLeftBorder<=rightBorder)&&(previousBallLeftBorder>rightBorder))
{
//Hit from the right
ballElement.right = true;
ballElement.x = rightBorder + BALL_RADIUS;
}
if((ballRightBorder >= leftBorder)&&(previousBallRightBorder < leftBorder))
{
//Hit from the left
ballElement.right = false;
ballElement.x = leftBorder - BALL_RADIUS;
}
//update the lastx and lasty
ballElement.lastX = ballElement.x;
ballElement.lastY = ballElement.y;
return true;
}
return false;
}
function dropApple(hitElement){
    // Spawn a falling apple power-up (longer paddle) just below the brick.
    var sprite = new createjs.Sprite(spriteSheet, "apple");
    sprite.x = hitElement.x;
    sprite.y = hitElement.y + 20;
    stage.addChild(sprite);
    appleArray.push(sprite);
}
function dropBanana(hitElement){
    // Spawn a falling banana power-up (second ball) just below the brick.
    var sprite = new createjs.Sprite(spriteSheet, "banana");
    sprite.x = hitElement.x;
    sprite.y = hitElement.y + 20;
    stage.addChild(sprite);
    bananaArray.push(sprite);
}
function newBallXSpeedAfterCollision(ballElement,hitElement)
{
    // Rebound the ball off the paddle with a horizontal speed proportional
    // to how far from the paddle's centre it struck: a centre hit gives
    // xSpeed 0, an edge hit gives FULL_X_SPEED, and the ball is sent
    // outward (left half -> left, right half -> right).
    // FIX: added the missing statement terminators (the originals relied
    // on ASI) and made both branches set `right` before `xSpeed` for
    // consistency; the computed values are unchanged.
    var startPoint = hitElement.x - hitElement.getBounds().width/2;
    var midPoint = hitElement.x;
    var endPoint = hitElement.x + hitElement.getBounds().width/2;
    if (ballElement.x < midPoint) { //once we hit the left part
        ballElement.right = false;
        ballElement.xSpeed = FULL_X_SPEED - ((ballElement.x - startPoint)/(midPoint - startPoint)) * FULL_X_SPEED;
    } else { //once we hit the right part (or dead centre)
        ballElement.right = true;
        ballElement.xSpeed = FULL_X_SPEED - ((endPoint - ballElement.x)/(endPoint - midPoint)) * FULL_X_SPEED;
    }
}
function createBrickGrid()
{
// Rebuild the brick field for the current level: clear any existing
// bricks, then lay out a 14-column grid. The level config decides row
// count, whether alternate rows are skipped, and which column ranges
// actually receive bricks.
removeAllBricks();
var currentLevelConfig = levelArray[currentGameLevel-1];
// NOTE: the outer for has no braces — the inner for loop is its single
// statement, so the nesting below is intentional.
for(var i = 0;i<14;i++) //i value is in charge of x value, means column
for(var j = 0;j<currentLevelConfig.rowCount;j++) //j value in charge of y value, means row
{
//If line break is set, and current row index is on even number, skip this row
if(currentLevelConfig.rowbreak == true && j%2 == 0){
continue;
}
//Only draw the columns of bricks as per the level configuration
if( (i>=currentLevelConfig.leftSpaceStartIndex && i<=currentLevelConfig.leftSpaceEndIndex) ||
(i>=currentLevelConfig.rightSpaceStartIndex && i<=currentLevelConfig.rightSpaceEndIndex)
){
var randomColor = getBrickColor(); //
//10 is the space between each brick
createBrick(i*(BRICKS_WIDTH+10)+40,j*(BRICKS_HEIGHT+5)+20, randomColor);
}
}
}
function getBrickColor(){
    // Randomly pick a brick colour. Probabilities: blue 40%, yellow 20%,
    // orange 20%, black 20% — blue is the weakest brick, black the toughest.
    var roll = Math.random();
    if (roll > 0.6) { return "blue"; }   //level 1 brick
    if (roll > 0.4) { return "yellow"; } //level 2 brick
    if (roll > 0.2) { return "orange"; } //level 3 brick
    return "black";                      //level 4 brick
}
//Create single brick
function createBrick(x,y,color)
{
    // Draw one brick, centre its registration point, place it at (x, y),
    // and track it both on the stage and in the bricks array.
    var brick = new createjs.Shape();
    brick.graphics.beginFill(color);
    brick.graphics.drawRect(0, 0, BRICKS_WIDTH, BRICKS_HEIGHT);
    brick.graphics.endFill();
    // The colour doubles as the brick's "strength" tag read on collision.
    brick.name = color;
    // Centred registration point lets the destroy tween shrink inward.
    brick.regX = BRICKS_WIDTH/2;
    brick.regY = BRICKS_HEIGHT/2;
    brick.x = x;
    brick.y = y;
    // Explicit bounds are required for checkCollision's getBounds() call.
    brick.setBounds(brick.regX, brick.regY, BRICKS_WIDTH, BRICKS_HEIGHT);
    stage.addChild(brick);
    bricks.push(brick);
}
function removeAllBricks(){
    // Destroy every remaining brick and leave the tracking array empty.
    while (bricks.length > 0) {
        destroyBrickInstantly(bricks[0]);
        bricks.splice(0, 1);
    }
}
function createBall()
{
    // Create the primary (red) ball, parked on top of the paddle with no
    // velocity; startLevel() later gives it speed.
    ball = new createjs.Shape();
    ball.graphics.beginFill("Red").drawCircle(0, 0, BALL_RADIUS); //circle radius is 8px
    ball.x = paddle.x;
    ball.y = paddle.y - PADDLE_HEIGHT/2 - BALL_RADIUS;
    stage.addChild(ball);
    // Direction flags plus speeds; lastX/lastY remember the previous
    // position for the swept collision test in checkCollision().
    ball.up = true;
    ball.right = true;
    ball.xSpeed = 0;
    ball.ySpeed = 0;
    ball.lastX = 0;
    ball.lastY = 0;
}
function destroyBrick(brick) //even if its destroyed, brick is still there and in the array.
{
// Shrink the brick to nothing over 0.5s, then take it off the stage.
// The caller is responsible for removing it from the bricks array.
createjs.Tween.get(brick,{}).to({scaleX:0,scaleY:0},500) //scale default value is 1 for display object 100% ;set time of animation to 0.5 sec
setTimeout(removeBrickFromScreen,500,brick)
}
function destroyBrickInstantly(brick) //even if its destroyed, brick is still there and in the array.
{
    // Variant of destroyBrick() used when rebuilding the grid: the brick
    // must disappear at once rather than animate for half a second.
    // BUGFIX: removal previously waited 500ms, so "instantly" destroyed
    // bricks lingered invisibly on the stage during a level rebuild.
    createjs.Tween.get(brick,{}).to({scaleX:0,scaleY:0},1); //1ms: effectively immediate
    removeBrickFromScreen(brick);
}
function removeBrickFromScreen(brick)
{
    // Detach an (already shrunken) brick display object from the stage.
    stage.removeChild(brick);
}
function createPaddle()
{
// Build the player's paddle: a black rectangle centred horizontally,
// placed at 90% of the canvas height, with a centred registration
// point and explicit bounds (checkCollision() relies on getBounds()).
paddle = new createjs.Shape();
paddle.width = PADDLE_WIDTH;
paddle.height = PADDLE_HEIGHT;
paddle.graphics.beginFill('#000000').drawRect(0, 0, paddle.width, paddle.height); //chain the commands
// paddle.y = 200;
paddle.x = stage.canvas.width/2 - PADDLE_WIDTH/2; //make sure its in the exact middle
paddle.y = stage.canvas.height * 0.9; //leave some space at the bottom for the ball to fall
paddle.regX = PADDLE_WIDTH/2; //add the registration point to the paddle;exact the middle of paddle
paddle.regY = PADDLE_HEIGHT/2;
paddle.setBounds(paddle.regX,paddle.regY,PADDLE_WIDTH,PADDLE_HEIGHT); //to set this to use getbounds later
stage.addChild(paddle);
}
function changeLevel(level){
    // Switch the selected level (1-4) and rebuild the bricks/timer.
    // Ignored while a round is running so the grid cannot change mid-play.
    if (gameStarted) {
        return;
    }
    currentGameLevel = level;
    createBrickGrid();
    timerLength = levelArray[currentGameLevel-1].timelimit; //per-level time limit
    updateTimerLine();
}
//add button event handler to change levels
document.getElementById("buttonLv1").addEventListener("click", function(){ changeLevel(1); });
document.getElementById("buttonLv2").addEventListener("click", function(){ changeLevel(2); });
document.getElementById("buttonLv3").addEventListener("click", function(){ changeLevel(3); });
document.getElementById("buttonLv4").addEventListener("click", function(){ changeLevel(4); });
};
| {
timerLength--;
console.log(timerLength);
updateTimerLine();
if(timerLength<1){
clearTimeout(timer);
loseLife();
}
} | identifier_body |
scripts.js | window.onload = function(){
var progressDivElement = document.getElementById('currentProgress');
var manifest = [
{src: "assets/fruit.json", id: "sheet1", type: "spritesheet"},
{src: "assets/Game-Break.mp3", id: "soundBreak", type: "sound"},
{src: "assets/Game-Shot.mp3", id: "soundShot",type: "sound"},
{src: "assets/Game-Death.mp3", id: "soundDeath",type: "sound"}
];
//Create loader
var loader = new createjs.LoadQueue(true, "./");
loader.on("fileload", handleFileLoad);
loader.on("progress",handleOverallProgress);
loader.on("complete",handleComplete);
loader.installPlugin(createjs.Sound);
loader.loadManifest(manifest);
var assets = []; //To store the asset files from manifest
var AudioContext; //For audio context
var audioCtx;
var loadingCompleted = false; //Only allow game start after loading is complete
var appleArray = []; //power up for longer paddle
var bananaArray = []; //power up for second ball
var spriteSheet;
function handleFileLoad(event)
{
console.log("File loaded");
assets.push(event);
}
function handleOverallProgress(event)
{
console.log('TOTAL: '+ loader.progress);
progressDivElement.style.width = (loader.progress * 100) + "%";
}
function handleComplete()
{
console.log('Loaded all files in the manifest.');
for (var i = 0; i < assets.length; i++)
{
var event = assets[i];
var result = event.result;
switch (event.item.id)
{
case 'sheet1':
spriteSheet = result;
break;
}
}
loadingCompleted = true;
}
//define paddle and bricks width and height here
const PADDLE_WIDTH = 100;
const PADDLE_HEIGHT = 20;
const BRICKS_WIDTH = 60;
const BRICKS_HEIGHT = 30;
const BALL_RADIUS = 8;
const FULL_X_SPEED = 7;
var stage;
var paddle;
var ball;
var ball2; //second ball
var bricks = [];
//configure game levels
var levelArray = [
{rowCount:1, rowbreak:false, timelimit:90, leftSpaceStartIndex:3, leftSpaceEndIndex:3, rightSpaceStartIndex:10, rightSpaceEndIndex:10},
{rowCount:5, rowbreak:false, timelimit:120, leftSpaceStartIndex:2, leftSpaceEndIndex:5, rightSpaceStartIndex:8, rightSpaceEndIndex:11},
{rowCount:7, rowbreak:false, timelimit:200, leftSpaceStartIndex:0, leftSpaceEndIndex:5, rightSpaceStartIndex:7, rightSpaceEndIndex:13},
{rowCount:11, rowbreak:true, timelimit:30, leftSpaceStartIndex:0, leftSpaceEndIndex:5, rightSpaceStartIndex:8, rightSpaceEndIndex:13},
]
var currentGameLevel = 1; //make level 1 as default for the game
var score = 0;
var lives = 3;
var scoreText;
var gameStarted = false;
//set keyboard left and right movements' initial state
var keyboardMoveLeft = false;
var keyboardMoveRight = false;
//use local storage to store data in between sessions, we add a variable to store the scores data.
var highScore = 0;
var timer;
var timerLength = 100;
var timerText;
//check if this browser supports local storage
if(typeof(Storage) !== "undefined") {
if(localStorage.highScore==undefined) //if its not in the localstorage, then set it to 0
{
localStorage.highScore = 0;
}
highScore = localStorage.highScore; //if it does, add it to the declared variable
} else {
highScore = 0;
}
stage = new createjs.Stage("testCanvas");
createjs.Touch.enable(stage);
createjs.Ticker.setFPS(60);
//createjs.Ticker.addEventListener("tick", tick);
createjs.Ticker.addEventListener("tick", stage);
createjs.Ticker.on("tick", tick);
createScoreText();
createTimerText();
addToScore(0);
createPaddle(); //create paddle before creating ball
createBall();
createBrickGrid();
//keyboard handlers
window.onkeyup = keyUpHandler;
window.onkeydown = keyDownHandler;
//if the user clicks the mouse and when the game is in the pause state, start the game.
stage.on("stagemousedown", function(event)
{
if(loadingCompleted){
startLevel();
}else{
alert("Please wait until loading complete");
}
});
stage.on("stagemousemove", function (event) //move the paddle only if the mouse is moved.
{
if(loadingCompleted){
paddle.x = stage.mouseX;
}
});
//enlarge the stage for mobile users to finger move the paddle
stage.canvas.height = window.innerHeight;
function startLevel()
{
if(!gameStarted)
{
console.log("Start Game");
AudioContext = window.AudioContext || window.webkitAudioContext;
audioCtx = new AudioContext();
if (audioCtx.state !== 'running') {
audioCtx.resume();
}
paddle.scaleX = 1.0;
paddle.width = 100;
gameStarted = true;
ball.xSpeed = 5;
ball.ySpeed = 5;
ball.up = true;
ball.right = true;
timerLength = levelArray[currentGameLevel-1].timelimit; //timer length for each level
timer = setInterval(countDownOnGame, 1000);
}
}
function countDownOnGame(){
    // Runs once per second while a level is active: ticks the countdown
    // display down and ends the life when time runs out.
    timerLength--;
    console.log(timerLength);
    updateTimerLine();
    if(timerLength<1){
        // BUGFIX: the timer was created with setInterval, so stop it with
        // clearInterval (clearTimeout only worked because browsers share
        // one timer-ID pool).
        clearInterval(timer);
        loseLife();
    }
}
function keyDownHandler(e)
{
//hit left arrow
if(e.key =="Left" || e.key =="ArrowLeft"){
if(loadingCompleted) |
}
//hit right arrow
if(e.key =="Right" || e.key =="ArrowRight"){
if(loadingCompleted){
keyboardMoveRight = true;
}
}
if(e.key ==" "){
if(loadingCompleted){
startLevel();
}else{
alert("Please wait until loading complete");
}
}
}
function keyUpHandler(e)
{
//release left arrow
if(e.key =="Left" || e.key =="ArrowLeft"){
keyboardMoveLeft = false;
}
//release right arrow
if(e.key =="Right" || e.key =="ArrowRight"){
keyboardMoveRight = false;
}
}
function addToScore(points)
{
console.log("score added");
score+=points;
if(score > highScore){
highScore = score;
}
updateStatusLine();
}
function updateStatusLine()
{
scoreText.text = "Score: "+score + " / Lives: "+lives+" / High Score: "+highScore;
}
function createScoreText()
{
scoreText = new createjs.Text("Score: 0", "16px Arial", "#000000");
addToScore(0);
scoreText.x = stage.canvas.width/2 - 150;
scoreText.y = stage.canvas.height - 36;
stage.addChild(scoreText);
}
function updateTimerLine() //for display timer on the page
{
timerText.text = "Timer: "+ timerLength ;
}
function createTimerText(){
timerText = new createjs.Text("Timer: 90", "16px Arial", "#000000");
timerText.x = stage.canvas.width/2 - 100;
timerText.y = stage.canvas.height - 15;
stage.addChild(timerText);
}
function loseLife()
{
    // Handle losing a life (both balls gone, or the timer expired):
    // remove the balls, stop the countdown, clear falling power-ups,
    // decrement lives and park a fresh stationary ball on the paddle.
    // When lives reach zero the score resets and the grid is rebuilt.
    console.log("Lost a life");
    stage.removeChild(ball);
    ball = null;
    stage.removeChild(ball2);
    ball2 = null;
    createjs.Sound.play("soundDeath");
    // BUGFIX: the timer was created with setInterval, so stop it with
    // clearInterval (clearTimeout only worked because browsers share the
    // timer-ID pool — not something to rely on).
    clearInterval(timer);
    timerLength = levelArray[currentGameLevel-1].timelimit;
    updateTimerLine();
    lives--;
    updateStatusLine();
    // New stationary ball centred on the paddle until the next launch.
    createBall();
    ball.xSpeed = 0;
    ball.ySpeed = 0;
    ball.x = paddle.x;
    ball.y = paddle.y - PADDLE_HEIGHT/2 - BALL_RADIUS;
    gameStarted = false; //switch to game pause state again
    // BUGFIX: persist the best score, not the current score — writing
    // `score` here could overwrite a higher stored high score.
    localStorage.highScore = highScore;
    // Remove any power-ups that were still falling.
    for(var i=0;i<appleArray.length;i++){
        stage.removeChild(appleArray[i]);
    }
    appleArray = [];
    for(var i=0;i<bananaArray.length;i++){
        stage.removeChild(bananaArray[i]);
    }
    bananaArray = [];
    // No lives left: reset score/lives and rebuild the bricks.
    if(lives==0)
    {
        if(highScore<score)
        {
            highScore = score;
            localStorage.highScore = score;
        }
        lives = 3; //reset the lives
        score = 0; //reset the score
        createBrickGrid(); //reset bricks
    }
    updateStatusLine();
}
function tick(event) //custom tick function
{
// stage.update(); //update the stage manually
//move paddle based on left and right key
if(keyboardMoveLeft)
{
console.log("Keyboard- Left");
paddle.x-=5;
}
if(keyboardMoveRight)
{
console.log("Keyboard- Right");
paddle.x+=5;
}
// one fix to make sure paddle not moving through the walls of stage
if(paddle.x+PADDLE_WIDTH/2>stage.canvas.width)
{
paddle.x = stage.canvas.width - PADDLE_WIDTH/2;
}
if(paddle.x-PADDLE_WIDTH/2<0)
{
paddle.x = PADDLE_WIDTH/2;
}
//make sure ball is in the middle surface of paddle and no action taken when its paused in every tick
if(!gameStarted)
{
ball.x = paddle.x;
ball.y = paddle.y - PADDLE_HEIGHT/2 - BALL_RADIUS;
stage.update();
return;
}
if(ball != null){
if(ball.up) // y axis value starts 0 at the top, so have ball's x and y value removed by 1 in each tick.
{
ball.y -= ball.ySpeed;
}
else
{
ball.y += ball.ySpeed; //if its going down ,move to the bottom of the screen
}
if(ball.right)//if its going right,move to the right.
{
ball.x += ball.xSpeed;
}
else
{
ball.x -= ball.xSpeed;
}
}
if(ball2 != null){
if(ball2.up) // y axis value starts 0 at the top, so have ball's x and y value removed by 1 in each tick.
{
ball2.y -= ball2.ySpeed;
}
else
{
ball2.y += ball2.ySpeed; //if its going down ,move to the bottom of the screen
}
if(ball2.right)//if its going right,move to the right.
{
ball2.x += ball2.xSpeed;
}
else
{
ball2.x -= ball2.xSpeed;
}
}
//move the power up down
for(var i=0;i<appleArray.length;i++){
var apple = appleArray[i];
apple.y++;
}
for(var i=0;i<bananaArray.length;i++){
var banana = bananaArray[i];
banana.y++;
}
//check if each brick in the array collides with the ball
for(var i=0;i<bricks.length;i++)
{
if(ball != null && checkCollision(ball,bricks[i]))
{
addToScore(100);
createjs.Sound.play("soundBreak");//add sound when hit happens
console.log("Brick hit / New Score: "+score);
var brickColor = bricks[i].name;
//Destroy the brick only if this is blue brick, otherwise lower the brick color by 1
if(brickColor == "blue"){
destroyBrick(bricks[i]); //if there is collision, destroy the brick
bricks.splice(i,1); //remove the brick from array
i--; //minus array element index to get the following brick to move to the disappeared bricks position
}else{
if(brickColor == "black"){
brickColor = "orange";
//drop apple power up only when the paddle is at original length
if(paddle.scaleX == 1){
dropApple(bricks[i]);
}
}
else if(brickColor == "orange"){
brickColor = "yellow";
//drop banana power up only when the second ball does not exist
if(ball2 == null){
dropBanana(bricks[i]);
}
}
else if(brickColor == "yellow"){
brickColor = "blue";
}
bricks[i].name = brickColor;
bricks[i].graphics.beginFill(brickColor);
bricks[i].graphics.drawRect(0, 0, BRICKS_WIDTH, BRICKS_HEIGHT);
bricks[i].graphics.endFill(); //complete the drawing of the shape
}
}
}
for(var i=0;i<bricks.length;i++)
{
if(ball2!=null && checkCollision(ball2,bricks[i]))
{
addToScore(100);
createjs.Sound.play("soundBreak");
console.log("Brick hit / New Score: "+score);
var brickColor = bricks[i].name;
//Destroy the brick only if this is blue brick, otherwise lower the brick color by 1
if(brickColor == "blue"){
destroyBrick(bricks[i]); //if there is collision, destroy the brick
bricks.splice(i,1); //remove the brick from array
i--; //minus array element index to get the following brick to move to the disappeared bricks position
}else{
if(brickColor == "black"){
brickColor = "orange";
//drop apple power up only when the paddle is at original length
if(paddle.scaleX == 1){ //when the paddle length is not increased yet
dropApple(bricks[i]);
}
}
else if(brickColor == "orange"){
brickColor = "yellow";
//drop banana power up only when the second ball does not exist
if(ball2 == null){ //when there is no second ball
dropBanana(bricks[i]);
}
}
else if(brickColor == "yellow"){
brickColor = "blue";
}
bricks[i].name = brickColor;
bricks[i].graphics.beginFill(brickColor);
bricks[i].graphics.drawRect(0, 0, BRICKS_WIDTH, BRICKS_HEIGHT);
bricks[i].graphics.endFill(); //complete the drawing of the shape
}
}
}
//If no bricks left, you win
if(bricks.length == 0){
stage.update();
createjs.Sound.play("soundDeath");
clearTimeout(timer);
gameStarted = false;
alert("You won!");
window.location.href = "win.html";
}
//if the red ball exists
if(ball != null){
if(checkCollision(ball,paddle))
{
createjs.Sound.play("soundShot");
newBallXSpeedAfterCollision(ball,paddle);
}
//Check if we've reached the walls
if(ball.x+BALL_RADIUS>=stage.canvas.width) //if the ball's right part hits the right side of the screen
{
ball.x = stage.canvas.width-BALL_RADIUS;//to solve the problem of ball passing by a few pixels of entire width of stage
ball.right = false; //we should change the direction of ball to left
}
if(ball.x-BALL_RADIUS<=0) //ball hits the left side of wall
{
ball.x = BALL_RADIUS;
ball.right = true; //move it to the right
}
if(ball.y-BALL_RADIUS<=0) //arrive the top
{
ball.y = BALL_RADIUS;
ball.up = false; //move it down
}
if(ball.y+BALL_RADIUS>=stage.canvas.height)
{
stage.removeChild(ball);
ball = null;
//loseLife();
}else{
ball.lastX = ball.x; //set ball's last x value to current x value
ball.lastY = ball.y;
}
}
if(ball2 != null){
if(checkCollision(ball2,paddle))
{
createjs.Sound.play("soundShot");
newBallXSpeedAfterCollision(ball2,paddle);
}
if(ball2.x+BALL_RADIUS>=stage.canvas.width) //if the ball's right part hits the right side of the screen
{
ball2.x = stage.canvas.width-BALL_RADIUS;//to solve the problem of ball passing by a few pixels of entire width of stage
ball2.right = false; //we should change the direction of ball to left
}
if(ball2.x-BALL_RADIUS<=0) //ball hits the left side of wall
{
ball2.x = BALL_RADIUS;
ball2.right = true; //move it to the right
}
if(ball2.y-BALL_RADIUS<=0) //arrive the top
{
ball2.y = BALL_RADIUS;
ball2.up = false; //move it down
}
if(ball2.y+BALL_RADIUS>=stage.canvas.height)
{
stage.removeChild(ball2);
ball2 = null;
}else{
ball2.lastX = ball2.x; //set ball's last x value to current x value
ball2.lastY = ball2.y;
}
}
if(ball == null && ball2 == null){
loseLife();
}
//Check if the paddle catch the power up (apple)
for(var i = 0; i < appleArray.length ; i++){
var apple = appleArray[i];
if(checkCollisionForPowerUps(apple,paddle)){
//remove the apple
stage.removeChild(apple);
appleArray.splice(i,1);
//extend the paddle width
paddle.width = 150;
paddle.scaleX = 1.5;
stage.update();
}
}
//Check if the paddle hit the power up (banana)
for(var i = 0; i < bananaArray.length ; i++){
var banana = bananaArray[i];
if(checkCollisionForPowerUps(banana,paddle)){
//remove the banana
stage.removeChild(banana);
bananaArray.splice(i,1);
//shot another ball when there is not a second ball on the screen
if(ball2 == null){
ball2 = new createjs.Shape();
ball2.graphics.beginFill("Green").drawCircle(0,0, BALL_RADIUS); //circle radius is 8px
ball2.x = paddle.x;
ball2.y = paddle.y - PADDLE_HEIGHT/2 - BALL_RADIUS;
ball2.xSpeed = 3;
ball2.ySpeed = 3;
ball2.up = true;
ball2.right = true;
stage.addChild(ball2);
}
stage.update();
}
}
stage.update();
}
function checkCollisionForPowerUps(powerUpElement,hitElement)
{
//for the hit element ;get bounds to get the rectangle of a general element
var leftBorder = (hitElement.x - hitElement.getBounds().width/2); //get the left border
var rightBorder = (hitElement.x + hitElement.getBounds().width/2);
var topBorder = (hitElement.y - hitElement.getBounds().height/2);
var bottomBorder = (hitElement.y + hitElement.getBounds().height/2);
//current left,right top and bottom border of powerUpElement
var powerUpLeftBorder = powerUpElement.x - 20;
var powerUpRightBorder = powerUpElement.x + 20;
//var powerUpTopBorder = powerUpElement.y - 20;
var powerUpBottomBorder = powerUpElement.y + 50;
// if the statement is true, the power up is inside of rectangle of the hit element
if((powerUpLeftBorder<=rightBorder) && (powerUpRightBorder >= leftBorder) && (powerUpBottomBorder <= bottomBorder) && (powerUpBottomBorder >= topBorder))
{
return true;
}
return false;
}
function checkCollision(ballElement,hitElement)
{
//for the hit element ;get bounds to get the rectangle of a general element
var leftBorder = (hitElement.x - hitElement.getBounds().width/2); //get the left border
var rightBorder = (hitElement.x + hitElement.getBounds().width/2);
var topBorder = (hitElement.y - hitElement.getBounds().height/2);
var bottomBorder = (hitElement.y + hitElement.getBounds().height/2);
var previousBallLeftBorder = ballElement.lastX - BALL_RADIUS;
var previousBallRightBorder = ballElement.lastX + BALL_RADIUS;
var previousBallTopBorder = ballElement.lastY - BALL_RADIUS;
var previousBallBottomBorder = ballElement.lastY + BALL_RADIUS;
//current left,right top and bottom border of ball
var ballLeftBorder = ballElement.x - BALL_RADIUS;
var ballRightBorder = ballElement.x + BALL_RADIUS;
var ballTopBorder = ballElement.y - BALL_RADIUS;
var ballBottomBorder = ballElement.y + BALL_RADIUS;
// if the statement is true, the ball is inside of rectangle of the hit element
if((ballLeftBorder<=rightBorder) && (ballRightBorder >= leftBorder) && (ballTopBorder <= bottomBorder) && (ballBottomBorder >= topBorder))
{
if((ballTopBorder <= bottomBorder)&&(previousBallTopBorder > bottomBorder))
{
//the if statement above ensures that ball Hit from the bottom
ballElement.up = false;
ballElement.y = bottomBorder + BALL_RADIUS; //to make sure ball not entering the inside
}
if((ballBottomBorder >= topBorder)&&(previousBallBottomBorder<topBorder))
{
//Hit from the top
ballElement.up = true;
ballElement.y = topBorder - BALL_RADIUS;
}
if((ballLeftBorder<=rightBorder)&&(previousBallLeftBorder>rightBorder))
{
//Hit from the right
ballElement.right = true;
ballElement.x = rightBorder + BALL_RADIUS;
}
if((ballRightBorder >= leftBorder)&&(previousBallRightBorder < leftBorder))
{
//Hit from the left
ballElement.right = false;
ballElement.x = leftBorder - BALL_RADIUS;
}
//update the lastx and lasty
ballElement.lastX = ballElement.x;
ballElement.lastY = ballElement.y;
return true;
}
return false;
}
function dropApple(hitElement){
//create new apple based on the location of the brick
var apple = new createjs.Sprite(spriteSheet, "apple");
apple.x = hitElement.x;
apple.y = hitElement.y + 20;
stage.addChild(apple);
appleArray.push(apple);
}
function dropBanana(hitElement){
//create new banana base on the location of the brick
var banana = new createjs.Sprite(spriteSheet, "banana");
banana.x = hitElement.x ;
banana.y = hitElement.y + 20;
stage.addChild(banana);
bananaArray.push(banana);
}
function newBallXSpeedAfterCollision(ballElement,hitElement)
{
var startPoint = hitElement.x - hitElement.getBounds().width/2;
var midPoint = hitElement.x;
var endPoint = hitElement.x + hitElement.getBounds().width/2;
if(ballElement.x<midPoint) //once we hit left part
{
ballElement.right = false;
ballElement.xSpeed = FULL_X_SPEED - ((ballElement.x - startPoint)/(midPoint-startPoint)) * FULL_X_SPEED
}
else //once we hit the right part
{
ballElement.xSpeed = FULL_X_SPEED - ((endPoint - ballElement.x)/(endPoint-midPoint)) * FULL_X_SPEED
ballElement.right = true;
}
}
function createBrickGrid()
{
removeAllBricks();
var currentLevelConfig = levelArray[currentGameLevel-1];
for(var i = 0;i<14;i++) //i value is in charge of x value, means column
for(var j = 0;j<currentLevelConfig.rowCount;j++) //j value in charge of y value, means row
{
//If line break is set, and current row index is on even number, skip this row
if(currentLevelConfig.rowbreak == true && j%2 == 0){
continue;
}
//Only draw the columns of bricks as per the level configuration
if( (i>=currentLevelConfig.leftSpaceStartIndex && i<=currentLevelConfig.leftSpaceEndIndex) ||
(i>=currentLevelConfig.rightSpaceStartIndex && i<=currentLevelConfig.rightSpaceEndIndex)
){
var randomColor = getBrickColor(); //
//10 is the space between each brick
createBrick(i*(BRICKS_WIDTH+10)+40,j*(BRICKS_HEIGHT+5)+20, randomColor);
}
}
}
function getBrickColor(){
//Use math random to decide the color of this brick
var randomNumber = Math.random();
//if 0.6 - 1 return blue, 0.4-0.6 return yellow, 0.2-0.4 return orange, 0-0.2 return black
if(randomNumber > 0.6){
return "blue"; //level 1 brick
}else if(randomNumber > 0.4){
return "yellow"; //level 2 brick
}else if(randomNumber > 0.2){
return "orange"; //level 3 brick
}else{
return "black"; //level 4 brick
}
}
//Create single brick
function createBrick(x,y,color)
{
var brick = new createjs.Shape();
brick.graphics.beginFill(color);
brick.graphics.drawRect(0, 0, BRICKS_WIDTH, BRICKS_HEIGHT);
brick.graphics.endFill(); //complete the drawing of the shape
//Set the name with color, so that we can use the name to determine the processing of brick hit
brick.name = color;
//change the brick registration point to let it shrink from center instead of top left corner
brick.regX = BRICKS_WIDTH/2;
brick.regY = BRICKS_HEIGHT/2;
//move the brick to see the entire brick
brick.x = x;
brick.y = y;
brick.setBounds(brick.regX,brick.regY,BRICKS_WIDTH,BRICKS_HEIGHT);
stage.addChild(brick); //add created object to the stage
bricks.push(brick); //push each brick to bricks array
}
function removeAllBricks(){
//destroy all bricks
for(var i=0;i<bricks.length;i++){
destroyBrickInstantly(bricks[i]);
bricks.splice(i,1);
i--;
}
}
function createBall()
{
ball = new createjs.Shape();
ball.graphics.beginFill("Red").drawCircle(0,0, BALL_RADIUS); //circle radius is 8px
//move the ball to the middle of the paddle for initial position
ball.x = paddle.x;
ball.y = paddle.y- PADDLE_HEIGHT/2 - BALL_RADIUS; //make sure deduct half of paddle height and ball radius
stage.addChild(ball);
ball.up = true;
ball.right = true; //determine whether ball goes up or down
ball.xSpeed = 0; // initial state
ball.ySpeed = 0; //initial state
//save the previous location of ball
ball.lastX = 0;
ball.lastY = 0;
}
function destroyBrick(brick) //even if its destroyed, brick is still there and in the array.
{
createjs.Tween.get(brick,{}).to({scaleX:0,scaleY:0},500) //scale default value is 1 for display object 100% ;set time of animation to 0.5 sec
setTimeout(removeBrickFromScreen,500,brick)
}
function destroyBrickInstantly(brick) //even if its destroyed, brick is still there and in the array.
{
createjs.Tween.get(brick,{}).to({scaleX:0,scaleY:0},1) //scale default value is 1 for display object 100% ;set time of animation to 0.5 sec
setTimeout(removeBrickFromScreen,500,brick)
}
function removeBrickFromScreen(brick)
{
stage.removeChild(brick)
}
function createPaddle()
{
paddle = new createjs.Shape();
paddle.width = PADDLE_WIDTH;
paddle.height = PADDLE_HEIGHT;
paddle.graphics.beginFill('#000000').drawRect(0, 0, paddle.width, paddle.height); //chain the commands
// paddle.y = 200;
paddle.x = stage.canvas.width/2 - PADDLE_WIDTH/2; //make sure its in the exact middle
paddle.y = stage.canvas.height * 0.9; //leave some space at the bottom for the ball to fall
paddle.regX = PADDLE_WIDTH/2; //add the registration point to the paddle;exact the middle of paddle
paddle.regY = PADDLE_HEIGHT/2;
paddle.setBounds(paddle.regX,paddle.regY,PADDLE_WIDTH,PADDLE_HEIGHT); //to set this to use getbounds later
stage.addChild(paddle);
}
function changeLevel(level){
//do not allow change level during game play
if(!gameStarted){
currentGameLevel = level;
createBrickGrid();
timerLength = levelArray[currentGameLevel-1].timelimit; //different time length for different level
updateTimerLine();
}
}
//add button event handler to change levels
document.getElementById("buttonLv1").addEventListener("click", function(){ changeLevel(1); });
document.getElementById("buttonLv2").addEventListener("click", function(){ changeLevel(2); });
document.getElementById("buttonLv3").addEventListener("click", function(){ changeLevel(3); });
document.getElementById("buttonLv4").addEventListener("click", function(){ changeLevel(4); });
};
| {
keyboardMoveLeft = true;
} | conditional_block |
scripts.js | window.onload = function(){
var progressDivElement = document.getElementById('currentProgress');
var manifest = [
{src: "assets/fruit.json", id: "sheet1", type: "spritesheet"},
{src: "assets/Game-Break.mp3", id: "soundBreak", type: "sound"},
{src: "assets/Game-Shot.mp3", id: "soundShot",type: "sound"},
{src: "assets/Game-Death.mp3", id: "soundDeath",type: "sound"}
];
//Create loader
var loader = new createjs.LoadQueue(true, "./");
loader.on("fileload", handleFileLoad);
loader.on("progress",handleOverallProgress);
loader.on("complete",handleComplete);
loader.installPlugin(createjs.Sound);
loader.loadManifest(manifest);
var assets = []; //To store the asset files from manifest
var AudioContext; //For audio context
var audioCtx;
var loadingCompleted = false; //Only allow game start after loading is complete
var appleArray = []; //power up for longer paddle
var bananaArray = []; //power up for second ball
var spriteSheet;
function handleFileLoad(event)
{
console.log("File loaded");
assets.push(event);
}
function handleOverallProgress(event)
{
console.log('TOTAL: '+ loader.progress);
progressDivElement.style.width = (loader.progress * 100) + "%";
}
function handleComplete()
{
console.log('Loaded all files in the manifest.');
for (var i = 0; i < assets.length; i++)
{
var event = assets[i];
var result = event.result;
switch (event.item.id)
{
case 'sheet1':
spriteSheet = result;
break;
}
}
loadingCompleted = true;
}
//define paddle and bricks width and height here
const PADDLE_WIDTH = 100;
const PADDLE_HEIGHT = 20;
const BRICKS_WIDTH = 60;
const BRICKS_HEIGHT = 30;
const BALL_RADIUS = 8;
const FULL_X_SPEED = 7;
var stage;
var paddle;
var ball;
var ball2; //second ball
var bricks = [];
//configure game levels
var levelArray = [
{rowCount:1, rowbreak:false, timelimit:90, leftSpaceStartIndex:3, leftSpaceEndIndex:3, rightSpaceStartIndex:10, rightSpaceEndIndex:10},
{rowCount:5, rowbreak:false, timelimit:120, leftSpaceStartIndex:2, leftSpaceEndIndex:5, rightSpaceStartIndex:8, rightSpaceEndIndex:11},
{rowCount:7, rowbreak:false, timelimit:200, leftSpaceStartIndex:0, leftSpaceEndIndex:5, rightSpaceStartIndex:7, rightSpaceEndIndex:13},
{rowCount:11, rowbreak:true, timelimit:30, leftSpaceStartIndex:0, leftSpaceEndIndex:5, rightSpaceStartIndex:8, rightSpaceEndIndex:13},
]
var currentGameLevel = 1; //make level 1 as default for the game
var score = 0;
var lives = 3;
var scoreText;
var gameStarted = false;
//set keyboard left and right movements' initial state
var keyboardMoveLeft = false;
var keyboardMoveRight = false;
//use local storage to store data in between sessions, we add a variable to store the scores data.
var highScore = 0;
var timer;
var timerLength = 100;
var timerText;
//check if this browser supports local storage
if(typeof(Storage) !== "undefined") {
if(localStorage.highScore==undefined) //if its not in the localstorage, then set it to 0
{
localStorage.highScore = 0;
}
highScore = localStorage.highScore; //if it does, add it to the declared variable
} else {
highScore = 0;
}
stage = new createjs.Stage("testCanvas");
createjs.Touch.enable(stage);
createjs.Ticker.setFPS(60);
//createjs.Ticker.addEventListener("tick", tick);
createjs.Ticker.addEventListener("tick", stage);
createjs.Ticker.on("tick", tick);
createScoreText();
createTimerText();
addToScore(0);
createPaddle(); //create paddle before creating ball
createBall();
createBrickGrid();
//keyboard handlers
window.onkeyup = keyUpHandler;
window.onkeydown = keyDownHandler;
//if the user clicks the mouse and when the game is in the pause state, start the game.
stage.on("stagemousedown", function(event)
{
if(loadingCompleted){
startLevel();
}else{
alert("Please wait until loading complete");
}
});
stage.on("stagemousemove", function (event) //move the paddle only if the mouse is moved.
{
if(loadingCompleted){
paddle.x = stage.mouseX;
}
});
//enlarge the stage for mobile users to finger move the paddle
stage.canvas.height = window.innerHeight;
function startLevel()
{
if(!gameStarted)
{
console.log("Start Game");
AudioContext = window.AudioContext || window.webkitAudioContext;
audioCtx = new AudioContext();
if (audioCtx.state !== 'running') {
audioCtx.resume();
}
paddle.scaleX = 1.0;
paddle.width = 100;
gameStarted = true;
ball.xSpeed = 5;
ball.ySpeed = 5;
ball.up = true;
ball.right = true;
timerLength = levelArray[currentGameLevel-1].timelimit; //timer length for each level
timer = setInterval(countDownOnGame, 1000);
}
}
function countDownOnGame(){
timerLength--;
console.log(timerLength);
updateTimerLine();
if(timerLength<1){
clearTimeout(timer);
loseLife();
}
}
function keyDownHandler(e)
{
//hit left arrow
if(e.key =="Left" || e.key =="ArrowLeft"){
if(loadingCompleted){
keyboardMoveLeft = true;
}
}
//hit right arrow
if(e.key =="Right" || e.key =="ArrowRight"){
if(loadingCompleted){
keyboardMoveRight = true;
}
}
if(e.key ==" "){
if(loadingCompleted){
startLevel();
}else{
alert("Please wait until loading complete");
}
}
}
function keyUpHandler(e)
{
//release left arrow
if(e.key =="Left" || e.key =="ArrowLeft"){
keyboardMoveLeft = false;
}
//release right arrow
if(e.key =="Right" || e.key =="ArrowRight"){
keyboardMoveRight = false;
}
}
function addToScore(points)
{
console.log("score added");
score+=points;
if(score > highScore){
highScore = score;
}
updateStatusLine();
}
function updateStatusLine()
{
scoreText.text = "Score: "+score + " / Lives: "+lives+" / High Score: "+highScore;
}
function createScoreText()
{
scoreText = new createjs.Text("Score: 0", "16px Arial", "#000000");
addToScore(0);
scoreText.x = stage.canvas.width/2 - 150;
scoreText.y = stage.canvas.height - 36;
stage.addChild(scoreText);
}
function updateTimerLine() //for display timer on the page
{
timerText.text = "Timer: "+ timerLength ;
}
function createTimerText(){
timerText = new createjs.Text("Timer: 90", "16px Arial", "#000000");
timerText.x = stage.canvas.width/2 - 100;
timerText.y = stage.canvas.height - 15;
stage.addChild(timerText);
}
function loseLife()
{
console.log("Lost a life");
stage.removeChild(ball);
ball = null;
stage.removeChild(ball2);
ball2 = null;
createjs.Sound.play("soundDeath");
clearTimeout(timer);
timerLength = levelArray[currentGameLevel-1].timelimit;
updateTimerLine();
lives--;
updateStatusLine();
createBall();
ball.xSpeed = 0;
ball.ySpeed = 0;
ball.x = paddle.x;
ball.y = paddle.y - PADDLE_HEIGHT/2 - BALL_RADIUS;
gameStarted = false; //switch to game pause state again
//reset high score for every life lost
localStorage.highScore = score;
//remove all the power ups
for(var i=0;i<appleArray.length;i++){
var apple = appleArray[i];
//remove the apple
stage.removeChild(apple);
}
appleArray = [];
for(var i=0;i<bananaArray.length;i++){
var banana = bananaArray[i];
//remove the banana
stage.removeChild(banana);
}
bananaArray = [];
//handle when the life is 0, reset the score and restart game
if(lives==0)
{
if(highScore<score)
{
highScore = score;
localStorage.highScore = score;
}
lives = 3; //reset the lives
score = 0; //reset the score
createBrickGrid(); //reset bricks
}
updateStatusLine();
}
function tick(event) //custom tick function
{
// stage.update(); //update the stage manually
//move paddle based on left and right key
if(keyboardMoveLeft)
{
console.log("Keyboard- Left");
paddle.x-=5;
}
if(keyboardMoveRight)
{
console.log("Keyboard- Right");
paddle.x+=5;
}
// one fix to make sure paddle not moving through the walls of stage
if(paddle.x+PADDLE_WIDTH/2>stage.canvas.width)
{
paddle.x = stage.canvas.width - PADDLE_WIDTH/2;
}
if(paddle.x-PADDLE_WIDTH/2<0)
{
paddle.x = PADDLE_WIDTH/2;
}
//make sure ball is in the middle surface of paddle and no action taken when its paused in every tick
if(!gameStarted)
{
ball.x = paddle.x;
ball.y = paddle.y - PADDLE_HEIGHT/2 - BALL_RADIUS;
stage.update();
return;
}
if(ball != null){
if(ball.up) // y axis value starts 0 at the top, so have ball's x and y value removed by 1 in each tick.
{
ball.y -= ball.ySpeed;
}
else
{
ball.y += ball.ySpeed; //if its going down ,move to the bottom of the screen
}
if(ball.right)//if its going right,move to the right.
{
ball.x += ball.xSpeed;
}
else
{
ball.x -= ball.xSpeed;
}
}
if(ball2 != null){
if(ball2.up) // y axis value starts 0 at the top, so have ball's x and y value removed by 1 in each tick.
{
ball2.y -= ball2.ySpeed;
}
else
{
ball2.y += ball2.ySpeed; //if its going down ,move to the bottom of the screen
}
if(ball2.right)//if its going right,move to the right.
{
ball2.x += ball2.xSpeed;
}
else
{
ball2.x -= ball2.xSpeed;
}
}
//move the power up down
for(var i=0;i<appleArray.length;i++){
var apple = appleArray[i];
apple.y++;
}
for(var i=0;i<bananaArray.length;i++){
var banana = bananaArray[i];
banana.y++;
}
//check if each brick in the array collides with the ball
for(var i=0;i<bricks.length;i++)
{
if(ball != null && checkCollision(ball,bricks[i]))
{
addToScore(100);
createjs.Sound.play("soundBreak");//add sound when hit happens
console.log("Brick hit / New Score: "+score);
var brickColor = bricks[i].name;
//Destroy the brick only if this is blue brick, otherwise lower the brick color by 1
if(brickColor == "blue"){
destroyBrick(bricks[i]); //if there is collision, destroy the brick
bricks.splice(i,1); //remove the brick from array
i--; //minus array element index to get the following brick to move to the disappeared bricks position
}else{
if(brickColor == "black"){
brickColor = "orange";
//drop apple power up only when the paddle is at original length
if(paddle.scaleX == 1){
dropApple(bricks[i]);
}
}
else if(brickColor == "orange"){
brickColor = "yellow";
//drop banana power up only when the second ball does not exist
if(ball2 == null){
dropBanana(bricks[i]);
}
}
else if(brickColor == "yellow"){
brickColor = "blue";
}
bricks[i].name = brickColor;
bricks[i].graphics.beginFill(brickColor);
bricks[i].graphics.drawRect(0, 0, BRICKS_WIDTH, BRICKS_HEIGHT);
bricks[i].graphics.endFill(); //complete the drawing of the shape
}
}
}
for(var i=0;i<bricks.length;i++)
{
if(ball2!=null && checkCollision(ball2,bricks[i]))
{
addToScore(100);
createjs.Sound.play("soundBreak");
console.log("Brick hit / New Score: "+score);
var brickColor = bricks[i].name;
//Destroy the brick only if this is blue brick, otherwise lower the brick color by 1
if(brickColor == "blue"){
destroyBrick(bricks[i]); //if there is collision, destroy the brick
bricks.splice(i,1); //remove the brick from array
i--; //minus array element index to get the following brick to move to the disappeared bricks position
}else{
if(brickColor == "black"){
brickColor = "orange";
//drop apple power up only when the paddle is at original length
if(paddle.scaleX == 1){ //when the paddle length is not increased yet
dropApple(bricks[i]);
}
}
else if(brickColor == "orange"){
brickColor = "yellow";
//drop banana power up only when the second ball does not exist
if(ball2 == null){ //when there is no second ball
dropBanana(bricks[i]);
}
}
else if(brickColor == "yellow"){
brickColor = "blue";
}
bricks[i].name = brickColor;
bricks[i].graphics.beginFill(brickColor);
bricks[i].graphics.drawRect(0, 0, BRICKS_WIDTH, BRICKS_HEIGHT);
bricks[i].graphics.endFill(); //complete the drawing of the shape
}
}
}
//If no bricks left, you win
if(bricks.length == 0){
stage.update();
createjs.Sound.play("soundDeath");
clearTimeout(timer);
gameStarted = false;
alert("You won!");
window.location.href = "win.html";
}
//if the red ball exists
if(ball != null){
if(checkCollision(ball,paddle))
{
createjs.Sound.play("soundShot");
newBallXSpeedAfterCollision(ball,paddle);
}
//Check if we've reached the walls
if(ball.x+BALL_RADIUS>=stage.canvas.width) //if the ball's right part hits the right side of the screen
{
ball.x = stage.canvas.width-BALL_RADIUS;//to solve the problem of ball passing by a few pixels of entire width of stage
ball.right = false; //we should change the direction of ball to left
}
if(ball.x-BALL_RADIUS<=0) //ball hits the left side of wall
{
ball.x = BALL_RADIUS;
ball.right = true; //move it to the right
}
if(ball.y-BALL_RADIUS<=0) //arrive the top
{
ball.y = BALL_RADIUS;
ball.up = false; //move it down
}
if(ball.y+BALL_RADIUS>=stage.canvas.height)
{
stage.removeChild(ball);
ball = null;
//loseLife();
}else{
ball.lastX = ball.x; //set ball's last x value to current x value
ball.lastY = ball.y;
}
}
if(ball2 != null){
if(checkCollision(ball2,paddle))
{
createjs.Sound.play("soundShot");
newBallXSpeedAfterCollision(ball2,paddle);
}
if(ball2.x+BALL_RADIUS>=stage.canvas.width) //if the ball's right part hits the right side of the screen
{
ball2.x = stage.canvas.width-BALL_RADIUS;//to solve the problem of ball passing by a few pixels of entire width of stage
ball2.right = false; //we should change the direction of ball to left
}
if(ball2.x-BALL_RADIUS<=0) //ball hits the left side of wall
{
ball2.x = BALL_RADIUS;
ball2.right = true; //move it to the right
}
if(ball2.y-BALL_RADIUS<=0) //arrive the top
{
ball2.y = BALL_RADIUS;
ball2.up = false; //move it down
}
if(ball2.y+BALL_RADIUS>=stage.canvas.height)
{
stage.removeChild(ball2);
ball2 = null;
}else{
ball2.lastX = ball2.x; //set ball's last x value to current x value
ball2.lastY = ball2.y;
}
}
if(ball == null && ball2 == null){
loseLife();
}
//Check if the paddle catch the power up (apple)
for(var i = 0; i < appleArray.length ; i++){
var apple = appleArray[i];
if(checkCollisionForPowerUps(apple,paddle)){
//remove the apple
stage.removeChild(apple);
appleArray.splice(i,1);
//extend the paddle width
paddle.width = 150;
paddle.scaleX = 1.5;
stage.update();
}
}
//Check if the paddle hit the power up (banana)
for(var i = 0; i < bananaArray.length ; i++){
var banana = bananaArray[i];
if(checkCollisionForPowerUps(banana,paddle)){
//remove the banana
stage.removeChild(banana);
bananaArray.splice(i,1);
//shot another ball when there is not a second ball on the screen
if(ball2 == null){
ball2 = new createjs.Shape();
ball2.graphics.beginFill("Green").drawCircle(0,0, BALL_RADIUS); //circle radius is 8px
ball2.x = paddle.x;
ball2.y = paddle.y - PADDLE_HEIGHT/2 - BALL_RADIUS;
ball2.xSpeed = 3;
ball2.ySpeed = 3;
ball2.up = true;
ball2.right = true;
stage.addChild(ball2);
}
stage.update();
}
}
stage.update();
}
function checkCollisionForPowerUps(powerUpElement,hitElement)
{
//for the hit element ;get bounds to get the rectangle of a general element
var leftBorder = (hitElement.x - hitElement.getBounds().width/2); //get the left border
var rightBorder = (hitElement.x + hitElement.getBounds().width/2);
var topBorder = (hitElement.y - hitElement.getBounds().height/2);
var bottomBorder = (hitElement.y + hitElement.getBounds().height/2);
//current left,right top and bottom border of powerUpElement
var powerUpLeftBorder = powerUpElement.x - 20;
var powerUpRightBorder = powerUpElement.x + 20;
//var powerUpTopBorder = powerUpElement.y - 20;
var powerUpBottomBorder = powerUpElement.y + 50;
// if the statement is true, the power up is inside of rectangle of the hit element
if((powerUpLeftBorder<=rightBorder) && (powerUpRightBorder >= leftBorder) && (powerUpBottomBorder <= bottomBorder) && (powerUpBottomBorder >= topBorder))
{
return true;
}
return false;
}
function checkCollision(ballElement,hitElement)
{
//for the hit element ;get bounds to get the rectangle of a general element
var leftBorder = (hitElement.x - hitElement.getBounds().width/2); //get the left border
var rightBorder = (hitElement.x + hitElement.getBounds().width/2);
var topBorder = (hitElement.y - hitElement.getBounds().height/2);
var bottomBorder = (hitElement.y + hitElement.getBounds().height/2);
var previousBallLeftBorder = ballElement.lastX - BALL_RADIUS;
var previousBallRightBorder = ballElement.lastX + BALL_RADIUS;
var previousBallTopBorder = ballElement.lastY - BALL_RADIUS;
var previousBallBottomBorder = ballElement.lastY + BALL_RADIUS;
//current left,right top and bottom border of ball
var ballLeftBorder = ballElement.x - BALL_RADIUS;
var ballRightBorder = ballElement.x + BALL_RADIUS;
var ballTopBorder = ballElement.y - BALL_RADIUS;
var ballBottomBorder = ballElement.y + BALL_RADIUS;
// if the statement is true, the ball is inside of rectangle of the hit element
if((ballLeftBorder<=rightBorder) && (ballRightBorder >= leftBorder) && (ballTopBorder <= bottomBorder) && (ballBottomBorder >= topBorder))
{
if((ballTopBorder <= bottomBorder)&&(previousBallTopBorder > bottomBorder))
{
//the if statement above ensures that ball Hit from the bottom
ballElement.up = false;
ballElement.y = bottomBorder + BALL_RADIUS; //to make sure ball not entering the inside
}
if((ballBottomBorder >= topBorder)&&(previousBallBottomBorder<topBorder))
{
//Hit from the top
ballElement.up = true;
ballElement.y = topBorder - BALL_RADIUS;
}
if((ballLeftBorder<=rightBorder)&&(previousBallLeftBorder>rightBorder))
{
//Hit from the right
ballElement.right = true;
ballElement.x = rightBorder + BALL_RADIUS;
}
if((ballRightBorder >= leftBorder)&&(previousBallRightBorder < leftBorder))
{
//Hit from the left
ballElement.right = false;
ballElement.x = leftBorder - BALL_RADIUS;
}
//update the lastx and lasty
ballElement.lastX = ballElement.x;
ballElement.lastY = ballElement.y;
return true;
}
return false;
}
function dropApple(hitElement){
//create new apple based on the location of the brick
var apple = new createjs.Sprite(spriteSheet, "apple");
apple.x = hitElement.x;
apple.y = hitElement.y + 20;
stage.addChild(apple);
appleArray.push(apple);
}
function dropBanana(hitElement){
//create new banana base on the location of the brick
var banana = new createjs.Sprite(spriteSheet, "banana");
banana.x = hitElement.x ;
banana.y = hitElement.y + 20;
stage.addChild(banana);
bananaArray.push(banana);
}
function newBallXSpeedAfterCollision(ballElement,hitElement)
{
var startPoint = hitElement.x - hitElement.getBounds().width/2;
var midPoint = hitElement.x;
var endPoint = hitElement.x + hitElement.getBounds().width/2;
if(ballElement.x<midPoint) //once we hit left part
{
ballElement.right = false;
ballElement.xSpeed = FULL_X_SPEED - ((ballElement.x - startPoint)/(midPoint-startPoint)) * FULL_X_SPEED
}
else //once we hit the right part
{
ballElement.xSpeed = FULL_X_SPEED - ((endPoint - ballElement.x)/(endPoint-midPoint)) * FULL_X_SPEED
ballElement.right = true;
}
}
function createBrickGrid()
{
removeAllBricks();
var currentLevelConfig = levelArray[currentGameLevel-1];
for(var i = 0;i<14;i++) //i value is in charge of x value, means column
for(var j = 0;j<currentLevelConfig.rowCount;j++) //j value in charge of y value, means row
{
//If line break is set, and current row index is on even number, skip this row
if(currentLevelConfig.rowbreak == true && j%2 == 0){
continue;
}
//Only draw the columns of bricks as per the level configuration
if( (i>=currentLevelConfig.leftSpaceStartIndex && i<=currentLevelConfig.leftSpaceEndIndex) ||
(i>=currentLevelConfig.rightSpaceStartIndex && i<=currentLevelConfig.rightSpaceEndIndex)
){
var randomColor = getBrickColor(); //
//10 is the space between each brick
createBrick(i*(BRICKS_WIDTH+10)+40,j*(BRICKS_HEIGHT+5)+20, randomColor);
}
}
}
function getBrickColor(){
//Use math random to decide the color of this brick
var randomNumber = Math.random();
//if 0.6 - 1 return blue, 0.4-0.6 return yellow, 0.2-0.4 return orange, 0-0.2 return black
if(randomNumber > 0.6){
return "blue"; //level 1 brick
}else if(randomNumber > 0.4){
return "yellow"; //level 2 brick
}else if(randomNumber > 0.2){
return "orange"; //level 3 brick
}else{
return "black"; //level 4 brick
}
}
//Create single brick
function createBrick(x,y,color)
{
var brick = new createjs.Shape();
brick.graphics.beginFill(color);
brick.graphics.drawRect(0, 0, BRICKS_WIDTH, BRICKS_HEIGHT);
brick.graphics.endFill(); //complete the drawing of the shape
//Set the name with color, so that we can use the name to determine the processing of brick hit
brick.name = color;
//change the brick registration point to let it shrink from center instead of top left corner
brick.regX = BRICKS_WIDTH/2;
brick.regY = BRICKS_HEIGHT/2;
//move the brick to see the entire brick
brick.x = x;
brick.y = y;
brick.setBounds(brick.regX,brick.regY,BRICKS_WIDTH,BRICKS_HEIGHT);
stage.addChild(brick); //add created object to the stage
bricks.push(brick); //push each brick to bricks array
}
function removeAllBricks(){
//destroy all bricks
for(var i=0;i<bricks.length;i++){
destroyBrickInstantly(bricks[i]);
bricks.splice(i,1);
i--;
}
}
function createBall()
{
ball = new createjs.Shape();
ball.graphics.beginFill("Red").drawCircle(0,0, BALL_RADIUS); //circle radius is 8px
//move the ball to the middle of the paddle for initial position
ball.x = paddle.x;
ball.y = paddle.y- PADDLE_HEIGHT/2 - BALL_RADIUS; //make sure deduct half of paddle height and ball radius
stage.addChild(ball);
ball.up = true;
ball.right = true; //determine whether ball goes up or down
ball.xSpeed = 0; // initial state
ball.ySpeed = 0; //initial state
//save the previous location of ball
ball.lastX = 0;
ball.lastY = 0;
}
function | (brick) //even if its destroyed, brick is still there and in the array.
{
createjs.Tween.get(brick,{}).to({scaleX:0,scaleY:0},500) //scale default value is 1 for display object 100% ;set time of animation to 0.5 sec
setTimeout(removeBrickFromScreen,500,brick)
}
function destroyBrickInstantly(brick) //even if its destroyed, brick is still there and in the array.
{
createjs.Tween.get(brick,{}).to({scaleX:0,scaleY:0},1) //scale default value is 1 for display object 100% ;set time of animation to 0.5 sec
setTimeout(removeBrickFromScreen,500,brick)
}
function removeBrickFromScreen(brick)
{
stage.removeChild(brick)
}
function createPaddle()
{
paddle = new createjs.Shape();
paddle.width = PADDLE_WIDTH;
paddle.height = PADDLE_HEIGHT;
paddle.graphics.beginFill('#000000').drawRect(0, 0, paddle.width, paddle.height); //chain the commands
// paddle.y = 200;
paddle.x = stage.canvas.width/2 - PADDLE_WIDTH/2; //make sure its in the exact middle
paddle.y = stage.canvas.height * 0.9; //leave some space at the bottom for the ball to fall
paddle.regX = PADDLE_WIDTH/2; //add the registration point to the paddle;exact the middle of paddle
paddle.regY = PADDLE_HEIGHT/2;
paddle.setBounds(paddle.regX,paddle.regY,PADDLE_WIDTH,PADDLE_HEIGHT); //to set this to use getbounds later
stage.addChild(paddle);
}
function changeLevel(level){
//do not allow change level during game play
if(!gameStarted){
currentGameLevel = level;
createBrickGrid();
timerLength = levelArray[currentGameLevel-1].timelimit; //different time length for different level
updateTimerLine();
}
}
//add button event handler to change levels
document.getElementById("buttonLv1").addEventListener("click", function(){ changeLevel(1); });
document.getElementById("buttonLv2").addEventListener("click", function(){ changeLevel(2); });
document.getElementById("buttonLv3").addEventListener("click", function(){ changeLevel(3); });
document.getElementById("buttonLv4").addEventListener("click", function(){ changeLevel(4); });
};
| destroyBrick | identifier_name |
scripts.js | window.onload = function(){
var progressDivElement = document.getElementById('currentProgress');
var manifest = [
{src: "assets/fruit.json", id: "sheet1", type: "spritesheet"},
{src: "assets/Game-Break.mp3", id: "soundBreak", type: "sound"},
{src: "assets/Game-Shot.mp3", id: "soundShot",type: "sound"},
{src: "assets/Game-Death.mp3", id: "soundDeath",type: "sound"}
];
//Create loader
var loader = new createjs.LoadQueue(true, "./");
loader.on("fileload", handleFileLoad);
loader.on("progress",handleOverallProgress);
loader.on("complete",handleComplete);
loader.installPlugin(createjs.Sound);
loader.loadManifest(manifest);
var assets = []; //To store the asset files from manifest
var AudioContext; //For audio context
var audioCtx;
var loadingCompleted = false; //Only allow game start after loading is complete
var appleArray = []; //power up for longer paddle
var bananaArray = []; //power up for second ball
var spriteSheet;
function handleFileLoad(event)
{
console.log("File loaded");
assets.push(event);
}
function handleOverallProgress(event)
{
console.log('TOTAL: '+ loader.progress);
progressDivElement.style.width = (loader.progress * 100) + "%";
}
function handleComplete()
{
console.log('Loaded all files in the manifest.');
for (var i = 0; i < assets.length; i++)
{
var event = assets[i];
var result = event.result;
switch (event.item.id)
{
case 'sheet1':
spriteSheet = result;
break;
}
}
loadingCompleted = true;
}
//define paddle and bricks width and height here
const PADDLE_WIDTH = 100;
const PADDLE_HEIGHT = 20;
const BRICKS_WIDTH = 60;
const BRICKS_HEIGHT = 30;
const BALL_RADIUS = 8;
const FULL_X_SPEED = 7;
var stage;
var paddle;
var ball;
var ball2; //second ball
var bricks = [];
//configure game levels
var levelArray = [
{rowCount:1, rowbreak:false, timelimit:90, leftSpaceStartIndex:3, leftSpaceEndIndex:3, rightSpaceStartIndex:10, rightSpaceEndIndex:10},
{rowCount:5, rowbreak:false, timelimit:120, leftSpaceStartIndex:2, leftSpaceEndIndex:5, rightSpaceStartIndex:8, rightSpaceEndIndex:11},
{rowCount:7, rowbreak:false, timelimit:200, leftSpaceStartIndex:0, leftSpaceEndIndex:5, rightSpaceStartIndex:7, rightSpaceEndIndex:13},
{rowCount:11, rowbreak:true, timelimit:30, leftSpaceStartIndex:0, leftSpaceEndIndex:5, rightSpaceStartIndex:8, rightSpaceEndIndex:13},
]
var currentGameLevel = 1; //make level 1 as default for the game
var score = 0;
var lives = 3;
var scoreText;
var gameStarted = false;
//set keyboard left and right movements' initial state
var keyboardMoveLeft = false;
var keyboardMoveRight = false;
//use local storage to store data in between sessions, we add a variable to store the scores data.
var highScore = 0;
var timer;
var timerLength = 100;
var timerText;
//check if this browser supports local storage
if(typeof(Storage) !== "undefined") {
if(localStorage.highScore==undefined) //if its not in the localstorage, then set it to 0
{
localStorage.highScore = 0;
}
highScore = localStorage.highScore; //if it does, add it to the declared variable
} else {
highScore = 0;
}
stage = new createjs.Stage("testCanvas");
createjs.Touch.enable(stage);
createjs.Ticker.setFPS(60);
//createjs.Ticker.addEventListener("tick", tick);
createjs.Ticker.addEventListener("tick", stage);
createjs.Ticker.on("tick", tick);
createScoreText();
createTimerText();
addToScore(0);
createPaddle(); //create paddle before creating ball
createBall();
createBrickGrid();
//keyboard handlers
window.onkeyup = keyUpHandler;
window.onkeydown = keyDownHandler;
//if the user clicks the mouse and when the game is in the pause state, start the game.
stage.on("stagemousedown", function(event)
{
if(loadingCompleted){
startLevel();
}else{
alert("Please wait until loading complete");
}
});
stage.on("stagemousemove", function (event) //move the paddle only if the mouse is moved.
{
if(loadingCompleted){
paddle.x = stage.mouseX;
}
});
//enlarge the stage for mobile users to finger move the paddle
stage.canvas.height = window.innerHeight;
function startLevel()
{
if(!gameStarted)
{
console.log("Start Game");
AudioContext = window.AudioContext || window.webkitAudioContext;
audioCtx = new AudioContext();
if (audioCtx.state !== 'running') {
audioCtx.resume();
}
paddle.scaleX = 1.0;
paddle.width = 100;
gameStarted = true;
ball.xSpeed = 5;
ball.ySpeed = 5;
ball.up = true;
ball.right = true;
timerLength = levelArray[currentGameLevel-1].timelimit; //timer length for each level
timer = setInterval(countDownOnGame, 1000);
}
}
function countDownOnGame(){
timerLength--;
console.log(timerLength);
updateTimerLine();
if(timerLength<1){
clearTimeout(timer);
loseLife();
}
}
function keyDownHandler(e)
{
//hit left arrow
if(e.key =="Left" || e.key =="ArrowLeft"){
if(loadingCompleted){
keyboardMoveLeft = true;
}
}
//hit right arrow
if(e.key =="Right" || e.key =="ArrowRight"){
if(loadingCompleted){
keyboardMoveRight = true;
}
}
if(e.key ==" "){
if(loadingCompleted){
startLevel();
}else{
alert("Please wait until loading complete");
}
}
}
function keyUpHandler(e)
{
//release left arrow
if(e.key =="Left" || e.key =="ArrowLeft"){
keyboardMoveLeft = false;
}
//release right arrow
if(e.key =="Right" || e.key =="ArrowRight"){
keyboardMoveRight = false;
}
}
function addToScore(points)
{
console.log("score added");
score+=points;
if(score > highScore){
highScore = score;
}
updateStatusLine();
}
function updateStatusLine()
{
scoreText.text = "Score: "+score + " / Lives: "+lives+" / High Score: "+highScore;
}
function createScoreText()
{
scoreText = new createjs.Text("Score: 0", "16px Arial", "#000000");
addToScore(0);
scoreText.x = stage.canvas.width/2 - 150;
scoreText.y = stage.canvas.height - 36;
stage.addChild(scoreText);
}
function updateTimerLine() //for display timer on the page
{
timerText.text = "Timer: "+ timerLength ;
}
function createTimerText(){
timerText = new createjs.Text("Timer: 90", "16px Arial", "#000000");
timerText.x = stage.canvas.width/2 - 100;
timerText.y = stage.canvas.height - 15;
stage.addChild(timerText);
}
function loseLife()
{
console.log("Lost a life");
stage.removeChild(ball);
ball = null;
stage.removeChild(ball2);
ball2 = null;
createjs.Sound.play("soundDeath");
clearTimeout(timer);
timerLength = levelArray[currentGameLevel-1].timelimit;
updateTimerLine();
lives--;
updateStatusLine();
createBall();
ball.xSpeed = 0;
ball.ySpeed = 0;
ball.x = paddle.x;
ball.y = paddle.y - PADDLE_HEIGHT/2 - BALL_RADIUS;
gameStarted = false; //switch to game pause state again
//reset high score for every life lost
localStorage.highScore = score;
//remove all the power ups
for(var i=0;i<appleArray.length;i++){
var apple = appleArray[i];
//remove the apple
stage.removeChild(apple);
}
appleArray = [];
for(var i=0;i<bananaArray.length;i++){
var banana = bananaArray[i];
//remove the banana
stage.removeChild(banana);
}
bananaArray = [];
//handle when the life is 0, reset the score and restart game
if(lives==0)
{
if(highScore<score)
{
highScore = score;
localStorage.highScore = score;
}
lives = 3; //reset the lives
score = 0; //reset the score
createBrickGrid(); //reset bricks
}
| // stage.update(); //update the stage manually
//move paddle based on left and right key
if(keyboardMoveLeft)
{
console.log("Keyboard- Left");
paddle.x-=5;
}
if(keyboardMoveRight)
{
console.log("Keyboard- Right");
paddle.x+=5;
}
// one fix to make sure paddle not moving through the walls of stage
if(paddle.x+PADDLE_WIDTH/2>stage.canvas.width)
{
paddle.x = stage.canvas.width - PADDLE_WIDTH/2;
}
if(paddle.x-PADDLE_WIDTH/2<0)
{
paddle.x = PADDLE_WIDTH/2;
}
//make sure ball is in the middle surface of paddle and no action taken when its paused in every tick
if(!gameStarted)
{
ball.x = paddle.x;
ball.y = paddle.y - PADDLE_HEIGHT/2 - BALL_RADIUS;
stage.update();
return;
}
if(ball != null){
if(ball.up) // y axis value starts 0 at the top, so have ball's x and y value removed by 1 in each tick.
{
ball.y -= ball.ySpeed;
}
else
{
ball.y += ball.ySpeed; //if its going down ,move to the bottom of the screen
}
if(ball.right)//if its going right,move to the right.
{
ball.x += ball.xSpeed;
}
else
{
ball.x -= ball.xSpeed;
}
}
if(ball2 != null){
if(ball2.up) // y axis value starts 0 at the top, so have ball's x and y value removed by 1 in each tick.
{
ball2.y -= ball2.ySpeed;
}
else
{
ball2.y += ball2.ySpeed; //if its going down ,move to the bottom of the screen
}
if(ball2.right)//if its going right,move to the right.
{
ball2.x += ball2.xSpeed;
}
else
{
ball2.x -= ball2.xSpeed;
}
}
//move the power up down
for(var i=0;i<appleArray.length;i++){
var apple = appleArray[i];
apple.y++;
}
for(var i=0;i<bananaArray.length;i++){
var banana = bananaArray[i];
banana.y++;
}
//check if each brick in the array collides with the ball
for(var i=0;i<bricks.length;i++)
{
if(ball != null && checkCollision(ball,bricks[i]))
{
addToScore(100);
createjs.Sound.play("soundBreak");//add sound when hit happens
console.log("Brick hit / New Score: "+score);
var brickColor = bricks[i].name;
//Destroy the brick only if this is blue brick, otherwise lower the brick color by 1
if(brickColor == "blue"){
destroyBrick(bricks[i]); //if there is collision, destroy the brick
bricks.splice(i,1); //remove the brick from array
i--; //minus array element index to get the following brick to move to the disappeared bricks position
}else{
if(brickColor == "black"){
brickColor = "orange";
//drop apple power up only when the paddle is at original length
if(paddle.scaleX == 1){
dropApple(bricks[i]);
}
}
else if(brickColor == "orange"){
brickColor = "yellow";
//drop banana power up only when the second ball does not exist
if(ball2 == null){
dropBanana(bricks[i]);
}
}
else if(brickColor == "yellow"){
brickColor = "blue";
}
bricks[i].name = brickColor;
bricks[i].graphics.beginFill(brickColor);
bricks[i].graphics.drawRect(0, 0, BRICKS_WIDTH, BRICKS_HEIGHT);
bricks[i].graphics.endFill(); //complete the drawing of the shape
}
}
}
for(var i=0;i<bricks.length;i++)
{
if(ball2!=null && checkCollision(ball2,bricks[i]))
{
addToScore(100);
createjs.Sound.play("soundBreak");
console.log("Brick hit / New Score: "+score);
var brickColor = bricks[i].name;
//Destroy the brick only if this is blue brick, otherwise lower the brick color by 1
if(brickColor == "blue"){
destroyBrick(bricks[i]); //if there is collision, destroy the brick
bricks.splice(i,1); //remove the brick from array
i--; //minus array element index to get the following brick to move to the disappeared bricks position
}else{
if(brickColor == "black"){
brickColor = "orange";
//drop apple power up only when the paddle is at original length
if(paddle.scaleX == 1){ //when the paddle length is not increased yet
dropApple(bricks[i]);
}
}
else if(brickColor == "orange"){
brickColor = "yellow";
//drop banana power up only when the second ball does not exist
if(ball2 == null){ //when there is no second ball
dropBanana(bricks[i]);
}
}
else if(brickColor == "yellow"){
brickColor = "blue";
}
bricks[i].name = brickColor;
bricks[i].graphics.beginFill(brickColor);
bricks[i].graphics.drawRect(0, 0, BRICKS_WIDTH, BRICKS_HEIGHT);
bricks[i].graphics.endFill(); //complete the drawing of the shape
}
}
}
//If no bricks left, you win
if(bricks.length == 0){
stage.update();
createjs.Sound.play("soundDeath");
clearTimeout(timer);
gameStarted = false;
alert("You won!");
window.location.href = "win.html";
}
//if the red ball exists
if(ball != null){
if(checkCollision(ball,paddle))
{
createjs.Sound.play("soundShot");
newBallXSpeedAfterCollision(ball,paddle);
}
//Check if we've reached the walls
if(ball.x+BALL_RADIUS>=stage.canvas.width) //if the ball's right part hits the right side of the screen
{
ball.x = stage.canvas.width-BALL_RADIUS;//to solve the problem of ball passing by a few pixels of entire width of stage
ball.right = false; //we should change the direction of ball to left
}
if(ball.x-BALL_RADIUS<=0) //ball hits the left side of wall
{
ball.x = BALL_RADIUS;
ball.right = true; //move it to the right
}
if(ball.y-BALL_RADIUS<=0) //arrive the top
{
ball.y = BALL_RADIUS;
ball.up = false; //move it down
}
if(ball.y+BALL_RADIUS>=stage.canvas.height)
{
stage.removeChild(ball);
ball = null;
//loseLife();
}else{
ball.lastX = ball.x; //set ball's last x value to current x value
ball.lastY = ball.y;
}
}
if(ball2 != null){
if(checkCollision(ball2,paddle))
{
createjs.Sound.play("soundShot");
newBallXSpeedAfterCollision(ball2,paddle);
}
if(ball2.x+BALL_RADIUS>=stage.canvas.width) //if the ball's right part hits the right side of the screen
{
ball2.x = stage.canvas.width-BALL_RADIUS;//to solve the problem of ball passing by a few pixels of entire width of stage
ball2.right = false; //we should change the direction of ball to left
}
if(ball2.x-BALL_RADIUS<=0) //ball hits the left side of wall
{
ball2.x = BALL_RADIUS;
ball2.right = true; //move it to the right
}
if(ball2.y-BALL_RADIUS<=0) //arrive the top
{
ball2.y = BALL_RADIUS;
ball2.up = false; //move it down
}
if(ball2.y+BALL_RADIUS>=stage.canvas.height)
{
stage.removeChild(ball2);
ball2 = null;
}else{
ball2.lastX = ball2.x; //set ball's last x value to current x value
ball2.lastY = ball2.y;
}
}
if(ball == null && ball2 == null){
loseLife();
}
//Check if the paddle catch the power up (apple)
for(var i = 0; i < appleArray.length ; i++){
var apple = appleArray[i];
if(checkCollisionForPowerUps(apple,paddle)){
//remove the apple
stage.removeChild(apple);
appleArray.splice(i,1);
//extend the paddle width
paddle.width = 150;
paddle.scaleX = 1.5;
stage.update();
}
}
//Check if the paddle hit the power up (banana)
for(var i = 0; i < bananaArray.length ; i++){
var banana = bananaArray[i];
if(checkCollisionForPowerUps(banana,paddle)){
//remove the banana
stage.removeChild(banana);
bananaArray.splice(i,1);
//shot another ball when there is not a second ball on the screen
if(ball2 == null){
ball2 = new createjs.Shape();
ball2.graphics.beginFill("Green").drawCircle(0,0, BALL_RADIUS); //circle radius is 8px
ball2.x = paddle.x;
ball2.y = paddle.y - PADDLE_HEIGHT/2 - BALL_RADIUS;
ball2.xSpeed = 3;
ball2.ySpeed = 3;
ball2.up = true;
ball2.right = true;
stage.addChild(ball2);
}
stage.update();
}
}
stage.update();
}
function checkCollisionForPowerUps(powerUpElement,hitElement)
{
//for the hit element ;get bounds to get the rectangle of a general element
var leftBorder = (hitElement.x - hitElement.getBounds().width/2); //get the left border
var rightBorder = (hitElement.x + hitElement.getBounds().width/2);
var topBorder = (hitElement.y - hitElement.getBounds().height/2);
var bottomBorder = (hitElement.y + hitElement.getBounds().height/2);
//current left,right top and bottom border of powerUpElement
var powerUpLeftBorder = powerUpElement.x - 20;
var powerUpRightBorder = powerUpElement.x + 20;
//var powerUpTopBorder = powerUpElement.y - 20;
var powerUpBottomBorder = powerUpElement.y + 50;
// if the statement is true, the power up is inside of rectangle of the hit element
if((powerUpLeftBorder<=rightBorder) && (powerUpRightBorder >= leftBorder) && (powerUpBottomBorder <= bottomBorder) && (powerUpBottomBorder >= topBorder))
{
return true;
}
return false;
}
function checkCollision(ballElement,hitElement)
{
//for the hit element ;get bounds to get the rectangle of a general element
var leftBorder = (hitElement.x - hitElement.getBounds().width/2); //get the left border
var rightBorder = (hitElement.x + hitElement.getBounds().width/2);
var topBorder = (hitElement.y - hitElement.getBounds().height/2);
var bottomBorder = (hitElement.y + hitElement.getBounds().height/2);
var previousBallLeftBorder = ballElement.lastX - BALL_RADIUS;
var previousBallRightBorder = ballElement.lastX + BALL_RADIUS;
var previousBallTopBorder = ballElement.lastY - BALL_RADIUS;
var previousBallBottomBorder = ballElement.lastY + BALL_RADIUS;
//current left,right top and bottom border of ball
var ballLeftBorder = ballElement.x - BALL_RADIUS;
var ballRightBorder = ballElement.x + BALL_RADIUS;
var ballTopBorder = ballElement.y - BALL_RADIUS;
var ballBottomBorder = ballElement.y + BALL_RADIUS;
// if the statement is true, the ball is inside of rectangle of the hit element
if((ballLeftBorder<=rightBorder) && (ballRightBorder >= leftBorder) && (ballTopBorder <= bottomBorder) && (ballBottomBorder >= topBorder))
{
if((ballTopBorder <= bottomBorder)&&(previousBallTopBorder > bottomBorder))
{
//the if statement above ensures that ball Hit from the bottom
ballElement.up = false;
ballElement.y = bottomBorder + BALL_RADIUS; //to make sure ball not entering the inside
}
if((ballBottomBorder >= topBorder)&&(previousBallBottomBorder<topBorder))
{
//Hit from the top
ballElement.up = true;
ballElement.y = topBorder - BALL_RADIUS;
}
if((ballLeftBorder<=rightBorder)&&(previousBallLeftBorder>rightBorder))
{
//Hit from the right
ballElement.right = true;
ballElement.x = rightBorder + BALL_RADIUS;
}
if((ballRightBorder >= leftBorder)&&(previousBallRightBorder < leftBorder))
{
//Hit from the left
ballElement.right = false;
ballElement.x = leftBorder - BALL_RADIUS;
}
//update the lastx and lasty
ballElement.lastX = ballElement.x;
ballElement.lastY = ballElement.y;
return true;
}
return false;
}
function dropApple(hitElement){
//create new apple based on the location of the brick
var apple = new createjs.Sprite(spriteSheet, "apple");
apple.x = hitElement.x;
apple.y = hitElement.y + 20;
stage.addChild(apple);
appleArray.push(apple);
}
function dropBanana(hitElement){
//create new banana base on the location of the brick
var banana = new createjs.Sprite(spriteSheet, "banana");
banana.x = hitElement.x ;
banana.y = hitElement.y + 20;
stage.addChild(banana);
bananaArray.push(banana);
}
function newBallXSpeedAfterCollision(ballElement,hitElement)
{
var startPoint = hitElement.x - hitElement.getBounds().width/2;
var midPoint = hitElement.x;
var endPoint = hitElement.x + hitElement.getBounds().width/2;
if(ballElement.x<midPoint) //once we hit left part
{
ballElement.right = false;
ballElement.xSpeed = FULL_X_SPEED - ((ballElement.x - startPoint)/(midPoint-startPoint)) * FULL_X_SPEED
}
else //once we hit the right part
{
ballElement.xSpeed = FULL_X_SPEED - ((endPoint - ballElement.x)/(endPoint-midPoint)) * FULL_X_SPEED
ballElement.right = true;
}
}
function createBrickGrid()
{
removeAllBricks();
var currentLevelConfig = levelArray[currentGameLevel-1];
for(var i = 0;i<14;i++) //i value is in charge of x value, means column
for(var j = 0;j<currentLevelConfig.rowCount;j++) //j value in charge of y value, means row
{
//If line break is set, and current row index is on even number, skip this row
if(currentLevelConfig.rowbreak == true && j%2 == 0){
continue;
}
//Only draw the columns of bricks as per the level configuration
if( (i>=currentLevelConfig.leftSpaceStartIndex && i<=currentLevelConfig.leftSpaceEndIndex) ||
(i>=currentLevelConfig.rightSpaceStartIndex && i<=currentLevelConfig.rightSpaceEndIndex)
){
var randomColor = getBrickColor(); //
//10 is the space between each brick
createBrick(i*(BRICKS_WIDTH+10)+40,j*(BRICKS_HEIGHT+5)+20, randomColor);
}
}
}
function getBrickColor(){
//Use math random to decide the color of this brick
var randomNumber = Math.random();
//if 0.6 - 1 return blue, 0.4-0.6 return yellow, 0.2-0.4 return orange, 0-0.2 return black
if(randomNumber > 0.6){
return "blue"; //level 1 brick
}else if(randomNumber > 0.4){
return "yellow"; //level 2 brick
}else if(randomNumber > 0.2){
return "orange"; //level 3 brick
}else{
return "black"; //level 4 brick
}
}
//Create single brick
function createBrick(x,y,color)
{
var brick = new createjs.Shape();
brick.graphics.beginFill(color);
brick.graphics.drawRect(0, 0, BRICKS_WIDTH, BRICKS_HEIGHT);
brick.graphics.endFill(); //complete the drawing of the shape
//Set the name with color, so that we can use the name to determine the processing of brick hit
brick.name = color;
//change the brick registration point to let it shrink from center instead of top left corner
brick.regX = BRICKS_WIDTH/2;
brick.regY = BRICKS_HEIGHT/2;
//move the brick to see the entire brick
brick.x = x;
brick.y = y;
brick.setBounds(brick.regX,brick.regY,BRICKS_WIDTH,BRICKS_HEIGHT);
stage.addChild(brick); //add created object to the stage
bricks.push(brick); //push each brick to bricks array
}
function removeAllBricks(){
//destroy all bricks
for(var i=0;i<bricks.length;i++){
destroyBrickInstantly(bricks[i]);
bricks.splice(i,1);
i--;
}
}
function createBall()
{
ball = new createjs.Shape();
ball.graphics.beginFill("Red").drawCircle(0,0, BALL_RADIUS); //circle radius is 8px
//move the ball to the middle of the paddle for initial position
ball.x = paddle.x;
ball.y = paddle.y- PADDLE_HEIGHT/2 - BALL_RADIUS; //make sure deduct half of paddle height and ball radius
stage.addChild(ball);
ball.up = true;
ball.right = true; //determine whether ball goes up or down
ball.xSpeed = 0; // initial state
ball.ySpeed = 0; //initial state
//save the previous location of ball
ball.lastX = 0;
ball.lastY = 0;
}
function destroyBrick(brick) //even if its destroyed, brick is still there and in the array.
{
createjs.Tween.get(brick,{}).to({scaleX:0,scaleY:0},500) //scale default value is 1 for display object 100% ;set time of animation to 0.5 sec
setTimeout(removeBrickFromScreen,500,brick)
}
function destroyBrickInstantly(brick) //even if its destroyed, brick is still there and in the array.
{
createjs.Tween.get(brick,{}).to({scaleX:0,scaleY:0},1) //scale default value is 1 for display object 100% ;set time of animation to 0.5 sec
setTimeout(removeBrickFromScreen,500,brick)
}
function removeBrickFromScreen(brick)
{
stage.removeChild(brick)
}
function createPaddle()
{
paddle = new createjs.Shape();
paddle.width = PADDLE_WIDTH;
paddle.height = PADDLE_HEIGHT;
paddle.graphics.beginFill('#000000').drawRect(0, 0, paddle.width, paddle.height); //chain the commands
// paddle.y = 200;
paddle.x = stage.canvas.width/2 - PADDLE_WIDTH/2; //make sure its in the exact middle
paddle.y = stage.canvas.height * 0.9; //leave some space at the bottom for the ball to fall
paddle.regX = PADDLE_WIDTH/2; //add the registration point to the paddle;exact the middle of paddle
paddle.regY = PADDLE_HEIGHT/2;
paddle.setBounds(paddle.regX,paddle.regY,PADDLE_WIDTH,PADDLE_HEIGHT); //to set this to use getbounds later
stage.addChild(paddle);
}
function changeLevel(level){
//do not allow change level during game play
if(!gameStarted){
currentGameLevel = level;
createBrickGrid();
timerLength = levelArray[currentGameLevel-1].timelimit; //different time length for different level
updateTimerLine();
}
}
//add button event handler to change levels
document.getElementById("buttonLv1").addEventListener("click", function(){ changeLevel(1); });
document.getElementById("buttonLv2").addEventListener("click", function(){ changeLevel(2); });
document.getElementById("buttonLv3").addEventListener("click", function(){ changeLevel(3); });
document.getElementById("buttonLv4").addEventListener("click", function(){ changeLevel(4); });
}; | updateStatusLine();
}
function tick(event) //custom tick function
{ | random_line_split |
spp-final.py | # Author: Omkar Sunkersett
# Purpose: To fetch SPP data and update the database
# Summer Internship at Argonne National Laboratory
import csv, datetime, ftplib, MySQLdb, os, time
class SPP():
def __init__(self, server, path, start_dt, end_dt, prog_dir):
self.files_cached = []
try:
self.ftp_handle = ftplib.FTP(server)
self.ftp_handle.login()
self.path_name = path
self.start_dt = datetime.datetime.strptime(start_dt, "%m-%d-%Y")
self.end_dt = datetime.datetime.strptime(end_dt, "%m-%d-%Y")
self.prog_dir = prog_dir
except Exception as e:
print (str(e))
def fetch_files(self, pres_wd, dir_wd):
try:
try:
self.ftp_handle.voidcmd("NOOP")
except Exception as e:
print (str(e))
self.ftp_handle = ftplib.FTP("pubftp.spp.org")
self.ftp_handle.login()
self.ftp_handle.cwd(pres_wd.replace('\\', '/') + '/' + dir_wd)
dir_lst = [x for x in self.ftp_handle.nlst() if '.' not in x]
if dir_lst == []:
files_lst = [x for x in self.ftp_handle.nlst() if '.' in x and x.split('-')[1] == 'OR' and datetime.datetime.strptime(x.split('-')[3][:8], "%Y%m%d") >= self.start_dt and datetime.datetime.strptime(x.split('-')[3][:8], "%Y%m%d") <= self.end_dt]
if len(files_lst) > 0:
if os.path.isdir(self.prog_dir + '\\cache\\spp' + pres_wd + '\\' + dir_wd) == False:
os.makedirs(self.prog_dir + '\\cache\\spp' + pres_wd + '\\' + dir_wd)
os.chdir(self.prog_dir + '\\cache\\spp' + pres_wd + '\\' + dir_wd)
for file_name in files_lst:
print (os.getcwd() + '\\' + file_name)
self.ftp_handle.retrbinary("RETR " + file_name, open(file_name, 'wb').write)
self.files_cached.append(os.getcwd() + '\\' + file_name)
os.chdir(self.prog_dir + '\\cache\\spp' + pres_wd)
self.ftp_handle.cwd('..')
else:
files_lst = [x for x in self.ftp_handle.nlst() if '.' in x and x.split('-')[1] == 'OR' and datetime.datetime.strptime(x.split('-')[3][:8], "%Y%m%d") >= self.start_dt and datetime.datetime.strptime(x.split('-')[3][:8], "%Y%m%d") <= self.end_dt]
if len(files_lst) > 0:
if os.path.isdir(self.prog_dir + '\\cache\\spp' + pres_wd + '\\' + dir_wd) == False:
os.makedirs(self.prog_dir + '\\cache\\spp' + pres_wd + '\\' + dir_wd)
os.chdir(self.prog_dir + '\\cache\\spp' + pres_wd + '\\' + dir_wd)
for file_name in files_lst:
print (os.getcwd() + '\\' + file_name)
self.ftp_handle.retrbinary("RETR " + file_name, open(file_name, 'wb').write)
self.files_cached.append(os.getcwd() + '\\' + file_name)
for each_dir in dir_lst:
self.fetch_files(self.ftp_handle.pwd().replace('/', '\\'), each_dir)
self.ftp_handle.cwd('..')
except Exception as e:
print (str(e))
def __str__(self):
try:
self.ftp_handle.quit()
os.chdir(self.prog_dir + '\\cache\\spp')
fwrite = open(self.path_name[1:-1].replace('\\', '-') + '.txt', 'w')
fwrite.write('File(s) cached are as follows:\n')
for file_name in self.files_cached:
fwrite.write(file_name + '\n')
fwrite.close()
os.chdir(self.prog_dir)
return ("\nFile(s) cached: " + ', '.join(self.files_cached) + '\n')
except Exception as e:
print (str(e))
def etl_file_data(cache_file):
try:
fread = open(cache_file, 'r')
flines = [x.rstrip('\n') for x in fread.readlines() if x.endswith('.csv\n')]
fread.close()
cnx = MySQLdb.connect(user = 'not-published', passwd = 'not-published', host = 'not-published', db = 'not-published')
cursor = cnx.cursor()
cursor.execute("SELECT market_id FROM market_meta USE INDEX (PRIMARY) WHERE market_name = 'SPP'")
mkt_id = cursor.fetchone()[0]
i = 1
for fname in flines:
print ('Current file: ' + fname + '\t' + 'Percent complete: ' + str(round((float(i)*100)/len(flines), 2)) + ' %')
fread = open(fname, 'r')
frows = csv.reader(fread, delimiter = ',')
next(frows, None)
offer_base_rs = []
ins_perf = True
for row in frows:
if len(row) > 0 and row[2].strip() != '' and row[3].strip() != '' and row[4].strip() != '':
if ins_perf == True:
cursor.execute("SELECT offer_id, identifier_1, identifier_2 FROM offer_base USE INDEX (IDX_OFFER_BASE_MARKET_ID) WHERE market_id = %s", (mkt_id,))
offer_base_rs = list(cursor.fetchall())
if len(offer_base_rs) > 0:
off_check = [x for (x, y, z) in offer_base_rs if (row[2], '0') == (y, z)]
if len(off_check) > 0:
off_id = off_check[0]
ins_perf = False
else:
cursor.execute("INSERT INTO offer_base (identifier_1, identifier_2, region_name, market_id) VALUES (%s, %s, %s, %s)", (row[2], '0', "SPP", mkt_id))
ins_perf = True
cursor.execute("SELECT offer_id FROM offer_base USE INDEX (IDX_OFFER_BASE_ID1_ID2) WHERE identifier_1 = %s AND identifier_2 = %s", (row[2], '0'))
off_id = cursor.fetchone()[0]
else:
cursor.execute("INSERT INTO offer_base (identifier_1, identifier_2, region_name, market_id) VALUES (%s, %s, %s, %s)", (row[2], '0', "SPP", mkt_id))
ins_perf = True
cursor.execute("SELECT offer_id FROM offer_base USE INDEX (IDX_OFFER_BASE_ID1_ID2) WHERE identifier_1 = %s AND identifier_2 = %s", (row[2], '0'))
off_id = cursor.fetchone()[0]
else:
off_check = [x for (x, y, z) in offer_base_rs if (row[2], '0') == (y, z)]
if len(off_check) > 0:
|
else:
cursor.execute("INSERT INTO offer_base (identifier_1, identifier_2, region_name, market_id) VALUES (%s, %s, %s, %s)", (row[2], '0', "SPP", mkt_id))
ins_perf = True
cursor.execute("SELECT offer_id FROM offer_base USE INDEX (IDX_OFFER_BASE_ID1_ID2) WHERE identifier_1 = %s AND identifier_2 = %s", (row[2], '0'))
off_id = cursor.fetchone()[0]
if fname.split('\\')[-1].split('-')[0].lower() == 'da':
mrun_id = 'DAM'
elif fname.split('\\')[-1].split('-')[0].lower() == 'rtbm':
mrun_id = 'RTBM'
intv_start = (datetime.datetime.strptime(row[0], "%m/%d/%Y %H:%M:%S") - datetime.timedelta(hours = 1, minutes = 0)).strftime("%Y-%m-%d %H:%M:%S")
intv_end = (datetime.datetime.strptime(row[0], "%m/%d/%Y %H:%M:%S")).strftime("%Y-%m-%d %H:%M:%S")
intv_dt = intv_start[:10]
hr, iv = int(intv_start[11:13]), 0
intv_id = str(off_id) + '-' + mrun_id + '-' + intv_start[2:4] + intv_start[5:7] + intv_start[8:10] + intv_start[11:13] + intv_start[14:16]
cursor.execute("SELECT interval_id FROM interval_meta USE INDEX (PRIMARY) WHERE interval_id = %s", (intv_id,))
intvid_rs = cursor.fetchone()
if intvid_rs == None:
cursor.execute("INSERT INTO interval_meta (interval_id, offer_id, market_id, mkt_run_id, interval_dt, interval_start, interval_end, opr_hour, opr_interval) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s)", (intv_id, off_id, mkt_id, mrun_id, intv_dt, intv_start, intv_end, hr, iv))
cursor.execute("SELECT interval_id FROM spp_results USE INDEX (IDX_SPP_RESULTS_INTERVAL_ID) WHERE interval_id = %s", (intv_id,))
spp_rs = cursor.fetchone()
if spp_rs == None:
spp_rs = []
else:
spp_rs = list(spp_rs)
xml_item_map = {'Capability Offer Reg-Down': 'coreg_down', 'Capability Offer Reg-Up': 'coreg_up', 'Mileage Factor Reg-Down': 'mfreg_down', 'Mileage Factor Reg-Up': 'mfreg_up', 'Mileage Offer Reg-Down': 'moreg_down', 'Mileage Offer Reg-Up': 'moreg_up', 'SPIN': 'spin_price', 'SUPP': 'supp_price'}
if row[3].strip() in xml_item_map.keys():
if len(spp_rs) > 0:
qry = "UPDATE spp_results SET " + xml_item_map[row[3].strip()] + " = %s WHERE interval_id = %s"
cursor.execute(qry, (float(row[4].strip()), intv_id))
else:
qry = "INSERT INTO spp_results (interval_id, " + xml_item_map[row[3].strip()] + ") VALUES (%s, %s)"
cursor.execute(qry, (intv_id, float(row[4])))
else:
print (row[3].strip() + " is a new ASProduct for the interval with interval_id: " + intv_id)
cnx.commit()
fread.close()
i += 1
cursor.close()
cnx.close()
except Exception as e:
print (str(e))
def dbdt_check(mkt_name, start_dt, end_dt):
try:
print ("\nStarting the database date validation check...\n")
cnx = MySQLdb.connect(user = 'not-published', passwd = 'not-published', host = 'not-published', db = 'not-published')
cursor = cnx.cursor()
cursor.execute("SELECT min(interval_dt) AS oldest_dt, max(interval_dt) AS latest_dt FROM interval_meta USE INDEX (IDX_INTERVAL_META_MARKET_ID) WHERE market_id = (SELECT DISTINCT market_id FROM market_meta USE INDEX (PRIMARY) WHERE lower(market_name) = %s)", (mkt_name.lower(),))
rs = cursor.fetchone()
cursor.close()
cnx.close()
print("Database Oldest Date (MM-DD-YYYY): " + datetime.datetime.strftime(rs[0], "%m-%d-%Y"))
dbdt_start = datetime.datetime.strptime(datetime.datetime.strftime(rs[0], "%Y-%m-%d"), "%Y-%m-%d")
print("Database Latest Date (MM-DD-YYYY): " + datetime.datetime.strftime(rs[1], "%m-%d-%Y"))
dbdt_end = datetime.datetime.strptime(datetime.datetime.strftime(rs[1], "%Y-%m-%d"), "%Y-%m-%d")
print("Script Start Date (MM-DD-YYYY): " + start_dt)
start_dt = datetime.datetime.strptime(start_dt.split('-')[2] + '-' + start_dt.split('-')[0] + '-' + start_dt.split('-')[1], "%Y-%m-%d")
print("Script End Date (MM-DD-YYYY): " + end_dt)
end_dt = datetime.datetime.strptime(end_dt.split('-')[2] + '-' + end_dt.split('-')[0] + '-' + end_dt.split('-')[1], "%Y-%m-%d")
if start_dt == (dbdt_end + datetime.timedelta(hours = 24, minutes = 0)) and end_dt >= start_dt and end_dt <= datetime.datetime.strptime(datetime.datetime.strftime(datetime.datetime.now() - datetime.timedelta(hours = 24, minutes = 0), "%Y-%m-%d"), "%Y-%m-%d"):
print ("\nThe database date validation check has completed successfully. The program will now execute...\n")
return True
else:
actual_st = datetime.datetime.strftime(dbdt_end + datetime.timedelta(hours = 24, minutes = 0), "%Y-%m-%d")
actual_ed = datetime.datetime.strftime(datetime.datetime.now() - datetime.timedelta(hours = 24, minutes = 0), "%Y-%m-%d")
print ("\nPlease check the script start and end dates properly. The start date must be set to " + actual_st.split('-')[1] + '-' + actual_st.split('-')[2] + '-' + actual_st.split('-')[0] + " (MM-DD-YYYY) and the end date must be less than or equal to " + actual_ed.split('-')[1] + '-' + actual_ed.split('-')[2] + '-' + actual_ed.split('-')[0] + " (MM-DD-YYYY) and also not less than the start date.")
return False
except Exception as e:
print (str(e))
def main():
print ("\n********** Start of the Program **********\n")
# prog_dir is the main directory under which the CSV files will be stored
#prog_dir = "C:\\Users\\Omkar Sunkersett\\Downloads\\markets"
# These respective variables set the start and end dates for fetching data from the server
#startdatetime = "MM-DD-YYYY"
#enddatetime = "MM-DD-YYYY"
if dbdt_check("SPP", startdatetime, enddatetime):
# Code for fetching the CSV files from the server for historical offers
#histoff_or = SPP("pubftp.spp.org", "/Markets/HistoricalOffers/", startdatetime, enddatetime, prog_dir)
#histoff_or.fetch_files("/Markets/HistoricalOffers", "")
#rint(histoff_or)
# Code for loading the historical offer related CSV data into the not-published database for OR only
# IMPORTANT: Make sure you have the latest backup of the database before uncommenting the below lines
#print ("\nLoading the new data into the database...\n")
#etl_file_data(prog_dir + "\\cache\\spp\\Markets\HistoricalOffers.txt")
print ("\n********** End of the Program **********\n")
main()
| off_id = off_check[0]
ins_perf = False | conditional_block |
spp-final.py | # Author: Omkar Sunkersett
# Purpose: To fetch SPP data and update the database
# Summer Internship at Argonne National Laboratory
import csv, datetime, ftplib, MySQLdb, os, time
class SPP():
def | (self, server, path, start_dt, end_dt, prog_dir):
self.files_cached = []
try:
self.ftp_handle = ftplib.FTP(server)
self.ftp_handle.login()
self.path_name = path
self.start_dt = datetime.datetime.strptime(start_dt, "%m-%d-%Y")
self.end_dt = datetime.datetime.strptime(end_dt, "%m-%d-%Y")
self.prog_dir = prog_dir
except Exception as e:
print (str(e))
def fetch_files(self, pres_wd, dir_wd):
try:
try:
self.ftp_handle.voidcmd("NOOP")
except Exception as e:
print (str(e))
self.ftp_handle = ftplib.FTP("pubftp.spp.org")
self.ftp_handle.login()
self.ftp_handle.cwd(pres_wd.replace('\\', '/') + '/' + dir_wd)
dir_lst = [x for x in self.ftp_handle.nlst() if '.' not in x]
if dir_lst == []:
files_lst = [x for x in self.ftp_handle.nlst() if '.' in x and x.split('-')[1] == 'OR' and datetime.datetime.strptime(x.split('-')[3][:8], "%Y%m%d") >= self.start_dt and datetime.datetime.strptime(x.split('-')[3][:8], "%Y%m%d") <= self.end_dt]
if len(files_lst) > 0:
if os.path.isdir(self.prog_dir + '\\cache\\spp' + pres_wd + '\\' + dir_wd) == False:
os.makedirs(self.prog_dir + '\\cache\\spp' + pres_wd + '\\' + dir_wd)
os.chdir(self.prog_dir + '\\cache\\spp' + pres_wd + '\\' + dir_wd)
for file_name in files_lst:
print (os.getcwd() + '\\' + file_name)
self.ftp_handle.retrbinary("RETR " + file_name, open(file_name, 'wb').write)
self.files_cached.append(os.getcwd() + '\\' + file_name)
os.chdir(self.prog_dir + '\\cache\\spp' + pres_wd)
self.ftp_handle.cwd('..')
else:
files_lst = [x for x in self.ftp_handle.nlst() if '.' in x and x.split('-')[1] == 'OR' and datetime.datetime.strptime(x.split('-')[3][:8], "%Y%m%d") >= self.start_dt and datetime.datetime.strptime(x.split('-')[3][:8], "%Y%m%d") <= self.end_dt]
if len(files_lst) > 0:
if os.path.isdir(self.prog_dir + '\\cache\\spp' + pres_wd + '\\' + dir_wd) == False:
os.makedirs(self.prog_dir + '\\cache\\spp' + pres_wd + '\\' + dir_wd)
os.chdir(self.prog_dir + '\\cache\\spp' + pres_wd + '\\' + dir_wd)
for file_name in files_lst:
print (os.getcwd() + '\\' + file_name)
self.ftp_handle.retrbinary("RETR " + file_name, open(file_name, 'wb').write)
self.files_cached.append(os.getcwd() + '\\' + file_name)
for each_dir in dir_lst:
self.fetch_files(self.ftp_handle.pwd().replace('/', '\\'), each_dir)
self.ftp_handle.cwd('..')
except Exception as e:
print (str(e))
def __str__(self):
try:
self.ftp_handle.quit()
os.chdir(self.prog_dir + '\\cache\\spp')
fwrite = open(self.path_name[1:-1].replace('\\', '-') + '.txt', 'w')
fwrite.write('File(s) cached are as follows:\n')
for file_name in self.files_cached:
fwrite.write(file_name + '\n')
fwrite.close()
os.chdir(self.prog_dir)
return ("\nFile(s) cached: " + ', '.join(self.files_cached) + '\n')
except Exception as e:
print (str(e))
def etl_file_data(cache_file):
try:
fread = open(cache_file, 'r')
flines = [x.rstrip('\n') for x in fread.readlines() if x.endswith('.csv\n')]
fread.close()
cnx = MySQLdb.connect(user = 'not-published', passwd = 'not-published', host = 'not-published', db = 'not-published')
cursor = cnx.cursor()
cursor.execute("SELECT market_id FROM market_meta USE INDEX (PRIMARY) WHERE market_name = 'SPP'")
mkt_id = cursor.fetchone()[0]
i = 1
for fname in flines:
print ('Current file: ' + fname + '\t' + 'Percent complete: ' + str(round((float(i)*100)/len(flines), 2)) + ' %')
fread = open(fname, 'r')
frows = csv.reader(fread, delimiter = ',')
next(frows, None)
offer_base_rs = []
ins_perf = True
for row in frows:
if len(row) > 0 and row[2].strip() != '' and row[3].strip() != '' and row[4].strip() != '':
if ins_perf == True:
cursor.execute("SELECT offer_id, identifier_1, identifier_2 FROM offer_base USE INDEX (IDX_OFFER_BASE_MARKET_ID) WHERE market_id = %s", (mkt_id,))
offer_base_rs = list(cursor.fetchall())
if len(offer_base_rs) > 0:
off_check = [x for (x, y, z) in offer_base_rs if (row[2], '0') == (y, z)]
if len(off_check) > 0:
off_id = off_check[0]
ins_perf = False
else:
cursor.execute("INSERT INTO offer_base (identifier_1, identifier_2, region_name, market_id) VALUES (%s, %s, %s, %s)", (row[2], '0', "SPP", mkt_id))
ins_perf = True
cursor.execute("SELECT offer_id FROM offer_base USE INDEX (IDX_OFFER_BASE_ID1_ID2) WHERE identifier_1 = %s AND identifier_2 = %s", (row[2], '0'))
off_id = cursor.fetchone()[0]
else:
cursor.execute("INSERT INTO offer_base (identifier_1, identifier_2, region_name, market_id) VALUES (%s, %s, %s, %s)", (row[2], '0', "SPP", mkt_id))
ins_perf = True
cursor.execute("SELECT offer_id FROM offer_base USE INDEX (IDX_OFFER_BASE_ID1_ID2) WHERE identifier_1 = %s AND identifier_2 = %s", (row[2], '0'))
off_id = cursor.fetchone()[0]
else:
off_check = [x for (x, y, z) in offer_base_rs if (row[2], '0') == (y, z)]
if len(off_check) > 0:
off_id = off_check[0]
ins_perf = False
else:
cursor.execute("INSERT INTO offer_base (identifier_1, identifier_2, region_name, market_id) VALUES (%s, %s, %s, %s)", (row[2], '0', "SPP", mkt_id))
ins_perf = True
cursor.execute("SELECT offer_id FROM offer_base USE INDEX (IDX_OFFER_BASE_ID1_ID2) WHERE identifier_1 = %s AND identifier_2 = %s", (row[2], '0'))
off_id = cursor.fetchone()[0]
if fname.split('\\')[-1].split('-')[0].lower() == 'da':
mrun_id = 'DAM'
elif fname.split('\\')[-1].split('-')[0].lower() == 'rtbm':
mrun_id = 'RTBM'
intv_start = (datetime.datetime.strptime(row[0], "%m/%d/%Y %H:%M:%S") - datetime.timedelta(hours = 1, minutes = 0)).strftime("%Y-%m-%d %H:%M:%S")
intv_end = (datetime.datetime.strptime(row[0], "%m/%d/%Y %H:%M:%S")).strftime("%Y-%m-%d %H:%M:%S")
intv_dt = intv_start[:10]
hr, iv = int(intv_start[11:13]), 0
intv_id = str(off_id) + '-' + mrun_id + '-' + intv_start[2:4] + intv_start[5:7] + intv_start[8:10] + intv_start[11:13] + intv_start[14:16]
cursor.execute("SELECT interval_id FROM interval_meta USE INDEX (PRIMARY) WHERE interval_id = %s", (intv_id,))
intvid_rs = cursor.fetchone()
if intvid_rs == None:
cursor.execute("INSERT INTO interval_meta (interval_id, offer_id, market_id, mkt_run_id, interval_dt, interval_start, interval_end, opr_hour, opr_interval) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s)", (intv_id, off_id, mkt_id, mrun_id, intv_dt, intv_start, intv_end, hr, iv))
cursor.execute("SELECT interval_id FROM spp_results USE INDEX (IDX_SPP_RESULTS_INTERVAL_ID) WHERE interval_id = %s", (intv_id,))
spp_rs = cursor.fetchone()
if spp_rs == None:
spp_rs = []
else:
spp_rs = list(spp_rs)
xml_item_map = {'Capability Offer Reg-Down': 'coreg_down', 'Capability Offer Reg-Up': 'coreg_up', 'Mileage Factor Reg-Down': 'mfreg_down', 'Mileage Factor Reg-Up': 'mfreg_up', 'Mileage Offer Reg-Down': 'moreg_down', 'Mileage Offer Reg-Up': 'moreg_up', 'SPIN': 'spin_price', 'SUPP': 'supp_price'}
if row[3].strip() in xml_item_map.keys():
if len(spp_rs) > 0:
qry = "UPDATE spp_results SET " + xml_item_map[row[3].strip()] + " = %s WHERE interval_id = %s"
cursor.execute(qry, (float(row[4].strip()), intv_id))
else:
qry = "INSERT INTO spp_results (interval_id, " + xml_item_map[row[3].strip()] + ") VALUES (%s, %s)"
cursor.execute(qry, (intv_id, float(row[4])))
else:
print (row[3].strip() + " is a new ASProduct for the interval with interval_id: " + intv_id)
cnx.commit()
fread.close()
i += 1
cursor.close()
cnx.close()
except Exception as e:
print (str(e))
def dbdt_check(mkt_name, start_dt, end_dt):
try:
print ("\nStarting the database date validation check...\n")
cnx = MySQLdb.connect(user = 'not-published', passwd = 'not-published', host = 'not-published', db = 'not-published')
cursor = cnx.cursor()
cursor.execute("SELECT min(interval_dt) AS oldest_dt, max(interval_dt) AS latest_dt FROM interval_meta USE INDEX (IDX_INTERVAL_META_MARKET_ID) WHERE market_id = (SELECT DISTINCT market_id FROM market_meta USE INDEX (PRIMARY) WHERE lower(market_name) = %s)", (mkt_name.lower(),))
rs = cursor.fetchone()
cursor.close()
cnx.close()
print("Database Oldest Date (MM-DD-YYYY): " + datetime.datetime.strftime(rs[0], "%m-%d-%Y"))
dbdt_start = datetime.datetime.strptime(datetime.datetime.strftime(rs[0], "%Y-%m-%d"), "%Y-%m-%d")
print("Database Latest Date (MM-DD-YYYY): " + datetime.datetime.strftime(rs[1], "%m-%d-%Y"))
dbdt_end = datetime.datetime.strptime(datetime.datetime.strftime(rs[1], "%Y-%m-%d"), "%Y-%m-%d")
print("Script Start Date (MM-DD-YYYY): " + start_dt)
start_dt = datetime.datetime.strptime(start_dt.split('-')[2] + '-' + start_dt.split('-')[0] + '-' + start_dt.split('-')[1], "%Y-%m-%d")
print("Script End Date (MM-DD-YYYY): " + end_dt)
end_dt = datetime.datetime.strptime(end_dt.split('-')[2] + '-' + end_dt.split('-')[0] + '-' + end_dt.split('-')[1], "%Y-%m-%d")
if start_dt == (dbdt_end + datetime.timedelta(hours = 24, minutes = 0)) and end_dt >= start_dt and end_dt <= datetime.datetime.strptime(datetime.datetime.strftime(datetime.datetime.now() - datetime.timedelta(hours = 24, minutes = 0), "%Y-%m-%d"), "%Y-%m-%d"):
print ("\nThe database date validation check has completed successfully. The program will now execute...\n")
return True
else:
actual_st = datetime.datetime.strftime(dbdt_end + datetime.timedelta(hours = 24, minutes = 0), "%Y-%m-%d")
actual_ed = datetime.datetime.strftime(datetime.datetime.now() - datetime.timedelta(hours = 24, minutes = 0), "%Y-%m-%d")
print ("\nPlease check the script start and end dates properly. The start date must be set to " + actual_st.split('-')[1] + '-' + actual_st.split('-')[2] + '-' + actual_st.split('-')[0] + " (MM-DD-YYYY) and the end date must be less than or equal to " + actual_ed.split('-')[1] + '-' + actual_ed.split('-')[2] + '-' + actual_ed.split('-')[0] + " (MM-DD-YYYY) and also not less than the start date.")
return False
except Exception as e:
print (str(e))
def main():
print ("\n********** Start of the Program **********\n")
# prog_dir is the main directory under which the CSV files will be stored
#prog_dir = "C:\\Users\\Omkar Sunkersett\\Downloads\\markets"
# These respective variables set the start and end dates for fetching data from the server
#startdatetime = "MM-DD-YYYY"
#enddatetime = "MM-DD-YYYY"
if dbdt_check("SPP", startdatetime, enddatetime):
# Code for fetching the CSV files from the server for historical offers
#histoff_or = SPP("pubftp.spp.org", "/Markets/HistoricalOffers/", startdatetime, enddatetime, prog_dir)
#histoff_or.fetch_files("/Markets/HistoricalOffers", "")
#rint(histoff_or)
# Code for loading the historical offer related CSV data into the not-published database for OR only
# IMPORTANT: Make sure you have the latest backup of the database before uncommenting the below lines
#print ("\nLoading the new data into the database...\n")
#etl_file_data(prog_dir + "\\cache\\spp\\Markets\HistoricalOffers.txt")
print ("\n********** End of the Program **********\n")
main()
| __init__ | identifier_name |
spp-final.py | # Author: Omkar Sunkersett
# Purpose: To fetch SPP data and update the database
# Summer Internship at Argonne National Laboratory
import csv, datetime, ftplib, MySQLdb, os, time
class SPP():
def __init__(self, server, path, start_dt, end_dt, prog_dir):
self.files_cached = []
try:
self.ftp_handle = ftplib.FTP(server)
self.ftp_handle.login()
self.path_name = path
self.start_dt = datetime.datetime.strptime(start_dt, "%m-%d-%Y")
self.end_dt = datetime.datetime.strptime(end_dt, "%m-%d-%Y")
self.prog_dir = prog_dir
except Exception as e:
print (str(e))
def fetch_files(self, pres_wd, dir_wd):
try:
try:
self.ftp_handle.voidcmd("NOOP")
except Exception as e:
print (str(e))
self.ftp_handle = ftplib.FTP("pubftp.spp.org")
self.ftp_handle.login()
self.ftp_handle.cwd(pres_wd.replace('\\', '/') + '/' + dir_wd)
dir_lst = [x for x in self.ftp_handle.nlst() if '.' not in x]
if dir_lst == []:
files_lst = [x for x in self.ftp_handle.nlst() if '.' in x and x.split('-')[1] == 'OR' and datetime.datetime.strptime(x.split('-')[3][:8], "%Y%m%d") >= self.start_dt and datetime.datetime.strptime(x.split('-')[3][:8], "%Y%m%d") <= self.end_dt]
if len(files_lst) > 0:
if os.path.isdir(self.prog_dir + '\\cache\\spp' + pres_wd + '\\' + dir_wd) == False:
os.makedirs(self.prog_dir + '\\cache\\spp' + pres_wd + '\\' + dir_wd)
os.chdir(self.prog_dir + '\\cache\\spp' + pres_wd + '\\' + dir_wd)
for file_name in files_lst:
print (os.getcwd() + '\\' + file_name)
self.ftp_handle.retrbinary("RETR " + file_name, open(file_name, 'wb').write)
self.files_cached.append(os.getcwd() + '\\' + file_name)
os.chdir(self.prog_dir + '\\cache\\spp' + pres_wd)
self.ftp_handle.cwd('..')
else:
files_lst = [x for x in self.ftp_handle.nlst() if '.' in x and x.split('-')[1] == 'OR' and datetime.datetime.strptime(x.split('-')[3][:8], "%Y%m%d") >= self.start_dt and datetime.datetime.strptime(x.split('-')[3][:8], "%Y%m%d") <= self.end_dt]
if len(files_lst) > 0:
if os.path.isdir(self.prog_dir + '\\cache\\spp' + pres_wd + '\\' + dir_wd) == False:
os.makedirs(self.prog_dir + '\\cache\\spp' + pres_wd + '\\' + dir_wd)
os.chdir(self.prog_dir + '\\cache\\spp' + pres_wd + '\\' + dir_wd)
for file_name in files_lst:
print (os.getcwd() + '\\' + file_name)
self.ftp_handle.retrbinary("RETR " + file_name, open(file_name, 'wb').write)
self.files_cached.append(os.getcwd() + '\\' + file_name)
for each_dir in dir_lst:
self.fetch_files(self.ftp_handle.pwd().replace('/', '\\'), each_dir)
self.ftp_handle.cwd('..')
except Exception as e:
print (str(e))
def __str__(self):
|
def etl_file_data(cache_file):
try:
fread = open(cache_file, 'r')
flines = [x.rstrip('\n') for x in fread.readlines() if x.endswith('.csv\n')]
fread.close()
cnx = MySQLdb.connect(user = 'not-published', passwd = 'not-published', host = 'not-published', db = 'not-published')
cursor = cnx.cursor()
cursor.execute("SELECT market_id FROM market_meta USE INDEX (PRIMARY) WHERE market_name = 'SPP'")
mkt_id = cursor.fetchone()[0]
i = 1
for fname in flines:
print ('Current file: ' + fname + '\t' + 'Percent complete: ' + str(round((float(i)*100)/len(flines), 2)) + ' %')
fread = open(fname, 'r')
frows = csv.reader(fread, delimiter = ',')
next(frows, None)
offer_base_rs = []
ins_perf = True
for row in frows:
if len(row) > 0 and row[2].strip() != '' and row[3].strip() != '' and row[4].strip() != '':
if ins_perf == True:
cursor.execute("SELECT offer_id, identifier_1, identifier_2 FROM offer_base USE INDEX (IDX_OFFER_BASE_MARKET_ID) WHERE market_id = %s", (mkt_id,))
offer_base_rs = list(cursor.fetchall())
if len(offer_base_rs) > 0:
off_check = [x for (x, y, z) in offer_base_rs if (row[2], '0') == (y, z)]
if len(off_check) > 0:
off_id = off_check[0]
ins_perf = False
else:
cursor.execute("INSERT INTO offer_base (identifier_1, identifier_2, region_name, market_id) VALUES (%s, %s, %s, %s)", (row[2], '0', "SPP", mkt_id))
ins_perf = True
cursor.execute("SELECT offer_id FROM offer_base USE INDEX (IDX_OFFER_BASE_ID1_ID2) WHERE identifier_1 = %s AND identifier_2 = %s", (row[2], '0'))
off_id = cursor.fetchone()[0]
else:
cursor.execute("INSERT INTO offer_base (identifier_1, identifier_2, region_name, market_id) VALUES (%s, %s, %s, %s)", (row[2], '0', "SPP", mkt_id))
ins_perf = True
cursor.execute("SELECT offer_id FROM offer_base USE INDEX (IDX_OFFER_BASE_ID1_ID2) WHERE identifier_1 = %s AND identifier_2 = %s", (row[2], '0'))
off_id = cursor.fetchone()[0]
else:
off_check = [x for (x, y, z) in offer_base_rs if (row[2], '0') == (y, z)]
if len(off_check) > 0:
off_id = off_check[0]
ins_perf = False
else:
cursor.execute("INSERT INTO offer_base (identifier_1, identifier_2, region_name, market_id) VALUES (%s, %s, %s, %s)", (row[2], '0', "SPP", mkt_id))
ins_perf = True
cursor.execute("SELECT offer_id FROM offer_base USE INDEX (IDX_OFFER_BASE_ID1_ID2) WHERE identifier_1 = %s AND identifier_2 = %s", (row[2], '0'))
off_id = cursor.fetchone()[0]
if fname.split('\\')[-1].split('-')[0].lower() == 'da':
mrun_id = 'DAM'
elif fname.split('\\')[-1].split('-')[0].lower() == 'rtbm':
mrun_id = 'RTBM'
intv_start = (datetime.datetime.strptime(row[0], "%m/%d/%Y %H:%M:%S") - datetime.timedelta(hours = 1, minutes = 0)).strftime("%Y-%m-%d %H:%M:%S")
intv_end = (datetime.datetime.strptime(row[0], "%m/%d/%Y %H:%M:%S")).strftime("%Y-%m-%d %H:%M:%S")
intv_dt = intv_start[:10]
hr, iv = int(intv_start[11:13]), 0
intv_id = str(off_id) + '-' + mrun_id + '-' + intv_start[2:4] + intv_start[5:7] + intv_start[8:10] + intv_start[11:13] + intv_start[14:16]
cursor.execute("SELECT interval_id FROM interval_meta USE INDEX (PRIMARY) WHERE interval_id = %s", (intv_id,))
intvid_rs = cursor.fetchone()
if intvid_rs == None:
cursor.execute("INSERT INTO interval_meta (interval_id, offer_id, market_id, mkt_run_id, interval_dt, interval_start, interval_end, opr_hour, opr_interval) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s)", (intv_id, off_id, mkt_id, mrun_id, intv_dt, intv_start, intv_end, hr, iv))
cursor.execute("SELECT interval_id FROM spp_results USE INDEX (IDX_SPP_RESULTS_INTERVAL_ID) WHERE interval_id = %s", (intv_id,))
spp_rs = cursor.fetchone()
if spp_rs == None:
spp_rs = []
else:
spp_rs = list(spp_rs)
xml_item_map = {'Capability Offer Reg-Down': 'coreg_down', 'Capability Offer Reg-Up': 'coreg_up', 'Mileage Factor Reg-Down': 'mfreg_down', 'Mileage Factor Reg-Up': 'mfreg_up', 'Mileage Offer Reg-Down': 'moreg_down', 'Mileage Offer Reg-Up': 'moreg_up', 'SPIN': 'spin_price', 'SUPP': 'supp_price'}
if row[3].strip() in xml_item_map.keys():
if len(spp_rs) > 0:
qry = "UPDATE spp_results SET " + xml_item_map[row[3].strip()] + " = %s WHERE interval_id = %s"
cursor.execute(qry, (float(row[4].strip()), intv_id))
else:
qry = "INSERT INTO spp_results (interval_id, " + xml_item_map[row[3].strip()] + ") VALUES (%s, %s)"
cursor.execute(qry, (intv_id, float(row[4])))
else:
print (row[3].strip() + " is a new ASProduct for the interval with interval_id: " + intv_id)
cnx.commit()
fread.close()
i += 1
cursor.close()
cnx.close()
except Exception as e:
print (str(e))
def dbdt_check(mkt_name, start_dt, end_dt):
try:
print ("\nStarting the database date validation check...\n")
cnx = MySQLdb.connect(user = 'not-published', passwd = 'not-published', host = 'not-published', db = 'not-published')
cursor = cnx.cursor()
cursor.execute("SELECT min(interval_dt) AS oldest_dt, max(interval_dt) AS latest_dt FROM interval_meta USE INDEX (IDX_INTERVAL_META_MARKET_ID) WHERE market_id = (SELECT DISTINCT market_id FROM market_meta USE INDEX (PRIMARY) WHERE lower(market_name) = %s)", (mkt_name.lower(),))
rs = cursor.fetchone()
cursor.close()
cnx.close()
print("Database Oldest Date (MM-DD-YYYY): " + datetime.datetime.strftime(rs[0], "%m-%d-%Y"))
dbdt_start = datetime.datetime.strptime(datetime.datetime.strftime(rs[0], "%Y-%m-%d"), "%Y-%m-%d")
print("Database Latest Date (MM-DD-YYYY): " + datetime.datetime.strftime(rs[1], "%m-%d-%Y"))
dbdt_end = datetime.datetime.strptime(datetime.datetime.strftime(rs[1], "%Y-%m-%d"), "%Y-%m-%d")
print("Script Start Date (MM-DD-YYYY): " + start_dt)
start_dt = datetime.datetime.strptime(start_dt.split('-')[2] + '-' + start_dt.split('-')[0] + '-' + start_dt.split('-')[1], "%Y-%m-%d")
print("Script End Date (MM-DD-YYYY): " + end_dt)
end_dt = datetime.datetime.strptime(end_dt.split('-')[2] + '-' + end_dt.split('-')[0] + '-' + end_dt.split('-')[1], "%Y-%m-%d")
if start_dt == (dbdt_end + datetime.timedelta(hours = 24, minutes = 0)) and end_dt >= start_dt and end_dt <= datetime.datetime.strptime(datetime.datetime.strftime(datetime.datetime.now() - datetime.timedelta(hours = 24, minutes = 0), "%Y-%m-%d"), "%Y-%m-%d"):
print ("\nThe database date validation check has completed successfully. The program will now execute...\n")
return True
else:
actual_st = datetime.datetime.strftime(dbdt_end + datetime.timedelta(hours = 24, minutes = 0), "%Y-%m-%d")
actual_ed = datetime.datetime.strftime(datetime.datetime.now() - datetime.timedelta(hours = 24, minutes = 0), "%Y-%m-%d")
print ("\nPlease check the script start and end dates properly. The start date must be set to " + actual_st.split('-')[1] + '-' + actual_st.split('-')[2] + '-' + actual_st.split('-')[0] + " (MM-DD-YYYY) and the end date must be less than or equal to " + actual_ed.split('-')[1] + '-' + actual_ed.split('-')[2] + '-' + actual_ed.split('-')[0] + " (MM-DD-YYYY) and also not less than the start date.")
return False
except Exception as e:
print (str(e))
def main():
print ("\n********** Start of the Program **********\n")
# prog_dir is the main directory under which the CSV files will be stored
#prog_dir = "C:\\Users\\Omkar Sunkersett\\Downloads\\markets"
# These respective variables set the start and end dates for fetching data from the server
#startdatetime = "MM-DD-YYYY"
#enddatetime = "MM-DD-YYYY"
if dbdt_check("SPP", startdatetime, enddatetime):
# Code for fetching the CSV files from the server for historical offers
#histoff_or = SPP("pubftp.spp.org", "/Markets/HistoricalOffers/", startdatetime, enddatetime, prog_dir)
#histoff_or.fetch_files("/Markets/HistoricalOffers", "")
#rint(histoff_or)
# Code for loading the historical offer related CSV data into the not-published database for OR only
# IMPORTANT: Make sure you have the latest backup of the database before uncommenting the below lines
#print ("\nLoading the new data into the database...\n")
#etl_file_data(prog_dir + "\\cache\\spp\\Markets\HistoricalOffers.txt")
print ("\n********** End of the Program **********\n")
main()
| try:
self.ftp_handle.quit()
os.chdir(self.prog_dir + '\\cache\\spp')
fwrite = open(self.path_name[1:-1].replace('\\', '-') + '.txt', 'w')
fwrite.write('File(s) cached are as follows:\n')
for file_name in self.files_cached:
fwrite.write(file_name + '\n')
fwrite.close()
os.chdir(self.prog_dir)
return ("\nFile(s) cached: " + ', '.join(self.files_cached) + '\n')
except Exception as e:
print (str(e)) | identifier_body |
spp-final.py | # Author: Omkar Sunkersett
# Purpose: To fetch SPP data and update the database
# Summer Internship at Argonne National Laboratory
import csv, datetime, ftplib, MySQLdb, os, time
class SPP():
def __init__(self, server, path, start_dt, end_dt, prog_dir):
self.files_cached = []
try:
self.ftp_handle = ftplib.FTP(server)
self.ftp_handle.login()
self.path_name = path
self.start_dt = datetime.datetime.strptime(start_dt, "%m-%d-%Y")
self.end_dt = datetime.datetime.strptime(end_dt, "%m-%d-%Y")
self.prog_dir = prog_dir
except Exception as e:
print (str(e))
def fetch_files(self, pres_wd, dir_wd):
try:
try:
self.ftp_handle.voidcmd("NOOP")
except Exception as e:
print (str(e))
self.ftp_handle = ftplib.FTP("pubftp.spp.org")
self.ftp_handle.login()
self.ftp_handle.cwd(pres_wd.replace('\\', '/') + '/' + dir_wd)
dir_lst = [x for x in self.ftp_handle.nlst() if '.' not in x]
if dir_lst == []:
files_lst = [x for x in self.ftp_handle.nlst() if '.' in x and x.split('-')[1] == 'OR' and datetime.datetime.strptime(x.split('-')[3][:8], "%Y%m%d") >= self.start_dt and datetime.datetime.strptime(x.split('-')[3][:8], "%Y%m%d") <= self.end_dt]
if len(files_lst) > 0:
if os.path.isdir(self.prog_dir + '\\cache\\spp' + pres_wd + '\\' + dir_wd) == False:
os.makedirs(self.prog_dir + '\\cache\\spp' + pres_wd + '\\' + dir_wd)
os.chdir(self.prog_dir + '\\cache\\spp' + pres_wd + '\\' + dir_wd)
for file_name in files_lst:
print (os.getcwd() + '\\' + file_name)
self.ftp_handle.retrbinary("RETR " + file_name, open(file_name, 'wb').write)
self.files_cached.append(os.getcwd() + '\\' + file_name)
os.chdir(self.prog_dir + '\\cache\\spp' + pres_wd)
self.ftp_handle.cwd('..')
else:
files_lst = [x for x in self.ftp_handle.nlst() if '.' in x and x.split('-')[1] == 'OR' and datetime.datetime.strptime(x.split('-')[3][:8], "%Y%m%d") >= self.start_dt and datetime.datetime.strptime(x.split('-')[3][:8], "%Y%m%d") <= self.end_dt]
if len(files_lst) > 0:
if os.path.isdir(self.prog_dir + '\\cache\\spp' + pres_wd + '\\' + dir_wd) == False:
os.makedirs(self.prog_dir + '\\cache\\spp' + pres_wd + '\\' + dir_wd)
os.chdir(self.prog_dir + '\\cache\\spp' + pres_wd + '\\' + dir_wd)
for file_name in files_lst:
print (os.getcwd() + '\\' + file_name)
self.ftp_handle.retrbinary("RETR " + file_name, open(file_name, 'wb').write)
self.files_cached.append(os.getcwd() + '\\' + file_name)
for each_dir in dir_lst:
self.fetch_files(self.ftp_handle.pwd().replace('/', '\\'), each_dir)
self.ftp_handle.cwd('..')
except Exception as e:
print (str(e))
def __str__(self):
try:
self.ftp_handle.quit()
os.chdir(self.prog_dir + '\\cache\\spp')
fwrite = open(self.path_name[1:-1].replace('\\', '-') + '.txt', 'w')
fwrite.write('File(s) cached are as follows:\n')
for file_name in self.files_cached:
fwrite.write(file_name + '\n')
fwrite.close()
os.chdir(self.prog_dir)
return ("\nFile(s) cached: " + ', '.join(self.files_cached) + '\n')
except Exception as e:
print (str(e))
def etl_file_data(cache_file):
try:
fread = open(cache_file, 'r')
flines = [x.rstrip('\n') for x in fread.readlines() if x.endswith('.csv\n')]
fread.close()
cnx = MySQLdb.connect(user = 'not-published', passwd = 'not-published', host = 'not-published', db = 'not-published')
cursor = cnx.cursor()
cursor.execute("SELECT market_id FROM market_meta USE INDEX (PRIMARY) WHERE market_name = 'SPP'")
mkt_id = cursor.fetchone()[0]
i = 1
for fname in flines:
print ('Current file: ' + fname + '\t' + 'Percent complete: ' + str(round((float(i)*100)/len(flines), 2)) + ' %')
fread = open(fname, 'r')
frows = csv.reader(fread, delimiter = ',')
| ins_perf = True
for row in frows:
if len(row) > 0 and row[2].strip() != '' and row[3].strip() != '' and row[4].strip() != '':
if ins_perf == True:
cursor.execute("SELECT offer_id, identifier_1, identifier_2 FROM offer_base USE INDEX (IDX_OFFER_BASE_MARKET_ID) WHERE market_id = %s", (mkt_id,))
offer_base_rs = list(cursor.fetchall())
if len(offer_base_rs) > 0:
off_check = [x for (x, y, z) in offer_base_rs if (row[2], '0') == (y, z)]
if len(off_check) > 0:
off_id = off_check[0]
ins_perf = False
else:
cursor.execute("INSERT INTO offer_base (identifier_1, identifier_2, region_name, market_id) VALUES (%s, %s, %s, %s)", (row[2], '0', "SPP", mkt_id))
ins_perf = True
cursor.execute("SELECT offer_id FROM offer_base USE INDEX (IDX_OFFER_BASE_ID1_ID2) WHERE identifier_1 = %s AND identifier_2 = %s", (row[2], '0'))
off_id = cursor.fetchone()[0]
else:
cursor.execute("INSERT INTO offer_base (identifier_1, identifier_2, region_name, market_id) VALUES (%s, %s, %s, %s)", (row[2], '0', "SPP", mkt_id))
ins_perf = True
cursor.execute("SELECT offer_id FROM offer_base USE INDEX (IDX_OFFER_BASE_ID1_ID2) WHERE identifier_1 = %s AND identifier_2 = %s", (row[2], '0'))
off_id = cursor.fetchone()[0]
else:
off_check = [x for (x, y, z) in offer_base_rs if (row[2], '0') == (y, z)]
if len(off_check) > 0:
off_id = off_check[0]
ins_perf = False
else:
cursor.execute("INSERT INTO offer_base (identifier_1, identifier_2, region_name, market_id) VALUES (%s, %s, %s, %s)", (row[2], '0', "SPP", mkt_id))
ins_perf = True
cursor.execute("SELECT offer_id FROM offer_base USE INDEX (IDX_OFFER_BASE_ID1_ID2) WHERE identifier_1 = %s AND identifier_2 = %s", (row[2], '0'))
off_id = cursor.fetchone()[0]
if fname.split('\\')[-1].split('-')[0].lower() == 'da':
mrun_id = 'DAM'
elif fname.split('\\')[-1].split('-')[0].lower() == 'rtbm':
mrun_id = 'RTBM'
intv_start = (datetime.datetime.strptime(row[0], "%m/%d/%Y %H:%M:%S") - datetime.timedelta(hours = 1, minutes = 0)).strftime("%Y-%m-%d %H:%M:%S")
intv_end = (datetime.datetime.strptime(row[0], "%m/%d/%Y %H:%M:%S")).strftime("%Y-%m-%d %H:%M:%S")
intv_dt = intv_start[:10]
hr, iv = int(intv_start[11:13]), 0
intv_id = str(off_id) + '-' + mrun_id + '-' + intv_start[2:4] + intv_start[5:7] + intv_start[8:10] + intv_start[11:13] + intv_start[14:16]
cursor.execute("SELECT interval_id FROM interval_meta USE INDEX (PRIMARY) WHERE interval_id = %s", (intv_id,))
intvid_rs = cursor.fetchone()
if intvid_rs == None:
cursor.execute("INSERT INTO interval_meta (interval_id, offer_id, market_id, mkt_run_id, interval_dt, interval_start, interval_end, opr_hour, opr_interval) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s)", (intv_id, off_id, mkt_id, mrun_id, intv_dt, intv_start, intv_end, hr, iv))
cursor.execute("SELECT interval_id FROM spp_results USE INDEX (IDX_SPP_RESULTS_INTERVAL_ID) WHERE interval_id = %s", (intv_id,))
spp_rs = cursor.fetchone()
if spp_rs == None:
spp_rs = []
else:
spp_rs = list(spp_rs)
xml_item_map = {'Capability Offer Reg-Down': 'coreg_down', 'Capability Offer Reg-Up': 'coreg_up', 'Mileage Factor Reg-Down': 'mfreg_down', 'Mileage Factor Reg-Up': 'mfreg_up', 'Mileage Offer Reg-Down': 'moreg_down', 'Mileage Offer Reg-Up': 'moreg_up', 'SPIN': 'spin_price', 'SUPP': 'supp_price'}
if row[3].strip() in xml_item_map.keys():
if len(spp_rs) > 0:
qry = "UPDATE spp_results SET " + xml_item_map[row[3].strip()] + " = %s WHERE interval_id = %s"
cursor.execute(qry, (float(row[4].strip()), intv_id))
else:
qry = "INSERT INTO spp_results (interval_id, " + xml_item_map[row[3].strip()] + ") VALUES (%s, %s)"
cursor.execute(qry, (intv_id, float(row[4])))
else:
print (row[3].strip() + " is a new ASProduct for the interval with interval_id: " + intv_id)
cnx.commit()
fread.close()
i += 1
cursor.close()
cnx.close()
except Exception as e:
print (str(e))
def dbdt_check(mkt_name, start_dt, end_dt):
try:
print ("\nStarting the database date validation check...\n")
cnx = MySQLdb.connect(user = 'not-published', passwd = 'not-published', host = 'not-published', db = 'not-published')
cursor = cnx.cursor()
cursor.execute("SELECT min(interval_dt) AS oldest_dt, max(interval_dt) AS latest_dt FROM interval_meta USE INDEX (IDX_INTERVAL_META_MARKET_ID) WHERE market_id = (SELECT DISTINCT market_id FROM market_meta USE INDEX (PRIMARY) WHERE lower(market_name) = %s)", (mkt_name.lower(),))
rs = cursor.fetchone()
cursor.close()
cnx.close()
print("Database Oldest Date (MM-DD-YYYY): " + datetime.datetime.strftime(rs[0], "%m-%d-%Y"))
dbdt_start = datetime.datetime.strptime(datetime.datetime.strftime(rs[0], "%Y-%m-%d"), "%Y-%m-%d")
print("Database Latest Date (MM-DD-YYYY): " + datetime.datetime.strftime(rs[1], "%m-%d-%Y"))
dbdt_end = datetime.datetime.strptime(datetime.datetime.strftime(rs[1], "%Y-%m-%d"), "%Y-%m-%d")
print("Script Start Date (MM-DD-YYYY): " + start_dt)
start_dt = datetime.datetime.strptime(start_dt.split('-')[2] + '-' + start_dt.split('-')[0] + '-' + start_dt.split('-')[1], "%Y-%m-%d")
print("Script End Date (MM-DD-YYYY): " + end_dt)
end_dt = datetime.datetime.strptime(end_dt.split('-')[2] + '-' + end_dt.split('-')[0] + '-' + end_dt.split('-')[1], "%Y-%m-%d")
if start_dt == (dbdt_end + datetime.timedelta(hours = 24, minutes = 0)) and end_dt >= start_dt and end_dt <= datetime.datetime.strptime(datetime.datetime.strftime(datetime.datetime.now() - datetime.timedelta(hours = 24, minutes = 0), "%Y-%m-%d"), "%Y-%m-%d"):
print ("\nThe database date validation check has completed successfully. The program will now execute...\n")
return True
else:
actual_st = datetime.datetime.strftime(dbdt_end + datetime.timedelta(hours = 24, minutes = 0), "%Y-%m-%d")
actual_ed = datetime.datetime.strftime(datetime.datetime.now() - datetime.timedelta(hours = 24, minutes = 0), "%Y-%m-%d")
print ("\nPlease check the script start and end dates properly. The start date must be set to " + actual_st.split('-')[1] + '-' + actual_st.split('-')[2] + '-' + actual_st.split('-')[0] + " (MM-DD-YYYY) and the end date must be less than or equal to " + actual_ed.split('-')[1] + '-' + actual_ed.split('-')[2] + '-' + actual_ed.split('-')[0] + " (MM-DD-YYYY) and also not less than the start date.")
return False
except Exception as e:
print (str(e))
def main():
print ("\n********** Start of the Program **********\n")
# prog_dir is the main directory under which the CSV files will be stored
#prog_dir = "C:\\Users\\Omkar Sunkersett\\Downloads\\markets"
# These respective variables set the start and end dates for fetching data from the server
#startdatetime = "MM-DD-YYYY"
#enddatetime = "MM-DD-YYYY"
if dbdt_check("SPP", startdatetime, enddatetime):
# Code for fetching the CSV files from the server for historical offers
#histoff_or = SPP("pubftp.spp.org", "/Markets/HistoricalOffers/", startdatetime, enddatetime, prog_dir)
#histoff_or.fetch_files("/Markets/HistoricalOffers", "")
#rint(histoff_or)
# Code for loading the historical offer related CSV data into the not-published database for OR only
# IMPORTANT: Make sure you have the latest backup of the database before uncommenting the below lines
#print ("\nLoading the new data into the database...\n")
#etl_file_data(prog_dir + "\\cache\\spp\\Markets\HistoricalOffers.txt")
print ("\n********** End of the Program **********\n")
main() | next(frows, None)
offer_base_rs = []
| random_line_split |
TypeScriptHelpers.ts | // Copyright (c) Microsoft Corporation. All rights reserved. Licensed under the MIT license.
// See LICENSE in the project root for license information.
/* eslint-disable no-bitwise */
import * as ts from 'typescript';
import { SourceFileLocationFormatter } from './SourceFileLocationFormatter';
import { TypeScriptInternals } from './TypeScriptInternals';
import { InternalError } from '@rushstack/node-core-library';
export class TypeScriptHelpers {
// Matches TypeScript's encoded names for well-known ECMAScript symbols like
// "__@iterator" or "__@toStringTag".
private static readonly _wellKnownSymbolNameRegExp: RegExp = /^__@(\w+)$/;
// Matches TypeScript's encoded names for late-bound symbols derived from `unique symbol` declarations
// which have the form of "__@<variableName>@<symbolId>", i.e. "__@someSymbol@12345".
private static readonly _uniqueSymbolNameRegExp: RegExp = /^__@.*@\d+$/;
/**
* This traverses any symbol aliases to find the original place where an item was defined.
* For example, suppose a class is defined as "export default class MyClass { }"
* but exported from the package's index.ts like this:
*
* export { default as _MyClass } from './MyClass';
*
* In this example, calling followAliases() on the _MyClass symbol will return the
* original definition of MyClass, traversing any intermediary places where the
* symbol was imported and re-exported.
*/
public static followAliases(symbol: ts.Symbol, typeChecker: ts.TypeChecker): ts.Symbol {
let current: ts.Symbol = symbol;
for (;;) {
if (!(current.flags & ts.SymbolFlags.Alias)) {
break;
}
const currentAlias: ts.Symbol = typeChecker.getAliasedSymbol(current);
if (!currentAlias || currentAlias === current) {
break;
}
current = currentAlias;
}
return current;
}
/**
* Returns true if TypeScriptHelpers.followAliases() would return something different
* from the input `symbol`.
*/
public static isFollowableAlias(symbol: ts.Symbol, typeChecker: ts.TypeChecker): boolean {
if (!(symbol.flags & ts.SymbolFlags.Alias)) {
return false;
}
const alias: ts.Symbol = typeChecker.getAliasedSymbol(symbol);
if (!alias || alias === symbol) {
return false;
}
return true;
}
/**
* Certain virtual symbols do not have any declarations. For example, `ts.TypeChecker.getExportsOfModule()` can
* sometimes return a "prototype" symbol for an object, even though there is no corresponding declaration in the
* source code. API Extractor generally ignores such symbols.
*/
public static tryGetADeclaration(symbol: ts.Symbol): ts.Declaration | undefined {
if (symbol.declarations && symbol.declarations.length > 0) {
return symbol.declarations[0];
}
return undefined;
}
/**
* Returns true if the specified symbol is an ambient declaration.
*/
public static isAmbient(symbol: ts.Symbol, typeChecker: ts.TypeChecker): boolean {
const followedSymbol: ts.Symbol = TypeScriptHelpers.followAliases(symbol, typeChecker);
if (followedSymbol.declarations && followedSymbol.declarations.length > 0) {
const firstDeclaration: ts.Declaration = followedSymbol.declarations[0];
// Test 1: Are we inside the sinister "declare global {" construct?
const highestModuleDeclaration: ts.ModuleDeclaration | undefined = TypeScriptHelpers.findHighestParent(
firstDeclaration,
ts.SyntaxKind.ModuleDeclaration
);
if (highestModuleDeclaration) {
if (highestModuleDeclaration.name.getText().trim() === 'global') {
return true;
}
}
// Test 2: Otherwise, the main heuristic for ambient declarations is by looking at the
// ts.SyntaxKind.SourceFile node to see whether it has a symbol or not (i.e. whether it
// is acting as a module or not).
const sourceFile: ts.SourceFile = firstDeclaration.getSourceFile();
if (typeChecker.getSymbolAtLocation(sourceFile)) {
return false;
}
}
return true;
}
/**
* Same semantics as tryGetSymbolForDeclaration(), but throws an exception if the symbol
* cannot be found.
*/
public static getSymbolForDeclaration(declaration: ts.Declaration, checker: ts.TypeChecker): ts.Symbol {
const symbol: ts.Symbol | undefined = TypeScriptInternals.tryGetSymbolForDeclaration(
declaration,
checker
);
if (!symbol) {
throw new InternalError(
'Unable to determine semantic information for declaration:\n' +
SourceFileLocationFormatter.formatDeclaration(declaration)
);
}
return symbol;
}
// Return name of the module, which could be like "./SomeLocalFile' or like 'external-package/entry/point'
public static getModuleSpecifier(
nodeWithModuleSpecifier: ts.ImportDeclaration | ts.ExportDeclaration | ts.ImportTypeNode
): string | undefined {
if (nodeWithModuleSpecifier.kind === ts.SyntaxKind.ImportType) {
// As specified internally in typescript:/src/compiler/types.ts#ValidImportTypeNode
if (
nodeWithModuleSpecifier.argument.kind !== ts.SyntaxKind.LiteralType ||
(nodeWithModuleSpecifier.argument as ts.LiteralTypeNode).literal.kind !== ts.SyntaxKind.StringLiteral
) {
throw new InternalError(
`Invalid ImportTypeNode: ${nodeWithModuleSpecifier.getText()}\n` +
SourceFileLocationFormatter.formatDeclaration(nodeWithModuleSpecifier)
);
}
const literalTypeNode: ts.LiteralTypeNode = nodeWithModuleSpecifier.argument as ts.LiteralTypeNode;
const stringLiteral: ts.StringLiteral = literalTypeNode.literal as ts.StringLiteral;
return stringLiteral.text.trim();
}
// Node is a declaration
if (
nodeWithModuleSpecifier.moduleSpecifier &&
ts.isStringLiteralLike(nodeWithModuleSpecifier.moduleSpecifier)
) {
return TypeScriptInternals.getTextOfIdentifierOrLiteral(nodeWithModuleSpecifier.moduleSpecifier);
}
return undefined;
}
/**
* Returns an ancestor of "node", such that the ancestor, any intermediary nodes,
* and the starting node match a list of expected kinds. Undefined is returned
* if there aren't enough ancestors, or if the kinds are incorrect.
*
* For example, suppose child "C" has parents A --> B --> C.
*
* Calling _matchAncestor(C, [ExportSpecifier, NamedExports, ExportDeclaration])
* would return A only if A is of kind ExportSpecifier, B is of kind NamedExports,
* and C is of kind ExportDeclaration.
*
* Calling _matchAncestor(C, [ExportDeclaration]) would return C.
*/
public static matchAncestor<T extends ts.Node>(
node: ts.Node,
kindsToMatch: ts.SyntaxKind[]
): T | undefined {
// (slice(0) clones an array)
const reversedParentKinds: ts.SyntaxKind[] = kindsToMatch.slice(0).reverse();
let current: ts.Node | undefined = undefined;
for (const parentKind of reversedParentKinds) {
if (!current) {
// The first time through, start with node
current = node;
} else {
// Then walk the parents
current = current.parent;
}
// If we ran out of items, or if the kind doesn't match, then fail
if (!current || current.kind !== parentKind) {
return undefined;
}
}
// If we matched everything, then return the node that matched the last parentKinds item
return current as T;
}
/**
* Does a depth-first search of the children of the specified node. Returns the first child
* with the specified kind, or undefined if there is no match.
*/
public static | <T extends ts.Node>(
node: ts.Node,
kindToMatch: ts.SyntaxKind
): T | undefined {
for (const child of node.getChildren()) {
if (child.kind === kindToMatch) {
return child as T;
}
const recursiveMatch: T | undefined = TypeScriptHelpers.findFirstChildNode(child, kindToMatch);
if (recursiveMatch) {
return recursiveMatch;
}
}
return undefined;
}
/**
* Returns the first parent node with the specified SyntaxKind, or undefined if there is no match.
*/
public static findFirstParent<T extends ts.Node>(node: ts.Node, kindToMatch: ts.SyntaxKind): T | undefined {
let current: ts.Node | undefined = node.parent;
while (current) {
if (current.kind === kindToMatch) {
return current as T;
}
current = current.parent;
}
return undefined;
}
/**
* Returns the highest parent node with the specified SyntaxKind, or undefined if there is no match.
* @remarks
* Whereas findFirstParent() returns the first match, findHighestParent() returns the last match.
*/
public static findHighestParent<T extends ts.Node>(
node: ts.Node,
kindToMatch: ts.SyntaxKind
): T | undefined {
let current: ts.Node | undefined = node;
let highest: T | undefined = undefined;
for (;;) {
current = TypeScriptHelpers.findFirstParent<T>(current, kindToMatch);
if (!current) {
break;
}
highest = current as T;
}
return highest;
}
/**
* Decodes the names that the compiler generates for a built-in ECMAScript symbol.
*
* @remarks
* TypeScript binds well-known ECMAScript symbols like `[Symbol.iterator]` as `__@iterator`.
* If `name` is of this form, then `tryGetWellKnownSymbolName()` converts it back into e.g. `[Symbol.iterator]`.
* If the string does not start with `__@` then `undefined` is returned.
*/
public static tryDecodeWellKnownSymbolName(name: ts.__String): string | undefined {
const match: RegExpExecArray | null = TypeScriptHelpers._wellKnownSymbolNameRegExp.exec(name as string);
if (match) {
const identifier: string = match[1];
return `[Symbol.${identifier}]`;
}
return undefined;
}
/**
* Returns whether the provided name was generated for a TypeScript `unique symbol`.
*/
public static isUniqueSymbolName(name: ts.__String): boolean {
return TypeScriptHelpers._uniqueSymbolNameRegExp.test(name as string);
}
/**
* Derives the string representation of a TypeScript late-bound symbol.
*/
public static tryGetLateBoundName(declarationName: ts.ComputedPropertyName): string | undefined {
// Create a node printer that ignores comments and indentation that we can use to convert
// declarationName to a string.
const printer: ts.Printer = ts.createPrinter(
{ removeComments: true },
{
onEmitNode(
hint: ts.EmitHint,
node: ts.Node,
emitCallback: (hint: ts.EmitHint, node: ts.Node) => void
): void {
ts.setEmitFlags(declarationName, ts.EmitFlags.NoIndentation | ts.EmitFlags.SingleLine);
emitCallback(hint, node);
}
}
);
const sourceFile: ts.SourceFile = declarationName.getSourceFile();
const text: string = printer.printNode(ts.EmitHint.Unspecified, declarationName, sourceFile);
// clean up any emit flags we've set on any nodes in the tree.
ts.disposeEmitNodes(sourceFile);
return text;
}
}
| findFirstChildNode | identifier_name |
TypeScriptHelpers.ts | // Copyright (c) Microsoft Corporation. All rights reserved. Licensed under the MIT license.
// See LICENSE in the project root for license information.
/* eslint-disable no-bitwise */
import * as ts from 'typescript';
import { SourceFileLocationFormatter } from './SourceFileLocationFormatter';
import { TypeScriptInternals } from './TypeScriptInternals';
import { InternalError } from '@rushstack/node-core-library';
export class TypeScriptHelpers {
// Matches TypeScript's encoded names for well-known ECMAScript symbols like
// "__@iterator" or "__@toStringTag".
private static readonly _wellKnownSymbolNameRegExp: RegExp = /^__@(\w+)$/;
// Matches TypeScript's encoded names for late-bound symbols derived from `unique symbol` declarations
// which have the form of "__@<variableName>@<symbolId>", i.e. "__@someSymbol@12345".
private static readonly _uniqueSymbolNameRegExp: RegExp = /^__@.*@\d+$/;
/**
* This traverses any symbol aliases to find the original place where an item was defined.
* For example, suppose a class is defined as "export default class MyClass { }"
* but exported from the package's index.ts like this:
*
* export { default as _MyClass } from './MyClass';
*
* In this example, calling followAliases() on the _MyClass symbol will return the
* original definition of MyClass, traversing any intermediary places where the
* symbol was imported and re-exported.
*/
public static followAliases(symbol: ts.Symbol, typeChecker: ts.TypeChecker): ts.Symbol {
let current: ts.Symbol = symbol;
for (;;) {
if (!(current.flags & ts.SymbolFlags.Alias)) {
break;
}
const currentAlias: ts.Symbol = typeChecker.getAliasedSymbol(current);
if (!currentAlias || currentAlias === current) {
break;
}
current = currentAlias;
}
return current;
}
/**
* Returns true if TypeScriptHelpers.followAliases() would return something different
* from the input `symbol`.
*/
public static isFollowableAlias(symbol: ts.Symbol, typeChecker: ts.TypeChecker): boolean {
if (!(symbol.flags & ts.SymbolFlags.Alias)) {
return false;
}
const alias: ts.Symbol = typeChecker.getAliasedSymbol(symbol);
if (!alias || alias === symbol) {
return false;
}
return true;
}
/**
* Certain virtual symbols do not have any declarations. For example, `ts.TypeChecker.getExportsOfModule()` can
* sometimes return a "prototype" symbol for an object, even though there is no corresponding declaration in the
* source code. API Extractor generally ignores such symbols.
*/
public static tryGetADeclaration(symbol: ts.Symbol): ts.Declaration | undefined {
if (symbol.declarations && symbol.declarations.length > 0) {
return symbol.declarations[0];
}
return undefined;
}
/**
* Returns true if the specified symbol is an ambient declaration.
*/
public static isAmbient(symbol: ts.Symbol, typeChecker: ts.TypeChecker): boolean {
const followedSymbol: ts.Symbol = TypeScriptHelpers.followAliases(symbol, typeChecker);
if (followedSymbol.declarations && followedSymbol.declarations.length > 0) {
const firstDeclaration: ts.Declaration = followedSymbol.declarations[0];
// Test 1: Are we inside the sinister "declare global {" construct?
const highestModuleDeclaration: ts.ModuleDeclaration | undefined = TypeScriptHelpers.findHighestParent(
firstDeclaration,
ts.SyntaxKind.ModuleDeclaration
);
if (highestModuleDeclaration) {
if (highestModuleDeclaration.name.getText().trim() === 'global') {
return true;
}
}
// Test 2: Otherwise, the main heuristic for ambient declarations is by looking at the
// ts.SyntaxKind.SourceFile node to see whether it has a symbol or not (i.e. whether it
// is acting as a module or not).
const sourceFile: ts.SourceFile = firstDeclaration.getSourceFile();
if (typeChecker.getSymbolAtLocation(sourceFile)) {
return false;
}
}
return true;
}
/**
* Same semantics as tryGetSymbolForDeclaration(), but throws an exception if the symbol
* cannot be found.
*/
public static getSymbolForDeclaration(declaration: ts.Declaration, checker: ts.TypeChecker): ts.Symbol {
const symbol: ts.Symbol | undefined = TypeScriptInternals.tryGetSymbolForDeclaration(
declaration,
checker
);
if (!symbol) {
throw new InternalError(
'Unable to determine semantic information for declaration:\n' +
SourceFileLocationFormatter.formatDeclaration(declaration)
);
}
return symbol;
}
// Return name of the module, which could be like "./SomeLocalFile' or like 'external-package/entry/point'
public static getModuleSpecifier(
nodeWithModuleSpecifier: ts.ImportDeclaration | ts.ExportDeclaration | ts.ImportTypeNode
): string | undefined {
if (nodeWithModuleSpecifier.kind === ts.SyntaxKind.ImportType) {
// As specified internally in typescript:/src/compiler/types.ts#ValidImportTypeNode
if (
nodeWithModuleSpecifier.argument.kind !== ts.SyntaxKind.LiteralType ||
(nodeWithModuleSpecifier.argument as ts.LiteralTypeNode).literal.kind !== ts.SyntaxKind.StringLiteral
) {
throw new InternalError(
`Invalid ImportTypeNode: ${nodeWithModuleSpecifier.getText()}\n` +
SourceFileLocationFormatter.formatDeclaration(nodeWithModuleSpecifier)
);
}
const literalTypeNode: ts.LiteralTypeNode = nodeWithModuleSpecifier.argument as ts.LiteralTypeNode;
const stringLiteral: ts.StringLiteral = literalTypeNode.literal as ts.StringLiteral;
return stringLiteral.text.trim();
}
// Node is a declaration
if (
nodeWithModuleSpecifier.moduleSpecifier &&
ts.isStringLiteralLike(nodeWithModuleSpecifier.moduleSpecifier)
) {
return TypeScriptInternals.getTextOfIdentifierOrLiteral(nodeWithModuleSpecifier.moduleSpecifier);
}
return undefined;
}
/**
* Returns an ancestor of "node", such that the ancestor, any intermediary nodes,
* and the starting node match a list of expected kinds. Undefined is returned
* if there aren't enough ancestors, or if the kinds are incorrect.
*
* For example, suppose child "C" has parents A --> B --> C.
*
* Calling _matchAncestor(C, [ExportSpecifier, NamedExports, ExportDeclaration])
* would return A only if A is of kind ExportSpecifier, B is of kind NamedExports,
* and C is of kind ExportDeclaration.
*
* Calling _matchAncestor(C, [ExportDeclaration]) would return C.
*/
public static matchAncestor<T extends ts.Node>(
node: ts.Node,
kindsToMatch: ts.SyntaxKind[]
): T | undefined |
/**
* Does a depth-first search of the children of the specified node. Returns the first child
* with the specified kind, or undefined if there is no match.
*/
public static findFirstChildNode<T extends ts.Node>(
node: ts.Node,
kindToMatch: ts.SyntaxKind
): T | undefined {
for (const child of node.getChildren()) {
if (child.kind === kindToMatch) {
return child as T;
}
const recursiveMatch: T | undefined = TypeScriptHelpers.findFirstChildNode(child, kindToMatch);
if (recursiveMatch) {
return recursiveMatch;
}
}
return undefined;
}
/**
* Returns the first parent node with the specified SyntaxKind, or undefined if there is no match.
*/
public static findFirstParent<T extends ts.Node>(node: ts.Node, kindToMatch: ts.SyntaxKind): T | undefined {
let current: ts.Node | undefined = node.parent;
while (current) {
if (current.kind === kindToMatch) {
return current as T;
}
current = current.parent;
}
return undefined;
}
/**
* Returns the highest parent node with the specified SyntaxKind, or undefined if there is no match.
* @remarks
* Whereas findFirstParent() returns the first match, findHighestParent() returns the last match.
*/
public static findHighestParent<T extends ts.Node>(
node: ts.Node,
kindToMatch: ts.SyntaxKind
): T | undefined {
let current: ts.Node | undefined = node;
let highest: T | undefined = undefined;
for (;;) {
current = TypeScriptHelpers.findFirstParent<T>(current, kindToMatch);
if (!current) {
break;
}
highest = current as T;
}
return highest;
}
/**
* Decodes the names that the compiler generates for a built-in ECMAScript symbol.
*
* @remarks
* TypeScript binds well-known ECMAScript symbols like `[Symbol.iterator]` as `__@iterator`.
* If `name` is of this form, then `tryGetWellKnownSymbolName()` converts it back into e.g. `[Symbol.iterator]`.
* If the string does not start with `__@` then `undefined` is returned.
*/
public static tryDecodeWellKnownSymbolName(name: ts.__String): string | undefined {
const match: RegExpExecArray | null = TypeScriptHelpers._wellKnownSymbolNameRegExp.exec(name as string);
if (match) {
const identifier: string = match[1];
return `[Symbol.${identifier}]`;
}
return undefined;
}
/**
* Returns whether the provided name was generated for a TypeScript `unique symbol`.
*/
public static isUniqueSymbolName(name: ts.__String): boolean {
return TypeScriptHelpers._uniqueSymbolNameRegExp.test(name as string);
}
/**
* Derives the string representation of a TypeScript late-bound symbol.
*/
public static tryGetLateBoundName(declarationName: ts.ComputedPropertyName): string | undefined {
// Create a node printer that ignores comments and indentation that we can use to convert
// declarationName to a string.
const printer: ts.Printer = ts.createPrinter(
{ removeComments: true },
{
onEmitNode(
hint: ts.EmitHint,
node: ts.Node,
emitCallback: (hint: ts.EmitHint, node: ts.Node) => void
): void {
ts.setEmitFlags(declarationName, ts.EmitFlags.NoIndentation | ts.EmitFlags.SingleLine);
emitCallback(hint, node);
}
}
);
const sourceFile: ts.SourceFile = declarationName.getSourceFile();
const text: string = printer.printNode(ts.EmitHint.Unspecified, declarationName, sourceFile);
// clean up any emit flags we've set on any nodes in the tree.
ts.disposeEmitNodes(sourceFile);
return text;
}
}
| {
// (slice(0) clones an array)
const reversedParentKinds: ts.SyntaxKind[] = kindsToMatch.slice(0).reverse();
let current: ts.Node | undefined = undefined;
for (const parentKind of reversedParentKinds) {
if (!current) {
// The first time through, start with node
current = node;
} else {
// Then walk the parents
current = current.parent;
}
// If we ran out of items, or if the kind doesn't match, then fail
if (!current || current.kind !== parentKind) {
return undefined;
}
}
// If we matched everything, then return the node that matched the last parentKinds item
return current as T;
} | identifier_body |
TypeScriptHelpers.ts | // Copyright (c) Microsoft Corporation. All rights reserved. Licensed under the MIT license.
// See LICENSE in the project root for license information.
/* eslint-disable no-bitwise */
import * as ts from 'typescript';
import { SourceFileLocationFormatter } from './SourceFileLocationFormatter';
import { TypeScriptInternals } from './TypeScriptInternals';
import { InternalError } from '@rushstack/node-core-library';
export class TypeScriptHelpers {
// Matches TypeScript's encoded names for well-known ECMAScript symbols like
// "__@iterator" or "__@toStringTag".
private static readonly _wellKnownSymbolNameRegExp: RegExp = /^__@(\w+)$/;
// Matches TypeScript's encoded names for late-bound symbols derived from `unique symbol` declarations
// which have the form of "__@<variableName>@<symbolId>", i.e. "__@someSymbol@12345".
private static readonly _uniqueSymbolNameRegExp: RegExp = /^__@.*@\d+$/;
/**
* This traverses any symbol aliases to find the original place where an item was defined.
* For example, suppose a class is defined as "export default class MyClass { }"
* but exported from the package's index.ts like this:
*
* export { default as _MyClass } from './MyClass';
*
* In this example, calling followAliases() on the _MyClass symbol will return the
* original definition of MyClass, traversing any intermediary places where the
* symbol was imported and re-exported.
*/
public static followAliases(symbol: ts.Symbol, typeChecker: ts.TypeChecker): ts.Symbol {
let current: ts.Symbol = symbol;
for (;;) {
if (!(current.flags & ts.SymbolFlags.Alias)) {
break;
}
const currentAlias: ts.Symbol = typeChecker.getAliasedSymbol(current);
if (!currentAlias || currentAlias === current) {
break;
}
current = currentAlias;
}
return current;
}
/**
* Returns true if TypeScriptHelpers.followAliases() would return something different
* from the input `symbol`.
*/
public static isFollowableAlias(symbol: ts.Symbol, typeChecker: ts.TypeChecker): boolean {
if (!(symbol.flags & ts.SymbolFlags.Alias)) {
return false;
}
const alias: ts.Symbol = typeChecker.getAliasedSymbol(symbol);
if (!alias || alias === symbol) {
return false;
}
return true;
}
/**
* Certain virtual symbols do not have any declarations. For example, `ts.TypeChecker.getExportsOfModule()` can
* sometimes return a "prototype" symbol for an object, even though there is no corresponding declaration in the
* source code. API Extractor generally ignores such symbols.
*/
public static tryGetADeclaration(symbol: ts.Symbol): ts.Declaration | undefined {
if (symbol.declarations && symbol.declarations.length > 0) {
return symbol.declarations[0];
}
return undefined;
}
/**
* Returns true if the specified symbol is an ambient declaration.
*/
public static isAmbient(symbol: ts.Symbol, typeChecker: ts.TypeChecker): boolean {
const followedSymbol: ts.Symbol = TypeScriptHelpers.followAliases(symbol, typeChecker);
if (followedSymbol.declarations && followedSymbol.declarations.length > 0) {
const firstDeclaration: ts.Declaration = followedSymbol.declarations[0];
// Test 1: Are we inside the sinister "declare global {" construct?
const highestModuleDeclaration: ts.ModuleDeclaration | undefined = TypeScriptHelpers.findHighestParent(
firstDeclaration,
ts.SyntaxKind.ModuleDeclaration
);
if (highestModuleDeclaration) {
if (highestModuleDeclaration.name.getText().trim() === 'global') {
return true;
}
}
// Test 2: Otherwise, the main heuristic for ambient declarations is by looking at the
// ts.SyntaxKind.SourceFile node to see whether it has a symbol or not (i.e. whether it
// is acting as a module or not).
const sourceFile: ts.SourceFile = firstDeclaration.getSourceFile();
if (typeChecker.getSymbolAtLocation(sourceFile)) {
return false;
}
}
return true;
}
/**
* Same semantics as tryGetSymbolForDeclaration(), but throws an exception if the symbol
* cannot be found.
*/
public static getSymbolForDeclaration(declaration: ts.Declaration, checker: ts.TypeChecker): ts.Symbol { | const symbol: ts.Symbol | undefined = TypeScriptInternals.tryGetSymbolForDeclaration(
declaration,
checker
);
if (!symbol) {
throw new InternalError(
'Unable to determine semantic information for declaration:\n' +
SourceFileLocationFormatter.formatDeclaration(declaration)
);
}
return symbol;
}
// Return name of the module, which could be like "./SomeLocalFile' or like 'external-package/entry/point'
public static getModuleSpecifier(
nodeWithModuleSpecifier: ts.ImportDeclaration | ts.ExportDeclaration | ts.ImportTypeNode
): string | undefined {
if (nodeWithModuleSpecifier.kind === ts.SyntaxKind.ImportType) {
// As specified internally in typescript:/src/compiler/types.ts#ValidImportTypeNode
if (
nodeWithModuleSpecifier.argument.kind !== ts.SyntaxKind.LiteralType ||
(nodeWithModuleSpecifier.argument as ts.LiteralTypeNode).literal.kind !== ts.SyntaxKind.StringLiteral
) {
throw new InternalError(
`Invalid ImportTypeNode: ${nodeWithModuleSpecifier.getText()}\n` +
SourceFileLocationFormatter.formatDeclaration(nodeWithModuleSpecifier)
);
}
const literalTypeNode: ts.LiteralTypeNode = nodeWithModuleSpecifier.argument as ts.LiteralTypeNode;
const stringLiteral: ts.StringLiteral = literalTypeNode.literal as ts.StringLiteral;
return stringLiteral.text.trim();
}
// Node is a declaration
if (
nodeWithModuleSpecifier.moduleSpecifier &&
ts.isStringLiteralLike(nodeWithModuleSpecifier.moduleSpecifier)
) {
return TypeScriptInternals.getTextOfIdentifierOrLiteral(nodeWithModuleSpecifier.moduleSpecifier);
}
return undefined;
}
/**
* Returns an ancestor of "node", such that the ancestor, any intermediary nodes,
* and the starting node match a list of expected kinds. Undefined is returned
* if there aren't enough ancestors, or if the kinds are incorrect.
*
* For example, suppose child "C" has parents A --> B --> C.
*
* Calling _matchAncestor(C, [ExportSpecifier, NamedExports, ExportDeclaration])
* would return A only if A is of kind ExportSpecifier, B is of kind NamedExports,
* and C is of kind ExportDeclaration.
*
* Calling _matchAncestor(C, [ExportDeclaration]) would return C.
*/
public static matchAncestor<T extends ts.Node>(
node: ts.Node,
kindsToMatch: ts.SyntaxKind[]
): T | undefined {
// (slice(0) clones an array)
const reversedParentKinds: ts.SyntaxKind[] = kindsToMatch.slice(0).reverse();
let current: ts.Node | undefined = undefined;
for (const parentKind of reversedParentKinds) {
if (!current) {
// The first time through, start with node
current = node;
} else {
// Then walk the parents
current = current.parent;
}
// If we ran out of items, or if the kind doesn't match, then fail
if (!current || current.kind !== parentKind) {
return undefined;
}
}
// If we matched everything, then return the node that matched the last parentKinds item
return current as T;
}
/**
* Does a depth-first search of the children of the specified node. Returns the first child
* with the specified kind, or undefined if there is no match.
*/
public static findFirstChildNode<T extends ts.Node>(
node: ts.Node,
kindToMatch: ts.SyntaxKind
): T | undefined {
for (const child of node.getChildren()) {
if (child.kind === kindToMatch) {
return child as T;
}
const recursiveMatch: T | undefined = TypeScriptHelpers.findFirstChildNode(child, kindToMatch);
if (recursiveMatch) {
return recursiveMatch;
}
}
return undefined;
}
/**
* Returns the first parent node with the specified SyntaxKind, or undefined if there is no match.
*/
public static findFirstParent<T extends ts.Node>(node: ts.Node, kindToMatch: ts.SyntaxKind): T | undefined {
let current: ts.Node | undefined = node.parent;
while (current) {
if (current.kind === kindToMatch) {
return current as T;
}
current = current.parent;
}
return undefined;
}
/**
* Returns the highest parent node with the specified SyntaxKind, or undefined if there is no match.
* @remarks
* Whereas findFirstParent() returns the first match, findHighestParent() returns the last match.
*/
public static findHighestParent<T extends ts.Node>(
node: ts.Node,
kindToMatch: ts.SyntaxKind
): T | undefined {
let current: ts.Node | undefined = node;
let highest: T | undefined = undefined;
for (;;) {
current = TypeScriptHelpers.findFirstParent<T>(current, kindToMatch);
if (!current) {
break;
}
highest = current as T;
}
return highest;
}
/**
* Decodes the names that the compiler generates for a built-in ECMAScript symbol.
*
* @remarks
* TypeScript binds well-known ECMAScript symbols like `[Symbol.iterator]` as `__@iterator`.
* If `name` is of this form, then `tryGetWellKnownSymbolName()` converts it back into e.g. `[Symbol.iterator]`.
* If the string does not start with `__@` then `undefined` is returned.
*/
public static tryDecodeWellKnownSymbolName(name: ts.__String): string | undefined {
const match: RegExpExecArray | null = TypeScriptHelpers._wellKnownSymbolNameRegExp.exec(name as string);
if (match) {
const identifier: string = match[1];
return `[Symbol.${identifier}]`;
}
return undefined;
}
/**
* Returns whether the provided name was generated for a TypeScript `unique symbol`.
*/
public static isUniqueSymbolName(name: ts.__String): boolean {
return TypeScriptHelpers._uniqueSymbolNameRegExp.test(name as string);
}
/**
* Derives the string representation of a TypeScript late-bound symbol.
*/
public static tryGetLateBoundName(declarationName: ts.ComputedPropertyName): string | undefined {
// Create a node printer that ignores comments and indentation that we can use to convert
// declarationName to a string.
const printer: ts.Printer = ts.createPrinter(
{ removeComments: true },
{
onEmitNode(
hint: ts.EmitHint,
node: ts.Node,
emitCallback: (hint: ts.EmitHint, node: ts.Node) => void
): void {
ts.setEmitFlags(declarationName, ts.EmitFlags.NoIndentation | ts.EmitFlags.SingleLine);
emitCallback(hint, node);
}
}
);
const sourceFile: ts.SourceFile = declarationName.getSourceFile();
const text: string = printer.printNode(ts.EmitHint.Unspecified, declarationName, sourceFile);
// clean up any emit flags we've set on any nodes in the tree.
ts.disposeEmitNodes(sourceFile);
return text;
}
} | random_line_split | |
TypeScriptHelpers.ts | // Copyright (c) Microsoft Corporation. All rights reserved. Licensed under the MIT license.
// See LICENSE in the project root for license information.
/* eslint-disable no-bitwise */
import * as ts from 'typescript';
import { SourceFileLocationFormatter } from './SourceFileLocationFormatter';
import { TypeScriptInternals } from './TypeScriptInternals';
import { InternalError } from '@rushstack/node-core-library';
export class TypeScriptHelpers {
// Matches TypeScript's encoded names for well-known ECMAScript symbols like
// "__@iterator" or "__@toStringTag".
private static readonly _wellKnownSymbolNameRegExp: RegExp = /^__@(\w+)$/;
// Matches TypeScript's encoded names for late-bound symbols derived from `unique symbol` declarations
// which have the form of "__@<variableName>@<symbolId>", i.e. "__@someSymbol@12345".
private static readonly _uniqueSymbolNameRegExp: RegExp = /^__@.*@\d+$/;
/**
* This traverses any symbol aliases to find the original place where an item was defined.
* For example, suppose a class is defined as "export default class MyClass { }"
* but exported from the package's index.ts like this:
*
* export { default as _MyClass } from './MyClass';
*
* In this example, calling followAliases() on the _MyClass symbol will return the
* original definition of MyClass, traversing any intermediary places where the
* symbol was imported and re-exported.
*/
public static followAliases(symbol: ts.Symbol, typeChecker: ts.TypeChecker): ts.Symbol {
let current: ts.Symbol = symbol;
for (;;) {
if (!(current.flags & ts.SymbolFlags.Alias)) {
break;
}
const currentAlias: ts.Symbol = typeChecker.getAliasedSymbol(current);
if (!currentAlias || currentAlias === current) {
break;
}
current = currentAlias;
}
return current;
}
/**
* Returns true if TypeScriptHelpers.followAliases() would return something different
* from the input `symbol`.
*/
public static isFollowableAlias(symbol: ts.Symbol, typeChecker: ts.TypeChecker): boolean {
if (!(symbol.flags & ts.SymbolFlags.Alias)) {
return false;
}
const alias: ts.Symbol = typeChecker.getAliasedSymbol(symbol);
if (!alias || alias === symbol) {
return false;
}
return true;
}
/**
* Certain virtual symbols do not have any declarations. For example, `ts.TypeChecker.getExportsOfModule()` can
* sometimes return a "prototype" symbol for an object, even though there is no corresponding declaration in the
* source code. API Extractor generally ignores such symbols.
*/
public static tryGetADeclaration(symbol: ts.Symbol): ts.Declaration | undefined {
if (symbol.declarations && symbol.declarations.length > 0) {
return symbol.declarations[0];
}
return undefined;
}
/**
* Returns true if the specified symbol is an ambient declaration.
*/
public static isAmbient(symbol: ts.Symbol, typeChecker: ts.TypeChecker): boolean {
const followedSymbol: ts.Symbol = TypeScriptHelpers.followAliases(symbol, typeChecker);
if (followedSymbol.declarations && followedSymbol.declarations.length > 0) {
const firstDeclaration: ts.Declaration = followedSymbol.declarations[0];
// Test 1: Are we inside the sinister "declare global {" construct?
const highestModuleDeclaration: ts.ModuleDeclaration | undefined = TypeScriptHelpers.findHighestParent(
firstDeclaration,
ts.SyntaxKind.ModuleDeclaration
);
if (highestModuleDeclaration) {
if (highestModuleDeclaration.name.getText().trim() === 'global') {
return true;
}
}
// Test 2: Otherwise, the main heuristic for ambient declarations is by looking at the
// ts.SyntaxKind.SourceFile node to see whether it has a symbol or not (i.e. whether it
// is acting as a module or not).
const sourceFile: ts.SourceFile = firstDeclaration.getSourceFile();
if (typeChecker.getSymbolAtLocation(sourceFile)) {
return false;
}
}
return true;
}
/**
* Same semantics as tryGetSymbolForDeclaration(), but throws an exception if the symbol
* cannot be found.
*/
public static getSymbolForDeclaration(declaration: ts.Declaration, checker: ts.TypeChecker): ts.Symbol {
const symbol: ts.Symbol | undefined = TypeScriptInternals.tryGetSymbolForDeclaration(
declaration,
checker
);
if (!symbol) {
throw new InternalError(
'Unable to determine semantic information for declaration:\n' +
SourceFileLocationFormatter.formatDeclaration(declaration)
);
}
return symbol;
}
// Return name of the module, which could be like "./SomeLocalFile' or like 'external-package/entry/point'
public static getModuleSpecifier(
nodeWithModuleSpecifier: ts.ImportDeclaration | ts.ExportDeclaration | ts.ImportTypeNode
): string | undefined {
if (nodeWithModuleSpecifier.kind === ts.SyntaxKind.ImportType) {
// As specified internally in typescript:/src/compiler/types.ts#ValidImportTypeNode
if (
nodeWithModuleSpecifier.argument.kind !== ts.SyntaxKind.LiteralType ||
(nodeWithModuleSpecifier.argument as ts.LiteralTypeNode).literal.kind !== ts.SyntaxKind.StringLiteral
) {
throw new InternalError(
`Invalid ImportTypeNode: ${nodeWithModuleSpecifier.getText()}\n` +
SourceFileLocationFormatter.formatDeclaration(nodeWithModuleSpecifier)
);
}
const literalTypeNode: ts.LiteralTypeNode = nodeWithModuleSpecifier.argument as ts.LiteralTypeNode;
const stringLiteral: ts.StringLiteral = literalTypeNode.literal as ts.StringLiteral;
return stringLiteral.text.trim();
}
// Node is a declaration
if (
nodeWithModuleSpecifier.moduleSpecifier &&
ts.isStringLiteralLike(nodeWithModuleSpecifier.moduleSpecifier)
) {
return TypeScriptInternals.getTextOfIdentifierOrLiteral(nodeWithModuleSpecifier.moduleSpecifier);
}
return undefined;
}
/**
* Returns an ancestor of "node", such that the ancestor, any intermediary nodes,
* and the starting node match a list of expected kinds. Undefined is returned
* if there aren't enough ancestors, or if the kinds are incorrect.
*
* For example, suppose child "C" has parents A --> B --> C.
*
* Calling _matchAncestor(C, [ExportSpecifier, NamedExports, ExportDeclaration])
* would return A only if A is of kind ExportSpecifier, B is of kind NamedExports,
* and C is of kind ExportDeclaration.
*
* Calling _matchAncestor(C, [ExportDeclaration]) would return C.
*/
public static matchAncestor<T extends ts.Node>(
node: ts.Node,
kindsToMatch: ts.SyntaxKind[]
): T | undefined {
// (slice(0) clones an array)
const reversedParentKinds: ts.SyntaxKind[] = kindsToMatch.slice(0).reverse();
let current: ts.Node | undefined = undefined;
for (const parentKind of reversedParentKinds) {
if (!current) {
// The first time through, start with node
current = node;
} else {
// Then walk the parents
current = current.parent;
}
// If we ran out of items, or if the kind doesn't match, then fail
if (!current || current.kind !== parentKind) {
return undefined;
}
}
// If we matched everything, then return the node that matched the last parentKinds item
return current as T;
}
/**
* Does a depth-first search of the children of the specified node. Returns the first child
* with the specified kind, or undefined if there is no match.
*/
public static findFirstChildNode<T extends ts.Node>(
node: ts.Node,
kindToMatch: ts.SyntaxKind
): T | undefined {
for (const child of node.getChildren()) {
if (child.kind === kindToMatch) {
return child as T;
}
const recursiveMatch: T | undefined = TypeScriptHelpers.findFirstChildNode(child, kindToMatch);
if (recursiveMatch) {
return recursiveMatch;
}
}
return undefined;
}
/**
* Returns the first parent node with the specified SyntaxKind, or undefined if there is no match.
*/
public static findFirstParent<T extends ts.Node>(node: ts.Node, kindToMatch: ts.SyntaxKind): T | undefined {
let current: ts.Node | undefined = node.parent;
while (current) |
return undefined;
}
/**
* Returns the highest parent node with the specified SyntaxKind, or undefined if there is no match.
* @remarks
* Whereas findFirstParent() returns the first match, findHighestParent() returns the last match.
*/
public static findHighestParent<T extends ts.Node>(
node: ts.Node,
kindToMatch: ts.SyntaxKind
): T | undefined {
let current: ts.Node | undefined = node;
let highest: T | undefined = undefined;
for (;;) {
current = TypeScriptHelpers.findFirstParent<T>(current, kindToMatch);
if (!current) {
break;
}
highest = current as T;
}
return highest;
}
/**
* Decodes the names that the compiler generates for a built-in ECMAScript symbol.
*
* @remarks
* TypeScript binds well-known ECMAScript symbols like `[Symbol.iterator]` as `__@iterator`.
* If `name` is of this form, then `tryGetWellKnownSymbolName()` converts it back into e.g. `[Symbol.iterator]`.
* If the string does not start with `__@` then `undefined` is returned.
*/
public static tryDecodeWellKnownSymbolName(name: ts.__String): string | undefined {
const match: RegExpExecArray | null = TypeScriptHelpers._wellKnownSymbolNameRegExp.exec(name as string);
if (match) {
const identifier: string = match[1];
return `[Symbol.${identifier}]`;
}
return undefined;
}
/**
* Returns whether the provided name was generated for a TypeScript `unique symbol`.
*/
public static isUniqueSymbolName(name: ts.__String): boolean {
return TypeScriptHelpers._uniqueSymbolNameRegExp.test(name as string);
}
/**
* Derives the string representation of a TypeScript late-bound symbol.
*/
public static tryGetLateBoundName(declarationName: ts.ComputedPropertyName): string | undefined {
// Create a node printer that ignores comments and indentation that we can use to convert
// declarationName to a string.
const printer: ts.Printer = ts.createPrinter(
{ removeComments: true },
{
onEmitNode(
hint: ts.EmitHint,
node: ts.Node,
emitCallback: (hint: ts.EmitHint, node: ts.Node) => void
): void {
ts.setEmitFlags(declarationName, ts.EmitFlags.NoIndentation | ts.EmitFlags.SingleLine);
emitCallback(hint, node);
}
}
);
const sourceFile: ts.SourceFile = declarationName.getSourceFile();
const text: string = printer.printNode(ts.EmitHint.Unspecified, declarationName, sourceFile);
// clean up any emit flags we've set on any nodes in the tree.
ts.disposeEmitNodes(sourceFile);
return text;
}
}
| {
if (current.kind === kindToMatch) {
return current as T;
}
current = current.parent;
} | conditional_block |
index.js | const d3 = require('d3');
const csvFile = require('./cleaner_csv.csv');
const wordData = require('./words_to_id.csv');
var parseTime = d3.timeParse("%Y-%m-%d %H:%M:%S");
var myList;
let map = new Map();
d3.csv(wordData).then(function(data) {
for (let i = 0; i < data.length; i++) {
// First poblate the map.
if (!map.has(data[i].Word)) {
// If we don't have the next word then we add it with an array.
map.set(data[i].Word, []);
}
// Get the array of the word and push the date.
map.get(data[i].Word).push(data[i].Date);
}
});
drawScatter(myList);
// Set the dimensions and margins of the graph
var margin = {top: 10, right: 30, bottom: 30, left: 60},
width = 1100 - margin.left - margin.right,
height = 700 - margin.top - margin.bottom;
// Append the svg object to the body of the page
var wid = width + margin.left + margin.right;
var hei = height + margin.top + margin.bottom;
var svg = d3.select("#dataviz")
.append("svg")
.attr('preserveAspectRatio', 'xMinYMin meet')
.attr('viewBox', "0 0 " + wid + " " + hei)
//.attr("width", width + margin.left + margin.right)
//.attr("height", height + margin.top + margin.bottom)
.append("g")
.attr("transform",
"translate(" + margin.left + "," + margin.top + ")");
// First suggestion
d3.select("#link1")
.on("click", function(d) {
d3.event.preventDefault();
d3.csv(wordData).then(function(data) {
var americaResults = [];
data.forEach(function(d) {
d.Date = parseTime(d.Date);
if (d.Word === 'makeamericagreatagain') {
americaResults.push(d.Date);
}
});
d3.selectAll("g > *").remove();
drawScatter(americaResults);
})
});
// Second suggestion
d3.select("#link2")
.on("click", function(d) {
d3.event.preventDefault();
d3.csv(wordData).then(function(data) {
var clintonResults = [];
data.forEach(function(d) {
d.Date = parseTime(d.Date);
if (d.Word === 'clinton') {
clintonResults.push(d.Date);
}
});
d3.selectAll("g > *").remove();
drawScatter(clintonResults);
})
});
// Third suggestion
d3.select("#link3")
.on("click", function(d) {
d3.event.preventDefault();
d3.csv(wordData).then(function(data) {
var americaResults = [];
data.forEach(function(d) {
d.Date = parseTime(d.Date);
if (d.Word === 'republican') {
americaResults.push(d.Date);
}
});
d3.selectAll("g > *").remove();
drawScatter(americaResults);
})
});
//search callback
d3.select("#form")
.on("submit", function(d) {
d3.event.preventDefault();
var input = document.getElementById("input").value;
var tokens = input.trim().split(" ");
var searchResults = [];
let valid = true;
let regex = /[^A-Za-z_]/;
for (let i = 0; i < tokens.length; i++) {
tokens[i] = tokens[i].toLowerCase().trim().replace(regex, "");
if (!map.has(tokens[i])) {
valid = false;
}
}
if (valid) {
let arr = map.get(tokens[0]);
for (let i = 0; i < arr.length; i++) {
// So that we store a copy rather than the references themselves
searchResults.push(arr[i]);
}
for (let i = 1; i < tokens.length; i++) {
let temp = []; // Temp variable that holds valid dates.
let nextArray = map.get(tokens[i]);
for (let j = 0; j < nextArray.length; j++) {
// Iterate through the next token's dates
for (let k = 0; k < searchResults.length; k++) {
// Iterate through the dates in search result
if (searchResults[k] == nextArray[j]) |
}
}
searchResults = temp;
}
for (let i = 0; i < searchResults.length; i++) {
searchResults[i] = parseTime(searchResults[i]);
}
}
d3.selectAll("g > *").remove();
//console.log(searchResults);
if (input == "") { // User did not input anything
drawScatter(null);
} else if(searchResults.length == 0){
console.log("else if" + searchResults);
drawScatter(searchResults, true);
} else {
//console.log(searchResults);
drawScatter(searchResults);
}
});
// Draw scatterplot
function drawScatter(searchResults, errFlag) {
d3.csv(csvFile).then(function (data) {
// Convert to Date format
data.forEach(function (d) {
d.Date = parseTime(d.Date);
});
if(errFlag){
d3.select("#err")
.style("opacity", 1);
}else{
d3.select("#err")
.style("opacity", 0);
}
// Zoom feature
var zoom = d3.zoom()
.scaleExtent([1, 20])
//translateExtent insert bounds
//or restrict zoom to one axis
.translateExtent([[0, 0], [width, height]])
.extent([[0, 0], [width, height]])
.on("zoom", zoomed);
//svg.call(zoom)
// Add X axis
var x = d3.scaleTime()
.domain(d3.extent(data, function (d) {
return d.Date;
}))
.range([0, width]);
var xAxis = svg.append("g")
.attr("transform", "translate(0," + (height - 20) + ")")
.call(d3.axisBottom(x).tickFormat(d3.timeFormat("%b %Y")));
// Add Y axis
var y = d3.scaleLinear()
.domain([0, 20])
.range([height - 20, 0]);
var yAxis = svg.append("g")
.call(d3.axisLeft(y));
svg.append("rect")
.attr("width", width)
.attr("height", height)
.style("fill", "none")
.style("pointer-events", "all")
.attr('transform', 'translate(' + margin.left + ',' + margin.top + ')')
.call(zoom);
// Define the div for the tooltip
var div = d3.select("body")
.append("div")
.attr("class", "tooltip")
.style("opacity", 0)
.style("pointer-events", "none");
// Add a clipPath: everything out of this area won't be drawn.
var clip = svg.append("defs").append("svg:clipPath")
.attr("id", "clip")
.append("svg:rect")
.attr("width", width)
.attr("height", height-20)
.attr("x", 0)
.attr("y", 0);
var scatter = svg.append('g')
.attr("clip-path", "url(#clip)");
// Text label for the x axis
svg.append("text")
.attr("transform",
"translate(" + (width/2) + " ," +
(height + margin.top + 20) + ")")
.style("text-anchor", "middle")
.style("font-family", "trebuchet ms")
.text("Date");
// Text label for the y axis
svg.append("text")
.attr("transform", "rotate(-90)")
.attr("y", 0 - margin.left)
.attr("x", 0 - (height / 2))
.attr("dy", "1em")
.style("text-anchor", "middle")
.style("font-family", "trebuchet ms")
.text("Popularity");
//Add dots
scatter.selectAll("dot")
.data(data)
.enter()
.append("circle")
.attr("cx", function (d) {
return x(d.Date);
})
.attr("cy", function (d) {
return y(d.Popularity_log);
})
.attr("r", 3)
.style("fill", function(d) {
if(searchResults == null){return "#00acee"} //"#cc2400"
for (var i = 0; i < searchResults.length; i++) {
if (searchResults[i] != null && searchResults[i].getTime() === d.Date.getTime()) {
return "#00acee";
}
}
return "none";
})
.on("mouseover", function (d) {
div.transition()
.duration(200)
.style("opacity", .9);
div.text(d.Tweet_Text)
.style("left", (d3.event.pageX) + "px")
.style("top", (d3.event.pageY - 28) + "px");
})
.on("mouseout", function (d) {
div.transition()
.duration(500)
.style("opacity", 0);
});
var scat = scatter
.selectAll("circle");
// Update chart when zooming
function updateChart() {
// Recover the new scale
var newX = d3.event.transform.rescaleX(x);
var newY = d3.event.transform.rescaleY(y);
// Update axes with these new boundaries
xAxis.call(d3.axisBottom(newX))
yAxis.call(d3.axisLeft(newY))
// Update circle position
scat.attr('cx', function (d) {
return newX(d.Date)
})
.attr('cy', function (d) {
return newY(d.Popularity_log)
});
}
function zoomed() {
var newX = d3.event.transform.rescaleX(x);
var newY = d3.event.transform.rescaleY(y);
xAxis.call(d3.axisBottom(newX).tickFormat(function(date) {
if (d3.event.transform.k == 1) {
return d3.timeFormat("%b %Y")(date);
} else {
return d3.timeFormat("%b %e, %Y")(date);
}}));
scat.attr('cx', function (d) {
return newX(d.Date)
})
.attr('cy', function (d) {
return newY(d.Popularity_log)
});
}
})
} | {
// only push those dates that are already in search result in temp
// as the results should be only the tweets that have all the words in the input.
temp.push(searchResults[k]);
} | conditional_block |
index.js | const d3 = require('d3');
const csvFile = require('./cleaner_csv.csv');
const wordData = require('./words_to_id.csv');
var parseTime = d3.timeParse("%Y-%m-%d %H:%M:%S");
var myList;
let map = new Map();
d3.csv(wordData).then(function(data) {
for (let i = 0; i < data.length; i++) {
// First poblate the map.
if (!map.has(data[i].Word)) {
// If we don't have the next word then we add it with an array.
map.set(data[i].Word, []);
}
// Get the array of the word and push the date.
map.get(data[i].Word).push(data[i].Date);
}
});
drawScatter(myList);
// Set the dimensions and margins of the graph
var margin = {top: 10, right: 30, bottom: 30, left: 60},
width = 1100 - margin.left - margin.right,
height = 700 - margin.top - margin.bottom;
// Append the svg object to the body of the page
var wid = width + margin.left + margin.right;
var hei = height + margin.top + margin.bottom;
var svg = d3.select("#dataviz")
.append("svg")
.attr('preserveAspectRatio', 'xMinYMin meet')
.attr('viewBox', "0 0 " + wid + " " + hei)
//.attr("width", width + margin.left + margin.right)
//.attr("height", height + margin.top + margin.bottom)
.append("g")
.attr("transform",
"translate(" + margin.left + "," + margin.top + ")");
// First suggestion
d3.select("#link1")
.on("click", function(d) {
d3.event.preventDefault();
d3.csv(wordData).then(function(data) {
var americaResults = [];
data.forEach(function(d) {
d.Date = parseTime(d.Date);
if (d.Word === 'makeamericagreatagain') {
americaResults.push(d.Date);
}
});
d3.selectAll("g > *").remove();
drawScatter(americaResults);
})
});
// Second suggestion
d3.select("#link2")
.on("click", function(d) {
d3.event.preventDefault();
d3.csv(wordData).then(function(data) {
var clintonResults = [];
data.forEach(function(d) {
d.Date = parseTime(d.Date);
if (d.Word === 'clinton') {
clintonResults.push(d.Date);
}
});
d3.selectAll("g > *").remove();
drawScatter(clintonResults);
})
});
// Third suggestion
d3.select("#link3")
.on("click", function(d) {
d3.event.preventDefault();
d3.csv(wordData).then(function(data) {
var americaResults = [];
data.forEach(function(d) {
d.Date = parseTime(d.Date);
if (d.Word === 'republican') {
americaResults.push(d.Date);
}
});
d3.selectAll("g > *").remove();
drawScatter(americaResults);
})
});
//search callback
d3.select("#form")
.on("submit", function(d) {
d3.event.preventDefault();
var input = document.getElementById("input").value;
var tokens = input.trim().split(" ");
var searchResults = [];
let valid = true;
let regex = /[^A-Za-z_]/;
for (let i = 0; i < tokens.length; i++) {
tokens[i] = tokens[i].toLowerCase().trim().replace(regex, "");
if (!map.has(tokens[i])) {
valid = false;
}
}
if (valid) {
let arr = map.get(tokens[0]);
for (let i = 0; i < arr.length; i++) {
// So that we store a copy rather than the references themselves
searchResults.push(arr[i]);
}
for (let i = 1; i < tokens.length; i++) {
let temp = []; // Temp variable that holds valid dates.
let nextArray = map.get(tokens[i]);
for (let j = 0; j < nextArray.length; j++) {
// Iterate through the next token's dates
for (let k = 0; k < searchResults.length; k++) {
// Iterate through the dates in search result
if (searchResults[k] == nextArray[j]) {
// only push those dates that are already in search result in temp
// as the results should be only the tweets that have all the words in the input.
temp.push(searchResults[k]);
}
}
}
searchResults = temp;
}
for (let i = 0; i < searchResults.length; i++) {
searchResults[i] = parseTime(searchResults[i]);
}
}
d3.selectAll("g > *").remove();
//console.log(searchResults);
if (input == "") { // User did not input anything
drawScatter(null);
} else if(searchResults.length == 0){
console.log("else if" + searchResults);
drawScatter(searchResults, true);
} else {
//console.log(searchResults);
drawScatter(searchResults);
}
});
// Draw scatterplot
function drawScatter(searchResults, errFlag) {
d3.csv(csvFile).then(function (data) {
// Convert to Date format
data.forEach(function (d) {
d.Date = parseTime(d.Date);
});
if(errFlag){
d3.select("#err")
.style("opacity", 1);
}else{
d3.select("#err")
.style("opacity", 0);
}
// Zoom feature
var zoom = d3.zoom()
.scaleExtent([1, 20])
//translateExtent insert bounds
//or restrict zoom to one axis
.translateExtent([[0, 0], [width, height]])
.extent([[0, 0], [width, height]])
.on("zoom", zoomed);
//svg.call(zoom)
// Add X axis
var x = d3.scaleTime()
.domain(d3.extent(data, function (d) {
return d.Date;
}))
.range([0, width]);
var xAxis = svg.append("g")
.attr("transform", "translate(0," + (height - 20) + ")")
.call(d3.axisBottom(x).tickFormat(d3.timeFormat("%b %Y")));
// Add Y axis
var y = d3.scaleLinear()
.domain([0, 20])
.range([height - 20, 0]);
var yAxis = svg.append("g")
.call(d3.axisLeft(y));
svg.append("rect")
.attr("width", width)
.attr("height", height)
.style("fill", "none")
.style("pointer-events", "all")
.attr('transform', 'translate(' + margin.left + ',' + margin.top + ')')
.call(zoom);
// Define the div for the tooltip
var div = d3.select("body")
.append("div")
.attr("class", "tooltip")
.style("opacity", 0)
.style("pointer-events", "none");
// Add a clipPath: everything out of this area won't be drawn.
var clip = svg.append("defs").append("svg:clipPath")
.attr("id", "clip")
.append("svg:rect")
.attr("width", width)
.attr("height", height-20)
.attr("x", 0)
.attr("y", 0);
var scatter = svg.append('g')
.attr("clip-path", "url(#clip)");
// Text label for the x axis
svg.append("text")
.attr("transform",
"translate(" + (width/2) + " ," +
(height + margin.top + 20) + ")")
.style("text-anchor", "middle")
.style("font-family", "trebuchet ms")
.text("Date");
// Text label for the y axis
svg.append("text")
.attr("transform", "rotate(-90)")
.attr("y", 0 - margin.left)
.attr("x", 0 - (height / 2))
.attr("dy", "1em")
.style("text-anchor", "middle")
.style("font-family", "trebuchet ms")
.text("Popularity");
//Add dots
scatter.selectAll("dot")
.data(data)
.enter()
.append("circle")
.attr("cx", function (d) {
return x(d.Date);
})
.attr("cy", function (d) {
return y(d.Popularity_log);
})
.attr("r", 3)
.style("fill", function(d) {
if(searchResults == null){return "#00acee"} //"#cc2400"
for (var i = 0; i < searchResults.length; i++) {
if (searchResults[i] != null && searchResults[i].getTime() === d.Date.getTime()) {
return "#00acee";
}
}
return "none";
})
.on("mouseover", function (d) {
div.transition()
.duration(200)
.style("opacity", .9);
div.text(d.Tweet_Text)
.style("left", (d3.event.pageX) + "px")
.style("top", (d3.event.pageY - 28) + "px");
})
.on("mouseout", function (d) {
div.transition()
.duration(500)
.style("opacity", 0);
});
var scat = scatter
.selectAll("circle");
// Update chart when zooming
function updateChart() {
// Recover the new scale
var newX = d3.event.transform.rescaleX(x);
var newY = d3.event.transform.rescaleY(y);
// Update axes with these new boundaries
xAxis.call(d3.axisBottom(newX))
yAxis.call(d3.axisLeft(newY))
// Update circle position
scat.attr('cx', function (d) {
return newX(d.Date)
})
.attr('cy', function (d) {
return newY(d.Popularity_log)
});
}
function zoomed() |
})
} | {
var newX = d3.event.transform.rescaleX(x);
var newY = d3.event.transform.rescaleY(y);
xAxis.call(d3.axisBottom(newX).tickFormat(function(date) {
if (d3.event.transform.k == 1) {
return d3.timeFormat("%b %Y")(date);
} else {
return d3.timeFormat("%b %e, %Y")(date);
}}));
scat.attr('cx', function (d) {
return newX(d.Date)
})
.attr('cy', function (d) {
return newY(d.Popularity_log)
});
} | identifier_body |
index.js | const d3 = require('d3');
const csvFile = require('./cleaner_csv.csv');
const wordData = require('./words_to_id.csv');
var parseTime = d3.timeParse("%Y-%m-%d %H:%M:%S");
var myList;
let map = new Map();
d3.csv(wordData).then(function(data) {
for (let i = 0; i < data.length; i++) {
// First poblate the map.
if (!map.has(data[i].Word)) {
// If we don't have the next word then we add it with an array.
map.set(data[i].Word, []);
}
// Get the array of the word and push the date.
map.get(data[i].Word).push(data[i].Date);
}
});
drawScatter(myList);
// Set the dimensions and margins of the graph
var margin = {top: 10, right: 30, bottom: 30, left: 60},
width = 1100 - margin.left - margin.right,
height = 700 - margin.top - margin.bottom;
// Append the svg object to the body of the page
var wid = width + margin.left + margin.right;
var hei = height + margin.top + margin.bottom;
var svg = d3.select("#dataviz")
.append("svg")
.attr('preserveAspectRatio', 'xMinYMin meet')
.attr('viewBox', "0 0 " + wid + " " + hei)
//.attr("width", width + margin.left + margin.right)
//.attr("height", height + margin.top + margin.bottom)
.append("g")
.attr("transform",
"translate(" + margin.left + "," + margin.top + ")");
// First suggestion
d3.select("#link1")
.on("click", function(d) {
d3.event.preventDefault();
d3.csv(wordData).then(function(data) {
var americaResults = [];
data.forEach(function(d) {
d.Date = parseTime(d.Date);
if (d.Word === 'makeamericagreatagain') {
americaResults.push(d.Date);
}
});
d3.selectAll("g > *").remove();
drawScatter(americaResults);
})
});
// Second suggestion
d3.select("#link2")
.on("click", function(d) {
d3.event.preventDefault();
d3.csv(wordData).then(function(data) {
var clintonResults = [];
data.forEach(function(d) {
d.Date = parseTime(d.Date);
if (d.Word === 'clinton') {
clintonResults.push(d.Date);
}
});
d3.selectAll("g > *").remove();
drawScatter(clintonResults);
})
});
// Third suggestion
d3.select("#link3")
.on("click", function(d) {
d3.event.preventDefault();
d3.csv(wordData).then(function(data) {
var americaResults = [];
data.forEach(function(d) {
d.Date = parseTime(d.Date);
if (d.Word === 'republican') {
americaResults.push(d.Date);
}
});
| });
//search callback
d3.select("#form")
.on("submit", function(d) {
d3.event.preventDefault();
var input = document.getElementById("input").value;
var tokens = input.trim().split(" ");
var searchResults = [];
let valid = true;
let regex = /[^A-Za-z_]/;
for (let i = 0; i < tokens.length; i++) {
tokens[i] = tokens[i].toLowerCase().trim().replace(regex, "");
if (!map.has(tokens[i])) {
valid = false;
}
}
if (valid) {
let arr = map.get(tokens[0]);
for (let i = 0; i < arr.length; i++) {
// So that we store a copy rather than the references themselves
searchResults.push(arr[i]);
}
for (let i = 1; i < tokens.length; i++) {
let temp = []; // Temp variable that holds valid dates.
let nextArray = map.get(tokens[i]);
for (let j = 0; j < nextArray.length; j++) {
// Iterate through the next token's dates
for (let k = 0; k < searchResults.length; k++) {
// Iterate through the dates in search result
if (searchResults[k] == nextArray[j]) {
// only push those dates that are already in search result in temp
// as the results should be only the tweets that have all the words in the input.
temp.push(searchResults[k]);
}
}
}
searchResults = temp;
}
for (let i = 0; i < searchResults.length; i++) {
searchResults[i] = parseTime(searchResults[i]);
}
}
d3.selectAll("g > *").remove();
//console.log(searchResults);
if (input == "") { // User did not input anything
drawScatter(null);
} else if(searchResults.length == 0){
console.log("else if" + searchResults);
drawScatter(searchResults, true);
} else {
//console.log(searchResults);
drawScatter(searchResults);
}
});
// Draw scatterplot
function drawScatter(searchResults, errFlag) {
d3.csv(csvFile).then(function (data) {
// Convert to Date format
data.forEach(function (d) {
d.Date = parseTime(d.Date);
});
if(errFlag){
d3.select("#err")
.style("opacity", 1);
}else{
d3.select("#err")
.style("opacity", 0);
}
// Zoom feature
var zoom = d3.zoom()
.scaleExtent([1, 20])
//translateExtent insert bounds
//or restrict zoom to one axis
.translateExtent([[0, 0], [width, height]])
.extent([[0, 0], [width, height]])
.on("zoom", zoomed);
//svg.call(zoom)
// Add X axis
var x = d3.scaleTime()
.domain(d3.extent(data, function (d) {
return d.Date;
}))
.range([0, width]);
var xAxis = svg.append("g")
.attr("transform", "translate(0," + (height - 20) + ")")
.call(d3.axisBottom(x).tickFormat(d3.timeFormat("%b %Y")));
// Add Y axis
var y = d3.scaleLinear()
.domain([0, 20])
.range([height - 20, 0]);
var yAxis = svg.append("g")
.call(d3.axisLeft(y));
svg.append("rect")
.attr("width", width)
.attr("height", height)
.style("fill", "none")
.style("pointer-events", "all")
.attr('transform', 'translate(' + margin.left + ',' + margin.top + ')')
.call(zoom);
// Define the div for the tooltip
var div = d3.select("body")
.append("div")
.attr("class", "tooltip")
.style("opacity", 0)
.style("pointer-events", "none");
// Add a clipPath: everything out of this area won't be drawn.
var clip = svg.append("defs").append("svg:clipPath")
.attr("id", "clip")
.append("svg:rect")
.attr("width", width)
.attr("height", height-20)
.attr("x", 0)
.attr("y", 0);
var scatter = svg.append('g')
.attr("clip-path", "url(#clip)");
// Text label for the x axis
svg.append("text")
.attr("transform",
"translate(" + (width/2) + " ," +
(height + margin.top + 20) + ")")
.style("text-anchor", "middle")
.style("font-family", "trebuchet ms")
.text("Date");
// Text label for the y axis
svg.append("text")
.attr("transform", "rotate(-90)")
.attr("y", 0 - margin.left)
.attr("x", 0 - (height / 2))
.attr("dy", "1em")
.style("text-anchor", "middle")
.style("font-family", "trebuchet ms")
.text("Popularity");
//Add dots
scatter.selectAll("dot")
.data(data)
.enter()
.append("circle")
.attr("cx", function (d) {
return x(d.Date);
})
.attr("cy", function (d) {
return y(d.Popularity_log);
})
.attr("r", 3)
.style("fill", function(d) {
if(searchResults == null){return "#00acee"} //"#cc2400"
for (var i = 0; i < searchResults.length; i++) {
if (searchResults[i] != null && searchResults[i].getTime() === d.Date.getTime()) {
return "#00acee";
}
}
return "none";
})
.on("mouseover", function (d) {
div.transition()
.duration(200)
.style("opacity", .9);
div.text(d.Tweet_Text)
.style("left", (d3.event.pageX) + "px")
.style("top", (d3.event.pageY - 28) + "px");
})
.on("mouseout", function (d) {
div.transition()
.duration(500)
.style("opacity", 0);
});
var scat = scatter
.selectAll("circle");
// Update chart when zooming
function updateChart() {
// Recover the new scale
var newX = d3.event.transform.rescaleX(x);
var newY = d3.event.transform.rescaleY(y);
// Update axes with these new boundaries
xAxis.call(d3.axisBottom(newX))
yAxis.call(d3.axisLeft(newY))
// Update circle position
scat.attr('cx', function (d) {
return newX(d.Date)
})
.attr('cy', function (d) {
return newY(d.Popularity_log)
});
}
function zoomed() {
var newX = d3.event.transform.rescaleX(x);
var newY = d3.event.transform.rescaleY(y);
xAxis.call(d3.axisBottom(newX).tickFormat(function(date) {
if (d3.event.transform.k == 1) {
return d3.timeFormat("%b %Y")(date);
} else {
return d3.timeFormat("%b %e, %Y")(date);
}}));
scat.attr('cx', function (d) {
return newX(d.Date)
})
.attr('cy', function (d) {
return newY(d.Popularity_log)
});
}
})
} | d3.selectAll("g > *").remove();
drawScatter(americaResults);
}) | random_line_split |
index.js | const d3 = require('d3');
const csvFile = require('./cleaner_csv.csv');
const wordData = require('./words_to_id.csv');
var parseTime = d3.timeParse("%Y-%m-%d %H:%M:%S");
var myList;
let map = new Map();
d3.csv(wordData).then(function(data) {
for (let i = 0; i < data.length; i++) {
// First poblate the map.
if (!map.has(data[i].Word)) {
// If we don't have the next word then we add it with an array.
map.set(data[i].Word, []);
}
// Get the array of the word and push the date.
map.get(data[i].Word).push(data[i].Date);
}
});
drawScatter(myList);
// Set the dimensions and margins of the graph
var margin = {top: 10, right: 30, bottom: 30, left: 60},
width = 1100 - margin.left - margin.right,
height = 700 - margin.top - margin.bottom;
// Append the svg object to the body of the page
var wid = width + margin.left + margin.right;
var hei = height + margin.top + margin.bottom;
var svg = d3.select("#dataviz")
.append("svg")
.attr('preserveAspectRatio', 'xMinYMin meet')
.attr('viewBox', "0 0 " + wid + " " + hei)
//.attr("width", width + margin.left + margin.right)
//.attr("height", height + margin.top + margin.bottom)
.append("g")
.attr("transform",
"translate(" + margin.left + "," + margin.top + ")");
// First suggestion
d3.select("#link1")
.on("click", function(d) {
d3.event.preventDefault();
d3.csv(wordData).then(function(data) {
var americaResults = [];
data.forEach(function(d) {
d.Date = parseTime(d.Date);
if (d.Word === 'makeamericagreatagain') {
americaResults.push(d.Date);
}
});
d3.selectAll("g > *").remove();
drawScatter(americaResults);
})
});
// Second suggestion
d3.select("#link2")
.on("click", function(d) {
d3.event.preventDefault();
d3.csv(wordData).then(function(data) {
var clintonResults = [];
data.forEach(function(d) {
d.Date = parseTime(d.Date);
if (d.Word === 'clinton') {
clintonResults.push(d.Date);
}
});
d3.selectAll("g > *").remove();
drawScatter(clintonResults);
})
});
// Third suggestion
d3.select("#link3")
.on("click", function(d) {
d3.event.preventDefault();
d3.csv(wordData).then(function(data) {
var americaResults = [];
data.forEach(function(d) {
d.Date = parseTime(d.Date);
if (d.Word === 'republican') {
americaResults.push(d.Date);
}
});
d3.selectAll("g > *").remove();
drawScatter(americaResults);
})
});
//search callback
d3.select("#form")
.on("submit", function(d) {
d3.event.preventDefault();
var input = document.getElementById("input").value;
var tokens = input.trim().split(" ");
var searchResults = [];
let valid = true;
let regex = /[^A-Za-z_]/;
for (let i = 0; i < tokens.length; i++) {
tokens[i] = tokens[i].toLowerCase().trim().replace(regex, "");
if (!map.has(tokens[i])) {
valid = false;
}
}
if (valid) {
let arr = map.get(tokens[0]);
for (let i = 0; i < arr.length; i++) {
// So that we store a copy rather than the references themselves
searchResults.push(arr[i]);
}
for (let i = 1; i < tokens.length; i++) {
let temp = []; // Temp variable that holds valid dates.
let nextArray = map.get(tokens[i]);
for (let j = 0; j < nextArray.length; j++) {
// Iterate through the next token's dates
for (let k = 0; k < searchResults.length; k++) {
// Iterate through the dates in search result
if (searchResults[k] == nextArray[j]) {
// only push those dates that are already in search result in temp
// as the results should be only the tweets that have all the words in the input.
temp.push(searchResults[k]);
}
}
}
searchResults = temp;
}
for (let i = 0; i < searchResults.length; i++) {
searchResults[i] = parseTime(searchResults[i]);
}
}
d3.selectAll("g > *").remove();
//console.log(searchResults);
if (input == "") { // User did not input anything
drawScatter(null);
} else if(searchResults.length == 0){
console.log("else if" + searchResults);
drawScatter(searchResults, true);
} else {
//console.log(searchResults);
drawScatter(searchResults);
}
});
// Draw scatterplot
function | (searchResults, errFlag) {
d3.csv(csvFile).then(function (data) {
// Convert to Date format
data.forEach(function (d) {
d.Date = parseTime(d.Date);
});
if(errFlag){
d3.select("#err")
.style("opacity", 1);
}else{
d3.select("#err")
.style("opacity", 0);
}
// Zoom feature
var zoom = d3.zoom()
.scaleExtent([1, 20])
//translateExtent insert bounds
//or restrict zoom to one axis
.translateExtent([[0, 0], [width, height]])
.extent([[0, 0], [width, height]])
.on("zoom", zoomed);
//svg.call(zoom)
// Add X axis
var x = d3.scaleTime()
.domain(d3.extent(data, function (d) {
return d.Date;
}))
.range([0, width]);
var xAxis = svg.append("g")
.attr("transform", "translate(0," + (height - 20) + ")")
.call(d3.axisBottom(x).tickFormat(d3.timeFormat("%b %Y")));
// Add Y axis
var y = d3.scaleLinear()
.domain([0, 20])
.range([height - 20, 0]);
var yAxis = svg.append("g")
.call(d3.axisLeft(y));
svg.append("rect")
.attr("width", width)
.attr("height", height)
.style("fill", "none")
.style("pointer-events", "all")
.attr('transform', 'translate(' + margin.left + ',' + margin.top + ')')
.call(zoom);
// Define the div for the tooltip
var div = d3.select("body")
.append("div")
.attr("class", "tooltip")
.style("opacity", 0)
.style("pointer-events", "none");
// Add a clipPath: everything out of this area won't be drawn.
var clip = svg.append("defs").append("svg:clipPath")
.attr("id", "clip")
.append("svg:rect")
.attr("width", width)
.attr("height", height-20)
.attr("x", 0)
.attr("y", 0);
var scatter = svg.append('g')
.attr("clip-path", "url(#clip)");
// Text label for the x axis
svg.append("text")
.attr("transform",
"translate(" + (width/2) + " ," +
(height + margin.top + 20) + ")")
.style("text-anchor", "middle")
.style("font-family", "trebuchet ms")
.text("Date");
// Text label for the y axis
svg.append("text")
.attr("transform", "rotate(-90)")
.attr("y", 0 - margin.left)
.attr("x", 0 - (height / 2))
.attr("dy", "1em")
.style("text-anchor", "middle")
.style("font-family", "trebuchet ms")
.text("Popularity");
//Add dots
scatter.selectAll("dot")
.data(data)
.enter()
.append("circle")
.attr("cx", function (d) {
return x(d.Date);
})
.attr("cy", function (d) {
return y(d.Popularity_log);
})
.attr("r", 3)
.style("fill", function(d) {
if(searchResults == null){return "#00acee"} //"#cc2400"
for (var i = 0; i < searchResults.length; i++) {
if (searchResults[i] != null && searchResults[i].getTime() === d.Date.getTime()) {
return "#00acee";
}
}
return "none";
})
.on("mouseover", function (d) {
div.transition()
.duration(200)
.style("opacity", .9);
div.text(d.Tweet_Text)
.style("left", (d3.event.pageX) + "px")
.style("top", (d3.event.pageY - 28) + "px");
})
.on("mouseout", function (d) {
div.transition()
.duration(500)
.style("opacity", 0);
});
var scat = scatter
.selectAll("circle");
// Update chart when zooming
function updateChart() {
// Recover the new scale
var newX = d3.event.transform.rescaleX(x);
var newY = d3.event.transform.rescaleY(y);
// Update axes with these new boundaries
xAxis.call(d3.axisBottom(newX))
yAxis.call(d3.axisLeft(newY))
// Update circle position
scat.attr('cx', function (d) {
return newX(d.Date)
})
.attr('cy', function (d) {
return newY(d.Popularity_log)
});
}
function zoomed() {
var newX = d3.event.transform.rescaleX(x);
var newY = d3.event.transform.rescaleY(y);
xAxis.call(d3.axisBottom(newX).tickFormat(function(date) {
if (d3.event.transform.k == 1) {
return d3.timeFormat("%b %Y")(date);
} else {
return d3.timeFormat("%b %e, %Y")(date);
}}));
scat.attr('cx', function (d) {
return newX(d.Date)
})
.attr('cy', function (d) {
return newY(d.Popularity_log)
});
}
})
} | drawScatter | identifier_name |
train_k_fold.py | import argparse
import json
import logging
import numpy
import random
from time import time
import os
import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.nn.utils import clip_grad_norm
from torch.utils.data import DataLoader
from sklearn.model_selection import KFold
import summarunner_weather.models
import summarunner_weather.utils
logging.basicConfig(level=logging.INFO, format='%(asctime)res [INFO] %(message)res')
parser = argparse.ArgumentParser(description='extractive summary')
# model
parser.add_argument('-save_dir', type=str, default='checkpoints/')
parser.add_argument('-embed_dim', type=int, default=100)
parser.add_argument('-embed_num', type=int, default=100)
parser.add_argument('-pos_dim', type=int, default=50)
parser.add_argument('-pos_num', type=int, default=100)
parser.add_argument('-seg_num', type=int, default=10)
parser.add_argument('-kernel_num', type=int, default=100)
parser.add_argument('-kernel_sizes', type=list, default=[3, 4, 5])
parser.add_argument('-model', type=str, default='RNN_RNN')
parser.add_argument('-hidden_size', type=int, default=200)
# train
parser.add_argument('-lr', type=float, default=1e-4)
parser.add_argument('-batch_size', type=int, default=16)
parser.add_argument('-epochs', type=int, default=30)
parser.add_argument('-seed', type=int, default=1)
parser.add_argument('-train_dir', type=str, default='../../data/chinese/cont2sum/little/train.json')
parser.add_argument('-embedding', type=str, default='../../data/chinese/cont2sum/little/embedding.npz')
parser.add_argument('-word2id', type=str, default='../../data/chinese/cont2sum/little/word2id.json')
parser.add_argument('-report_every', type=int, default=1500)
parser.add_argument('-seq_trunc', type=int, default=50)
parser.add_argument('-max_norm', type=float, default=1.0)
# test
parser.add_argument('-load_dir', type=str, default='checkpoints/AttnRNN_seed_1.pt')
parser.add_argument('-test_dir', type=str, default='../../data/chinese/cont2sum/little/test.json')
parser.add_argument('-ref', type=str, default='outputs/ref')
parser.add_argument('-hyp', type=str, default='outputs/hyp')
parser.add_argument('-topk', type=int, default=3)
# device
parser.add_argument('-device', type=int, default=0)
# option
parser.add_argument('-test', action='store_true')
parser.add_argument('-debug', action='store_true')
parser.add_argument('-predict', action='store_true')
args = parser.parse_args()
use_gpu = args.device is not None
# set cuda device and seed
torch.cuda.set_device(args.device)
torch.cuda.manual_seed(args.seed)
torch.manual_seed(args.seed)
random.seed(args.seed)
numpy.random.seed(args.seed)
random_state = args.seed
def eval(net, vocab, data_iter, criterion):
|
def train(n_val=50):
"""
验证集条数
:param n_val:
:return:
"""
logging.info('Loading vocab,train and val dataset.Wait a second,please')
embed = torch.Tensor(np.load(args.embedding)['embedding'])
with open(args.word2id) as f:
word2id = json.load(f)
vocab = summarunner_weather.utils.Vocab(embed, word2id)
with open(args.train_dir) as f:
examples = [json.loads(line) for line in f]
train_dataset = summarunner_weather.utils.Dataset(examples[: -n_val])
val_dataset = summarunner_weather.utils.Dataset(examples[-n_val:]) # 从train数据集中拿n_val条做验证集
# update args
args.embed_num = embed.size(0)
args.embed_dim = embed.size(1)
# build model
net = getattr(summarunner_weather.models, args.model)(args, embed)
if use_gpu:
net.cuda()
# load dataset
train_iter = DataLoader(dataset=train_dataset,
batch_size=args.batch_size,
shuffle=True)
val_iter = DataLoader(dataset=val_dataset,
batch_size=args.batch_size,
shuffle=False)
# loss function
criterion = nn.BCELoss()
# model info
print(net)
# params = sum(p.numel() for p in list(net.parameters())) / 1e6
# print('#Params: %.1fM' % params)
min_loss = float('inf')
optimizer = torch.optim.Adam(net.parameters(), lr=args.lr)
net.train()
t1 = time()
for epoch in range(1, args.epochs + 1):
print("epoch: ", epoch)
for i, batch in enumerate(train_iter):
print("batch num: ", i)
features, targets, _, doc_lens = vocab.make_features(batch)
features, targets = Variable(features), Variable(targets.float())
if use_gpu:
features = features.cuda()
targets = targets.cuda()
probs = net(features, doc_lens)
loss = criterion(probs, targets)
optimizer.zero_grad()
loss.backward()
clip_grad_norm(net.parameters(), args.max_norm)
optimizer.step()
if args.debug:
print('Batch ID:%d Loss:%f' % (i, loss.data[0]))
continue
if i % args.report_every == 0:
cur_loss = eval(net, vocab, val_iter, criterion)
if cur_loss < min_loss:
min_loss = cur_loss
net.save()
logging.info('Epoch: %2d Min_Val_Loss: %f Cur_Val_Loss: %f'
% (epoch, min_loss, cur_loss))
t2 = time()
logging.info('Total Cost:%f h' % ((t2 - t1) / 3600))
def train_k_fold(log_path='checkpoints/train_k_fold_RNN_RNN_info.txt'):
embed = torch.Tensor(np.load(args.embedding)['embedding'])
with open(args.word2id) as f:
word2id = json.load(f)
vocab = summarunner_weather.utils.Vocab(embed, word2id)
with open(args.train_dir, 'r', encoding='utf-8') as f:
examples = [json.loads(line) for line in f]
train_X = [example['content'] for example in examples]
train_y = [example['labels'] for example in examples]
args,embed_num = embed.size(0)
args.embed_dim = embed.size(1)
infos = []
cv_ptr = 0
for train_index, val_index in KFold(n_splits=10, random_state=random_state, shuffle=True).split(train_X, train_y):
train_data = [{'content': examples[i]['content'], 'labels': examples[i]['labels'],
'summary': examples[i]['summary']} for i in train_index]
val_data = [{'content': examples[i]['content'], 'labels': examples[i]['labels'],
'summary': examples[i]['summary']} for i in val_index]
train_dataset = summarunner_weather.utils.Dataset(train_data)
val_dataset = summarunner_weather.utils.Dataset(val_data)
# build model
net = getattr(summarunner_weather.models, args.model)(args, embed)
if use_gpu:
net.cuda()
# load dataset
train_iter = DataLoader(dataset=train_dataset,
batch_size=args.batch_size,
shuffle=True)
val_iter = DataLoader(dataset=val_dataset,
batch_size=args.batch_size,
shuffle=False)
# loss function
criterion = nn.BCELoss() # Binary Cross Entropy loss
# model info
print(net)
# params = sum(p.numel() for p in list(net.parameters())) / 1e6
# print('#Params: %.1fM' % params)
min_loss = float('inf')
optimizer = torch.optim.Adam(net.parameters(), lr=args.lr)
net.train()
t1 = time()
train_loss, val_loss = [], []
for epoch in range(1, args.epochs + 1):
for i, batch in enumerate(train_iter):
print("epoch: {}, batch num: {}".format(epoch, i))
features, targets, _, doc_lens = vocab.make_features(batch)
features, targets = Variable(features), Variable(targets.float())
if use_gpu:
features = features.cuda()
targets = targets.cuda()
probs = net(features, doc_lens)
loss = criterion(probs, targets)
optimizer.zero_grad()
loss.backward()
clip_grad_norm(net.parameters(), args.max_norm)
optimizer.step()
train_loss.append(float(loss.data))
if args.debug:
print('Batch ID:%d Loss:%f' % (i, loss.data[0]))
continue
if i % args.report_every == 0:
cur_loss = eval(net, vocab, val_iter, criterion)
if cur_loss < min_loss:
min_loss = cur_loss
net.save(cv_ptr)
val_loss.append(cur_loss)
logging.info('Epoch: %2d Min_Val_Loss: %f Cur_Val_Loss: %f' % (epoch, min_loss, cur_loss))
t2 = time()
logging.info('Total Cost:%f h' % ((t2 - t1) / 3600))
with open(args.test_dir, 'r', encoding='utf-8') as f:
test_data = [json.loads(line) for line in f]
test_dataset = summarunner_weather.utils.Dataset(test_data)
test_iter = DataLoader(dataset=test_dataset, batch_size=args.batch_size, shuffle=False)
test_loss = eval(net, vocab, test_iter, criterion) # 获取测试集上的loss
infos.append(json.dumps({'fold': cv_ptr, 'train_lozz': train_loss, 'val_lozz': val_loss, 'test_loss': test_loss,
'time_cost': (t2 - t1) / 3600}))
cv_ptr += 1
with open(log_path, 'w', encoding='utf-8') as f:
f.writelines('\n'.join(infos))
def train_3_model_k_fold(model_prefixies, save_dir):
for model_prefix in model_prefixies:
args.model = model_prefix
log_path = os.path.join(save_dir, "train_k_fold_" + model_prefix + "_info.txt")
train_k_fold(log_path)
if __name__ == '__main__':
models = ["RNN_RNN", 'CNN_RNN', 'AttnRNN']
# train_k_fold()
# train()
train_3_model_k_fold(models, "checkpoints") # 对3个模型进行k折交叉验证
| net.eval()
total_loss = 0
batch_num = 0
for batch in data_iter:
features, targets, _, doc_lens = vocab.make_features(batch)
features, targets = Variable(features), Variable(targets.float())
if use_gpu:
features = features.cuda()
targets = targets.cuda()
probs = net(features, doc_lens)
loss = criterion(probs, targets)
# origin: total_loss += loss.data[0]
total_loss += loss.data.item()
batch_num += 1
loss = total_loss / batch_num
net.train()
return loss | identifier_body |
train_k_fold.py | import argparse
import json
import logging
import numpy
import random
from time import time
import os
import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.nn.utils import clip_grad_norm
from torch.utils.data import DataLoader
from sklearn.model_selection import KFold
import summarunner_weather.models
import summarunner_weather.utils
logging.basicConfig(level=logging.INFO, format='%(asctime)res [INFO] %(message)res')
parser = argparse.ArgumentParser(description='extractive summary')
# model
parser.add_argument('-save_dir', type=str, default='checkpoints/')
parser.add_argument('-embed_dim', type=int, default=100)
parser.add_argument('-embed_num', type=int, default=100)
parser.add_argument('-pos_dim', type=int, default=50)
parser.add_argument('-pos_num', type=int, default=100)
parser.add_argument('-seg_num', type=int, default=10)
parser.add_argument('-kernel_num', type=int, default=100)
parser.add_argument('-kernel_sizes', type=list, default=[3, 4, 5])
parser.add_argument('-model', type=str, default='RNN_RNN')
parser.add_argument('-hidden_size', type=int, default=200)
# train
parser.add_argument('-lr', type=float, default=1e-4)
parser.add_argument('-batch_size', type=int, default=16)
parser.add_argument('-epochs', type=int, default=30)
parser.add_argument('-seed', type=int, default=1)
parser.add_argument('-train_dir', type=str, default='../../data/chinese/cont2sum/little/train.json')
parser.add_argument('-embedding', type=str, default='../../data/chinese/cont2sum/little/embedding.npz')
parser.add_argument('-word2id', type=str, default='../../data/chinese/cont2sum/little/word2id.json')
parser.add_argument('-report_every', type=int, default=1500)
parser.add_argument('-seq_trunc', type=int, default=50)
parser.add_argument('-max_norm', type=float, default=1.0)
# test
parser.add_argument('-load_dir', type=str, default='checkpoints/AttnRNN_seed_1.pt')
parser.add_argument('-test_dir', type=str, default='../../data/chinese/cont2sum/little/test.json')
parser.add_argument('-ref', type=str, default='outputs/ref')
parser.add_argument('-hyp', type=str, default='outputs/hyp')
parser.add_argument('-topk', type=int, default=3)
# device
parser.add_argument('-device', type=int, default=0)
# option
parser.add_argument('-test', action='store_true')
parser.add_argument('-debug', action='store_true')
parser.add_argument('-predict', action='store_true')
args = parser.parse_args()
use_gpu = args.device is not None
# set cuda device and seed
torch.cuda.set_device(args.device)
torch.cuda.manual_seed(args.seed)
torch.manual_seed(args.seed)
random.seed(args.seed)
numpy.random.seed(args.seed)
random_state = args.seed
def eval(net, vocab, data_iter, criterion):
net.eval()
total_loss = 0
batch_num = 0
for batch in data_iter:
features, targets, _, doc_lens = vocab.make_features(batch)
features, targets = Variable(features), Variable(targets.float())
if use_gpu:
features = features.cuda()
targets = targets.cuda()
probs = net(features, doc_lens)
loss = criterion(probs, targets)
# origin: total_loss += loss.data[0]
total_loss += loss.data.item()
batch_num += 1
loss = total_loss / batch_num
net.train()
return loss
def train(n_val=50):
"""
验证集条数
:param n_val:
:return:
"""
logging.info('Loading vocab,train and val dataset.Wait a second,please')
embed = torch.Tensor(np.load(args.embedding)['embedding'])
with open(args.word2id) as f:
word2id = json.load(f)
vocab = summarunner_weather.utils.Vocab(embed, word2id)
with open(args.train_dir) as f:
examples = [json.loads(line) for line in f]
train_dataset = summarunner_weather.utils.Dataset(examples[: -n_val])
val_dataset = summarunner_weather.utils.Dataset(examples[-n_val:]) # 从train数据集中拿n_val条做验证集
# update args
args.embed_num = embed.size(0)
args.embed_dim = embed.size(1)
# build model
net = getattr(summarunner_weather.models, args.model)(args, embed)
if use_gpu:
net.cuda()
# load dataset
train_iter = DataLoader(dataset=train_dataset,
batch_size=args.batch_size,
shuffle=True)
val_iter = DataLoader(dataset=val_dataset,
batch_size=args.batch_size,
shuffle=False)
# loss function
criterion = nn.BCELoss()
# model info
print(net)
# params = sum(p.numel() for p in list(net.parameters())) / 1e6
# print('#Params: %.1fM' % params)
min_loss = float('inf')
optimizer = torch.optim.Adam(net.parameters(), lr=args.lr)
net.train()
t1 = time()
for epoch in range(1, args.epochs + 1):
print("epoch: ", epoch)
for i, batch in enumerate(train_iter):
print("batch num: ", i)
features, targets, _, doc_lens = vocab.make_features(batch)
features, targets = Variable(features), Variable(targets.float())
if use_gpu:
features = features.cuda()
targets = targets.cuda()
probs = net(features, doc_lens)
loss = criterion(probs, targets)
optimizer.zero_grad()
loss.backward()
clip_grad_norm(net.parameters(), args.max_norm)
optimizer.step()
if args.debug:
print('Batch ID:%d Loss:%f' % (i, loss.data[0]))
continue
if i % args.report_every == 0:
cur_loss = eval(net, vocab, val_ | o('Total Cost:%f h' % ((t2 - t1) / 3600))
def train_k_fold(log_path='checkpoints/train_k_fold_RNN_RNN_info.txt'):
embed = torch.Tensor(np.load(args.embedding)['embedding'])
with open(args.word2id) as f:
word2id = json.load(f)
vocab = summarunner_weather.utils.Vocab(embed, word2id)
with open(args.train_dir, 'r', encoding='utf-8') as f:
examples = [json.loads(line) for line in f]
train_X = [example['content'] for example in examples]
train_y = [example['labels'] for example in examples]
args,embed_num = embed.size(0)
args.embed_dim = embed.size(1)
infos = []
cv_ptr = 0
for train_index, val_index in KFold(n_splits=10, random_state=random_state, shuffle=True).split(train_X, train_y):
train_data = [{'content': examples[i]['content'], 'labels': examples[i]['labels'],
'summary': examples[i]['summary']} for i in train_index]
val_data = [{'content': examples[i]['content'], 'labels': examples[i]['labels'],
'summary': examples[i]['summary']} for i in val_index]
train_dataset = summarunner_weather.utils.Dataset(train_data)
val_dataset = summarunner_weather.utils.Dataset(val_data)
# build model
net = getattr(summarunner_weather.models, args.model)(args, embed)
if use_gpu:
net.cuda()
# load dataset
train_iter = DataLoader(dataset=train_dataset,
batch_size=args.batch_size,
shuffle=True)
val_iter = DataLoader(dataset=val_dataset,
batch_size=args.batch_size,
shuffle=False)
# loss function
criterion = nn.BCELoss() # Binary Cross Entropy loss
# model info
print(net)
# params = sum(p.numel() for p in list(net.parameters())) / 1e6
# print('#Params: %.1fM' % params)
min_loss = float('inf')
optimizer = torch.optim.Adam(net.parameters(), lr=args.lr)
net.train()
t1 = time()
train_loss, val_loss = [], []
for epoch in range(1, args.epochs + 1):
for i, batch in enumerate(train_iter):
print("epoch: {}, batch num: {}".format(epoch, i))
features, targets, _, doc_lens = vocab.make_features(batch)
features, targets = Variable(features), Variable(targets.float())
if use_gpu:
features = features.cuda()
targets = targets.cuda()
probs = net(features, doc_lens)
loss = criterion(probs, targets)
optimizer.zero_grad()
loss.backward()
clip_grad_norm(net.parameters(), args.max_norm)
optimizer.step()
train_loss.append(float(loss.data))
if args.debug:
print('Batch ID:%d Loss:%f' % (i, loss.data[0]))
continue
if i % args.report_every == 0:
cur_loss = eval(net, vocab, val_iter, criterion)
if cur_loss < min_loss:
min_loss = cur_loss
net.save(cv_ptr)
val_loss.append(cur_loss)
logging.info('Epoch: %2d Min_Val_Loss: %f Cur_Val_Loss: %f' % (epoch, min_loss, cur_loss))
t2 = time()
logging.info('Total Cost:%f h' % ((t2 - t1) / 3600))
with open(args.test_dir, 'r', encoding='utf-8') as f:
test_data = [json.loads(line) for line in f]
test_dataset = summarunner_weather.utils.Dataset(test_data)
test_iter = DataLoader(dataset=test_dataset, batch_size=args.batch_size, shuffle=False)
test_loss = eval(net, vocab, test_iter, criterion) # 获取测试集上的loss
infos.append(json.dumps({'fold': cv_ptr, 'train_lozz': train_loss, 'val_lozz': val_loss, 'test_loss': test_loss,
'time_cost': (t2 - t1) / 3600}))
cv_ptr += 1
with open(log_path, 'w', encoding='utf-8') as f:
f.writelines('\n'.join(infos))
def train_3_model_k_fold(model_prefixies, save_dir):
for model_prefix in model_prefixies:
args.model = model_prefix
log_path = os.path.join(save_dir, "train_k_fold_" + model_prefix + "_info.txt")
train_k_fold(log_path)
if __name__ == '__main__':
models = ["RNN_RNN", 'CNN_RNN', 'AttnRNN']
# train_k_fold()
# train()
train_3_model_k_fold(models, "checkpoints") # 对3个模型进行k折交叉验证
| iter, criterion)
if cur_loss < min_loss:
min_loss = cur_loss
net.save()
logging.info('Epoch: %2d Min_Val_Loss: %f Cur_Val_Loss: %f'
% (epoch, min_loss, cur_loss))
t2 = time()
logging.inf | conditional_block |
train_k_fold.py | import argparse
import json
import logging
import numpy
import random
from time import time
import os
import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.nn.utils import clip_grad_norm
from torch.utils.data import DataLoader
from sklearn.model_selection import KFold
import summarunner_weather.models
import summarunner_weather.utils
logging.basicConfig(level=logging.INFO, format='%(asctime)res [INFO] %(message)res')
parser = argparse.ArgumentParser(description='extractive summary')
# model
parser.add_argument('-save_dir', type=str, default='checkpoints/')
parser.add_argument('-embed_dim', type=int, default=100)
parser.add_argument('-embed_num', type=int, default=100)
parser.add_argument('-pos_dim', type=int, default=50)
parser.add_argument('-pos_num', type=int, default=100)
parser.add_argument('-seg_num', type=int, default=10)
parser.add_argument('-kernel_num', type=int, default=100)
parser.add_argument('-kernel_sizes', type=list, default=[3, 4, 5])
parser.add_argument('-model', type=str, default='RNN_RNN')
parser.add_argument('-hidden_size', type=int, default=200)
# train
parser.add_argument('-lr', type=float, default=1e-4)
parser.add_argument('-batch_size', type=int, default=16)
parser.add_argument('-epochs', type=int, default=30)
parser.add_argument('-seed', type=int, default=1)
parser.add_argument('-train_dir', type=str, default='../../data/chinese/cont2sum/little/train.json')
parser.add_argument('-embedding', type=str, default='../../data/chinese/cont2sum/little/embedding.npz')
parser.add_argument('-word2id', type=str, default='../../data/chinese/cont2sum/little/word2id.json')
parser.add_argument('-report_every', type=int, default=1500)
parser.add_argument('-seq_trunc', type=int, default=50)
parser.add_argument('-max_norm', type=float, default=1.0)
# test
parser.add_argument('-load_dir', type=str, default='checkpoints/AttnRNN_seed_1.pt')
parser.add_argument('-test_dir', type=str, default='../../data/chinese/cont2sum/little/test.json')
parser.add_argument('-ref', type=str, default='outputs/ref')
parser.add_argument('-hyp', type=str, default='outputs/hyp')
parser.add_argument('-topk', type=int, default=3)
# device
parser.add_argument('-device', type=int, default=0)
# option
parser.add_argument('-test', action='store_true')
parser.add_argument('-debug', action='store_true')
parser.add_argument('-predict', action='store_true')
args = parser.parse_args()
use_gpu = args.device is not None
# set cuda device and seed
torch.cuda.set_device(args.device)
torch.cuda.manual_seed(args.seed)
torch.manual_seed(args.seed)
random.seed(args.seed)
numpy.random.seed(args.seed)
random_state = args.seed
def eval(net, vocab, data_iter, criterion):
net.eval()
total_loss = 0
batch_num = 0
for batch in data_iter:
features, targets, _, doc_lens = vocab.make_features(batch)
features, targets = Variable(features), Variable(targets.float())
if use_gpu:
features = features.cuda()
targets = targets.cuda()
probs = net(features, doc_lens)
loss = criterion(probs, targets)
# origin: total_loss += loss.data[0]
total_loss += loss.data.item()
batch_num += 1
loss = total_loss / batch_num
net.train()
return loss
def train(n_val=50):
"""
验证集条数
:param n_val:
:return:
"""
logging.info('Loading vocab,train and val dataset.Wait a second,please')
embed = torch.Tensor(np.load(args.embedding)['embedding'])
with open(args.word2id) as f:
word2id = json.load(f)
vocab = summarunner_weather.utils.Vocab(embed, word2id)
with open(args.train_dir) as f:
examples = [json.loads(line) for line in f]
train_dataset = summarunner_weather.utils.Dataset(examples[: -n_val])
val_dataset = summarunner_weather.utils.Dataset(examples[-n_val:]) # 从train数据集中拿n_val条做验证集
# update args
args.embed_num = embed.size(0)
args.embed_dim = embed.size(1)
# build model
net = getattr(summarunner_weather.models, args.model)(args, embed)
if use_gpu:
net.cuda()
# load dataset
train_iter = DataLoader(dataset=train_dataset,
batch_size=args.batch_size,
shuffle=True)
val_iter = DataLoader(dataset=val_dataset,
batch_size=args.batch_size,
shuffle=False)
# loss function
criterion = nn.BCELoss()
# model info
print(net)
# params = sum(p.numel() for p in list(net.parameters())) / 1e6
# print('#Params: %.1fM' % params)
min_loss = float('inf')
optimizer = torch.optim.Adam(net.parameters(), lr=args.lr)
net.train()
t1 = time()
for epoch in range(1, args.epochs + 1):
print("epoch: ", epoch)
for i, batch in enumerate(train_iter):
print("batch num: ", i)
features, targets, _, doc_lens = vocab.make_features(batch)
features, targets = Variable(features), Variable(targets.float())
if use_gpu:
features = features.cuda()
targets = targets.cuda()
probs = net(features, doc_lens)
loss = criterion(probs, targets)
optimizer.zero_grad()
loss.backward()
clip_grad_norm(net.parameters(), args.max_norm)
optimizer.step()
if args.debug:
print('Batch ID:%d Loss:%f' % (i, loss.data[0]))
continue
if i % args.report_every == 0:
cur_loss = eval(net, vocab, val_iter, criterion)
if cur_loss < min_loss:
min_loss = cur_loss
net.save()
logging.info('Epoch: %2d Min_Val_Loss: %f Cur_Val_Loss: %f'
% (epoch, min_loss, cur_loss))
t2 = time()
logging.info('Total Cost:%f h' % ((t2 - t1) / 3600))
def train_k_fold(log_path='checkpoin | old_RNN_RNN_info.txt'):
embed = torch.Tensor(np.load(args.embedding)['embedding'])
with open(args.word2id) as f:
word2id = json.load(f)
vocab = summarunner_weather.utils.Vocab(embed, word2id)
with open(args.train_dir, 'r', encoding='utf-8') as f:
examples = [json.loads(line) for line in f]
train_X = [example['content'] for example in examples]
train_y = [example['labels'] for example in examples]
args,embed_num = embed.size(0)
args.embed_dim = embed.size(1)
infos = []
cv_ptr = 0
for train_index, val_index in KFold(n_splits=10, random_state=random_state, shuffle=True).split(train_X, train_y):
train_data = [{'content': examples[i]['content'], 'labels': examples[i]['labels'],
'summary': examples[i]['summary']} for i in train_index]
val_data = [{'content': examples[i]['content'], 'labels': examples[i]['labels'],
'summary': examples[i]['summary']} for i in val_index]
train_dataset = summarunner_weather.utils.Dataset(train_data)
val_dataset = summarunner_weather.utils.Dataset(val_data)
# build model
net = getattr(summarunner_weather.models, args.model)(args, embed)
if use_gpu:
net.cuda()
# load dataset
train_iter = DataLoader(dataset=train_dataset,
batch_size=args.batch_size,
shuffle=True)
val_iter = DataLoader(dataset=val_dataset,
batch_size=args.batch_size,
shuffle=False)
# loss function
criterion = nn.BCELoss() # Binary Cross Entropy loss
# model info
print(net)
# params = sum(p.numel() for p in list(net.parameters())) / 1e6
# print('#Params: %.1fM' % params)
min_loss = float('inf')
optimizer = torch.optim.Adam(net.parameters(), lr=args.lr)
net.train()
t1 = time()
train_loss, val_loss = [], []
for epoch in range(1, args.epochs + 1):
for i, batch in enumerate(train_iter):
print("epoch: {}, batch num: {}".format(epoch, i))
features, targets, _, doc_lens = vocab.make_features(batch)
features, targets = Variable(features), Variable(targets.float())
if use_gpu:
features = features.cuda()
targets = targets.cuda()
probs = net(features, doc_lens)
loss = criterion(probs, targets)
optimizer.zero_grad()
loss.backward()
clip_grad_norm(net.parameters(), args.max_norm)
optimizer.step()
train_loss.append(float(loss.data))
if args.debug:
print('Batch ID:%d Loss:%f' % (i, loss.data[0]))
continue
if i % args.report_every == 0:
cur_loss = eval(net, vocab, val_iter, criterion)
if cur_loss < min_loss:
min_loss = cur_loss
net.save(cv_ptr)
val_loss.append(cur_loss)
logging.info('Epoch: %2d Min_Val_Loss: %f Cur_Val_Loss: %f' % (epoch, min_loss, cur_loss))
t2 = time()
logging.info('Total Cost:%f h' % ((t2 - t1) / 3600))
with open(args.test_dir, 'r', encoding='utf-8') as f:
test_data = [json.loads(line) for line in f]
test_dataset = summarunner_weather.utils.Dataset(test_data)
test_iter = DataLoader(dataset=test_dataset, batch_size=args.batch_size, shuffle=False)
test_loss = eval(net, vocab, test_iter, criterion) # 获取测试集上的loss
infos.append(json.dumps({'fold': cv_ptr, 'train_lozz': train_loss, 'val_lozz': val_loss, 'test_loss': test_loss,
'time_cost': (t2 - t1) / 3600}))
cv_ptr += 1
with open(log_path, 'w', encoding='utf-8') as f:
f.writelines('\n'.join(infos))
def train_3_model_k_fold(model_prefixies, save_dir):
for model_prefix in model_prefixies:
args.model = model_prefix
log_path = os.path.join(save_dir, "train_k_fold_" + model_prefix + "_info.txt")
train_k_fold(log_path)
if __name__ == '__main__':
models = ["RNN_RNN", 'CNN_RNN', 'AttnRNN']
# train_k_fold()
# train()
train_3_model_k_fold(models, "checkpoints") # 对3个模型进行k折交叉验证
| ts/train_k_f | identifier_name |
train_k_fold.py | import argparse
import json
import logging
import numpy
import random
from time import time
import os
import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.nn.utils import clip_grad_norm
from torch.utils.data import DataLoader
from sklearn.model_selection import KFold
import summarunner_weather.models
import summarunner_weather.utils
logging.basicConfig(level=logging.INFO, format='%(asctime)res [INFO] %(message)res')
parser = argparse.ArgumentParser(description='extractive summary')
# model
parser.add_argument('-save_dir', type=str, default='checkpoints/')
parser.add_argument('-embed_dim', type=int, default=100)
parser.add_argument('-embed_num', type=int, default=100)
parser.add_argument('-pos_dim', type=int, default=50)
parser.add_argument('-pos_num', type=int, default=100)
parser.add_argument('-seg_num', type=int, default=10)
parser.add_argument('-kernel_num', type=int, default=100)
parser.add_argument('-kernel_sizes', type=list, default=[3, 4, 5])
parser.add_argument('-model', type=str, default='RNN_RNN')
parser.add_argument('-hidden_size', type=int, default=200)
# train | parser.add_argument('-seed', type=int, default=1)
parser.add_argument('-train_dir', type=str, default='../../data/chinese/cont2sum/little/train.json')
parser.add_argument('-embedding', type=str, default='../../data/chinese/cont2sum/little/embedding.npz')
parser.add_argument('-word2id', type=str, default='../../data/chinese/cont2sum/little/word2id.json')
parser.add_argument('-report_every', type=int, default=1500)
parser.add_argument('-seq_trunc', type=int, default=50)
parser.add_argument('-max_norm', type=float, default=1.0)
# test
parser.add_argument('-load_dir', type=str, default='checkpoints/AttnRNN_seed_1.pt')
parser.add_argument('-test_dir', type=str, default='../../data/chinese/cont2sum/little/test.json')
parser.add_argument('-ref', type=str, default='outputs/ref')
parser.add_argument('-hyp', type=str, default='outputs/hyp')
parser.add_argument('-topk', type=int, default=3)
# device
parser.add_argument('-device', type=int, default=0)
# option
parser.add_argument('-test', action='store_true')
parser.add_argument('-debug', action='store_true')
parser.add_argument('-predict', action='store_true')
args = parser.parse_args()
use_gpu = args.device is not None
# set cuda device and seed
torch.cuda.set_device(args.device)
torch.cuda.manual_seed(args.seed)
torch.manual_seed(args.seed)
random.seed(args.seed)
numpy.random.seed(args.seed)
random_state = args.seed
def eval(net, vocab, data_iter, criterion):
net.eval()
total_loss = 0
batch_num = 0
for batch in data_iter:
features, targets, _, doc_lens = vocab.make_features(batch)
features, targets = Variable(features), Variable(targets.float())
if use_gpu:
features = features.cuda()
targets = targets.cuda()
probs = net(features, doc_lens)
loss = criterion(probs, targets)
# origin: total_loss += loss.data[0]
total_loss += loss.data.item()
batch_num += 1
loss = total_loss / batch_num
net.train()
return loss
def train(n_val=50):
"""
验证集条数
:param n_val:
:return:
"""
logging.info('Loading vocab,train and val dataset.Wait a second,please')
embed = torch.Tensor(np.load(args.embedding)['embedding'])
with open(args.word2id) as f:
word2id = json.load(f)
vocab = summarunner_weather.utils.Vocab(embed, word2id)
with open(args.train_dir) as f:
examples = [json.loads(line) for line in f]
train_dataset = summarunner_weather.utils.Dataset(examples[: -n_val])
val_dataset = summarunner_weather.utils.Dataset(examples[-n_val:]) # 从train数据集中拿n_val条做验证集
# update args
args.embed_num = embed.size(0)
args.embed_dim = embed.size(1)
# build model
net = getattr(summarunner_weather.models, args.model)(args, embed)
if use_gpu:
net.cuda()
# load dataset
train_iter = DataLoader(dataset=train_dataset,
batch_size=args.batch_size,
shuffle=True)
val_iter = DataLoader(dataset=val_dataset,
batch_size=args.batch_size,
shuffle=False)
# loss function
criterion = nn.BCELoss()
# model info
print(net)
# params = sum(p.numel() for p in list(net.parameters())) / 1e6
# print('#Params: %.1fM' % params)
min_loss = float('inf')
optimizer = torch.optim.Adam(net.parameters(), lr=args.lr)
net.train()
t1 = time()
for epoch in range(1, args.epochs + 1):
print("epoch: ", epoch)
for i, batch in enumerate(train_iter):
print("batch num: ", i)
features, targets, _, doc_lens = vocab.make_features(batch)
features, targets = Variable(features), Variable(targets.float())
if use_gpu:
features = features.cuda()
targets = targets.cuda()
probs = net(features, doc_lens)
loss = criterion(probs, targets)
optimizer.zero_grad()
loss.backward()
clip_grad_norm(net.parameters(), args.max_norm)
optimizer.step()
if args.debug:
print('Batch ID:%d Loss:%f' % (i, loss.data[0]))
continue
if i % args.report_every == 0:
cur_loss = eval(net, vocab, val_iter, criterion)
if cur_loss < min_loss:
min_loss = cur_loss
net.save()
logging.info('Epoch: %2d Min_Val_Loss: %f Cur_Val_Loss: %f'
% (epoch, min_loss, cur_loss))
t2 = time()
logging.info('Total Cost:%f h' % ((t2 - t1) / 3600))
def train_k_fold(log_path='checkpoints/train_k_fold_RNN_RNN_info.txt'):
embed = torch.Tensor(np.load(args.embedding)['embedding'])
with open(args.word2id) as f:
word2id = json.load(f)
vocab = summarunner_weather.utils.Vocab(embed, word2id)
with open(args.train_dir, 'r', encoding='utf-8') as f:
examples = [json.loads(line) for line in f]
train_X = [example['content'] for example in examples]
train_y = [example['labels'] for example in examples]
args,embed_num = embed.size(0)
args.embed_dim = embed.size(1)
infos = []
cv_ptr = 0
for train_index, val_index in KFold(n_splits=10, random_state=random_state, shuffle=True).split(train_X, train_y):
train_data = [{'content': examples[i]['content'], 'labels': examples[i]['labels'],
'summary': examples[i]['summary']} for i in train_index]
val_data = [{'content': examples[i]['content'], 'labels': examples[i]['labels'],
'summary': examples[i]['summary']} for i in val_index]
train_dataset = summarunner_weather.utils.Dataset(train_data)
val_dataset = summarunner_weather.utils.Dataset(val_data)
# build model
net = getattr(summarunner_weather.models, args.model)(args, embed)
if use_gpu:
net.cuda()
# load dataset
train_iter = DataLoader(dataset=train_dataset,
batch_size=args.batch_size,
shuffle=True)
val_iter = DataLoader(dataset=val_dataset,
batch_size=args.batch_size,
shuffle=False)
# loss function
criterion = nn.BCELoss() # Binary Cross Entropy loss
# model info
print(net)
# params = sum(p.numel() for p in list(net.parameters())) / 1e6
# print('#Params: %.1fM' % params)
min_loss = float('inf')
optimizer = torch.optim.Adam(net.parameters(), lr=args.lr)
net.train()
t1 = time()
train_loss, val_loss = [], []
for epoch in range(1, args.epochs + 1):
for i, batch in enumerate(train_iter):
print("epoch: {}, batch num: {}".format(epoch, i))
features, targets, _, doc_lens = vocab.make_features(batch)
features, targets = Variable(features), Variable(targets.float())
if use_gpu:
features = features.cuda()
targets = targets.cuda()
probs = net(features, doc_lens)
loss = criterion(probs, targets)
optimizer.zero_grad()
loss.backward()
clip_grad_norm(net.parameters(), args.max_norm)
optimizer.step()
train_loss.append(float(loss.data))
if args.debug:
print('Batch ID:%d Loss:%f' % (i, loss.data[0]))
continue
if i % args.report_every == 0:
cur_loss = eval(net, vocab, val_iter, criterion)
if cur_loss < min_loss:
min_loss = cur_loss
net.save(cv_ptr)
val_loss.append(cur_loss)
logging.info('Epoch: %2d Min_Val_Loss: %f Cur_Val_Loss: %f' % (epoch, min_loss, cur_loss))
t2 = time()
logging.info('Total Cost:%f h' % ((t2 - t1) / 3600))
with open(args.test_dir, 'r', encoding='utf-8') as f:
test_data = [json.loads(line) for line in f]
test_dataset = summarunner_weather.utils.Dataset(test_data)
test_iter = DataLoader(dataset=test_dataset, batch_size=args.batch_size, shuffle=False)
test_loss = eval(net, vocab, test_iter, criterion) # 获取测试集上的loss
infos.append(json.dumps({'fold': cv_ptr, 'train_lozz': train_loss, 'val_lozz': val_loss, 'test_loss': test_loss,
'time_cost': (t2 - t1) / 3600}))
cv_ptr += 1
with open(log_path, 'w', encoding='utf-8') as f:
f.writelines('\n'.join(infos))
def train_3_model_k_fold(model_prefixies, save_dir):
for model_prefix in model_prefixies:
args.model = model_prefix
log_path = os.path.join(save_dir, "train_k_fold_" + model_prefix + "_info.txt")
train_k_fold(log_path)
if __name__ == '__main__':
models = ["RNN_RNN", 'CNN_RNN', 'AttnRNN']
# train_k_fold()
# train()
train_3_model_k_fold(models, "checkpoints") # 对3个模型进行k折交叉验证 | parser.add_argument('-lr', type=float, default=1e-4)
parser.add_argument('-batch_size', type=int, default=16)
parser.add_argument('-epochs', type=int, default=30) | random_line_split |
roadtrip_compute.py | import pandas as pd
from itertools import combinations
import requests
import json
from math import trunc
from datetime import datetime, timedelta
import re
from collections import Counter
from itertools import chain
from DatabaseAccess.Connector import Connector
import DatabaseAccess.sql_requests as sql
import bdd_management
import algorithms
import visualization
#Liste des clefs générées pour utiliser l'API
#API_KEY = '6cf28a3a-59c3-4c82-8cbf-8fa5e64b01da'
#API_KEY = '3fd6041b-beda-4a79-9f1a-09bc263a1dfd'
#API_KEY = 'd3f69ecb-68f5-477e-b1bb-d58208f936c5'
#API_KEY = '78cc6f8e-68d6-450d-89d0-8a085b6c5af5'
API_KEY = 'b84ebebd-476c-4204-b195-7ffeb67043e7'
#API_KEY = 'cc3bc7b1-4c27-4176-aefd-15017c363178'
#API_KEY = '57f195e9-78a9-4fd7-a10c-312f0502d659'
#constantes
API_NAVITIA = "https://api.sncf.com/v1/coverage/sncf/journeys?key={3}&from=admin:fr:{0}&to=admin:fr:{1}&datetime={2}&count=20"
all_waypoints = None
def datetime_str_to_datetime_str(datetime_str, fromFormat="%Y%m%dT%H%M%S", toFormat="%d/%m/%Y à %H:%M"):
"""Convert datetime in string format to another datetime string
Args:
datetime_str (str): input string
fromFormat (str, optional): input datetime format. Defaults to "%Y%m%dT%H%M%S".
toFormat (str, optional): output datetime format. Defaults to "%d/%m/%Y à %H:%M".
Returns:
str: output datetime to string formated in the given format
"""
date_time = datetime.strptime(datetime_str, fromFormat)
return date_time.strftime(toFormat)
def store_section(db_connector, description, geo_point_from, geo_point_to, section_type, duration=None, co2=None):
"""store trip section information in db
Args:
db_connector (psycopg 'connection'): connection instance to the database. Used to keep all requests in the same transaction
description (str): trip section resume
geo_point_from (str): start city coord (lat;long)
geo_point_to (float): end city coord (lat;long)
section_type (int): type of trip section [INFO, SECTION, SUB_SECTION, DELAY]
duration (int, optional): duration of the travel. Defaults to None.
co2 (float, optional): co2 emission for the travel. Defaults to None.
"""
indentation = ''
if section_type == 'DELAY' or section_type == 'SUB_SECTION':
indentation = ' -> '
print(indentation + description)
db_connector.execute_nonquery(sql.SQL_INSERT_FRENCH_TRIP_SECTION, [
geo_point_from, geo_point_to, description, section_type, duration, co2])
def save_trip_section(db_connector, all_waypoints, from_city_insee, to_city_insee, best_travel):
"""format trip section informations then print & store in db
Args:
db_connector (psycopg 'connection'): connection instance to the database. Used to keep all requests in the same transaction
from_city_insee (str): from city insee code
to_city_insee (str): to city insee code
best_travel (json): data about trip section
"""
from_city_name = all_waypoints.loc[all_waypoints[0]
== from_city_insee].values[0][2]
to_city_name = all_waypoints.loc[all_waypoints[0]
== to_city_insee].values[0][2]
from_city_gps = all_waypoints.loc[all_waypoints[0]
== to_city_insee].values[0][3]
to_city_gps = all_waypoints.loc[all_waypoints[0]
== to_city_insee].values[0][3]
store_section(db_connector, 'Voyage de {} à {}. Départ le {} - Arrivée le {} après {} transferts '.format(from_city_name, to_city_name, datetime_str_to_datetime_str(best_travel['departure_date_time']), datetime_str_to_datetime_str(best_travel['arrival_date_time']), best_travel['nb_transfers']),
None,
None,
'SECTION',
best_travel['duration'],
best_travel['co2_emission']["value"]
)
for section in best_travel['sections']:
if 'from' in section:
if not section['type'] == 'crow_fly':
# vilaine faute d'orthographe sur transfer_type
if not 'transfer_type' in section or not section['transfer_type'] == 'walking':
store_section(db_connector, '{} - {} ({})'.format(section['from']['name'], section['to']['name'], section['display_informations']['physical_mode']),
from_city_gps,
to_city_gps,
'SUB_SECTION')
# else : initiale section, not used
else:
store_section(db_connector, 'Waiting {} minutes'.format(section['duration']/60),
None,
None,
'DELAY')
def run_travel_optimisation(trip_start_date, is_min_co2_search = False, is_force_compute = False):
"""run the treatment to find the best optimized trip
Args:
trip_start_date (datetime): trip start date in format "%Y%m%dT%H%M%S"
is_min_co2_search (bool): specify is optimisation is based on co2 emission or duration
is_force_compute (bool): force the re-calculation of trips betweens all prefecture (very slow)
"""
waypoint_co2 = {}
waypoint_durations = {}
# get all prefectures referential
db_connector = Connector()
with db_connector:
results = db_connector.execute_query(sql.SQL_GET_ALL_PREFECTURE)
all_waypoints = pd.DataFrame(results.fetchall())
# Vérification si les trajets péfecture à préfecture ont été déjà calculés
db_connector = Connector()
with db_connector:
saved_waypoints = db_connector.execute_query(sql.SQL_GET_WAYPOINTS)
# Dans le précalcul des trajets optimaux, utilisation de la date courante
travel_date = datetime.now().strftime("%Y%m%dT%H%M%S")
bad_waypoints = []
if saved_waypoints.rowcount > 0 and not is_force_compute:
print("le référentiel des voyage existe déjà")
else:
try:
bdd_management.truncate_journey()
for (from_city, to_city) in combinations(all_waypoints[0].values, 2):
try:
if int(from_city) in bad_waypoints or int(to_city) in bad_waypoints:
continue
route = requests.get(API_NAVITIA.format(
int(from_city), int(to_city), travel_date, API_KEY))
response = json.loads(route.text)
mid_duration = 0
mid_co2 = 0
for journey in response["journeys"]:
mid_duration += journey["duration"]
mid_co2 += journey["co2_emission"]["value"]
waypoint_co2[frozenset([from_city, to_city])
] = mid_co2/len(response["journeys"])
waypoint_durations[frozenset(
[from_city, to_city])] = mid_duration/len(response["journeys"])
except Exception as e:
print("Error with finding the route between %s and %s : %s" %
(from_city, to_city, response["error"]["message"]))
if 'no destination point' == response["error"]["message"]:
bad_waypoints.append(int(to_city))
if 'no origin point' == response["error"]["message"]:
bad_waypoints.append(int(from_city))
for bad_insee_code in re.findall('The entry point: admin:fr:([0-9]+) is not valid', response["error"]["message"]):
if not int(bad_insee_code) in bad_waypoints:
bad_waypoints.append(int(bad_insee_code))
# Enregistrement des trajets point à point (préfecture à préfecture)
db_connector = Connector() | waypoint2,
str(waypoint_co2[frozenset([waypoint1, waypoint2])]),
str(int(waypoint_durations[frozenset([waypoint1, waypoint2])]))]
db_connector.execute_nonquery(sql.SQL_INSERT_WAYPOINT, waypoint)
# commit trajets unitaires dans la bdd
db_connector.commit()
# enregistrement des préfectures non trouvée (pas de gare)
print(bad_waypoints)
db_connector = Connector()
with db_connector:
for bad_city in bad_waypoints:
db_connector.execute_nonquery(
sql.SQL_INSERT_CITY_WITHOUT_STATION, str(bad_city))
#db_connector.commit()
except Exception as e:
print('Erreur durant la génération des trajets de préfecture en préfecture. Rollback effectué')
waypoint_co2 = {}
waypoint_durations = {}
processed_waypoints = set()
db_connector = Connector()
with db_connector:
waypoints = db_connector.execute_query(sql.SQL_GET_WAYPOINTS)
for row in waypoints:
waypoint_co2[frozenset([int(row[0]), int(row[1])])] = row[2]
waypoint_durations[frozenset([int(row[0]), int(row[1])])] = row[3]
processed_waypoints.update([row[0], row[1]])
travel_results = algorithms.run_genetic_algorithm(waypoints = list(processed_waypoints), is_min_co2_search = is_min_co2_search, generations=300, population_size=100 )
# take most represented trip order
journey_groups = Counter(chain(*travel_results))
top_journeys = journey_groups.most_common(1)[0][0]
print('Le voyage le plus représentatif est :')
print(top_journeys)
# calcul des horaires de voyage réels pour le trajet le plus optimisé
print('Départ du calcul du voyage le %s' %
(datetime_str_to_datetime_str(trip_start_date)))
travel_date = trip_start_date
db_connector = Connector()
with db_connector:
try:
#vidage de la table contenant les informations du voyage
bdd_management.truncate_roadtrip()
for i in range(len(top_journeys)-1):
try:
from_city_insee = top_journeys[i]
to_city_insee = top_journeys[i+1]
route = requests.get(API_NAVITIA.format(
int(from_city_insee), int(to_city_insee), travel_date, API_KEY))
travels = json.loads(route.text)
# Contrôle des voyage reçus pour identifier le plus adapté à recherche
best_travel = travels["journeys"][0]
for travel in travels["journeys"]:
if is_min_co2_search and float(best_travel['co2_emission']['value']) > float(travel['co2_emission']['value']):
best_travel = travel
if best_travel['arrival_date_time'] > travel['arrival_date_time']:
best_travel = travel
# sauvegarde du trajet 'i' en base
save_trip_section(db_connector, all_waypoints, from_city_insee, to_city_insee, best_travel)
# le prochain trajet devra avoir une date de départ > à la date de ce trajet
travel_date = best_travel['arrival_date_time']
except Exception as e:
print("!! Erreur durant le calcul du trajet entre '%s' et '%s'" %
(from_city_insee, to_city_insee))
#Ecriture du résumé du voyage
resume = db_connector.execute_query(sql.SQL_GET_C02_CONSUMPTION_RESUME)
resume = resume.fetchone()
resume_description = """Début du voyage le {} . Arrivée le {}.
Le voyage à durée {} pour un total de {:d} kgeC""".format(
datetime_str_to_datetime_str(trip_start_date),
datetime_str_to_datetime_str(travel_date),
str(timedelta(seconds=resume[0])) ,
trunc( resume[1]/1000))
store_section(db_connector, resume_description, None, None, 'INFO', resume[0], resume[1])
db_connector.commit()
except Exception as e:
db_connector.rollback()
print('Erreur durant la création du voyage. rollback effectué!!!')
print('print map with road-trip data')
visualization.generate_visualization()
print('Travel complete. Have nive trip!!!') | with db_connector:
for (waypoint1, waypoint2) in waypoint_co2.keys():
waypoint = [waypoint1, | random_line_split |
roadtrip_compute.py | import pandas as pd
from itertools import combinations
import requests
import json
from math import trunc
from datetime import datetime, timedelta
import re
from collections import Counter
from itertools import chain
from DatabaseAccess.Connector import Connector
import DatabaseAccess.sql_requests as sql
import bdd_management
import algorithms
import visualization
#Liste des clefs générées pour utiliser l'API
#API_KEY = '6cf28a3a-59c3-4c82-8cbf-8fa5e64b01da'
#API_KEY = '3fd6041b-beda-4a79-9f1a-09bc263a1dfd'
#API_KEY = 'd3f69ecb-68f5-477e-b1bb-d58208f936c5'
#API_KEY = '78cc6f8e-68d6-450d-89d0-8a085b6c5af5'
API_KEY = 'b84ebebd-476c-4204-b195-7ffeb67043e7'
#API_KEY = 'cc3bc7b1-4c27-4176-aefd-15017c363178'
#API_KEY = '57f195e9-78a9-4fd7-a10c-312f0502d659'
#constantes
API_NAVITIA = "https://api.sncf.com/v1/coverage/sncf/journeys?key={3}&from=admin:fr:{0}&to=admin:fr:{1}&datetime={2}&count=20"
all_waypoints = None
def datetime_str_to_datetime_str(datetime_str, fromFormat="%Y%m%dT%H%M%S", toFormat="%d/%m/%Y à %H:%M"):
"""Convert datetime in string format to another datetime string
Args:
datetime_str (str): input string
fromFormat (str, optional): input datetime format. Defaults to "%Y%m%dT%H%M%S".
toFormat (str, optional): output datetime format. Defaults to "%d/%m/%Y à %H:%M".
Returns:
str: output datetime to string formated in the given format
"""
date_time = datetime.strptime(datetime_str, fromFormat)
return date_time.strftime(toFormat)
def store_section(db_connector, description, geo_point_from, geo_point_to, section_type, duration=None, co2=None):
"""store trip section information in db
Args:
db_connector (psycopg 'connection'): connection instance to the database. Used to keep all requests in the same transaction
description (str): trip section resume
geo_point_from (str): start city coord (lat;long)
geo_point_to (float): end city coord (lat;long)
section_type (int): type of trip section [INFO, SECTION, SUB_SECTION, DELAY]
duration (int, optional): duration of the travel. Defaults to None.
co2 (float, optional): co2 emission for the travel. Defaults to None.
"""
indentation = ''
if section_type == 'DELAY' or section_type == 'SUB_SECTION':
indentation = ' -> '
print(indentation + description)
db_connector.execute_nonquery(sql.SQL_INSERT_FRENCH_TRIP_SECTION, [
geo_point_from, geo_point_to, description, section_type, duration, co2])
def save_ | onnector, all_waypoints, from_city_insee, to_city_insee, best_travel):
"""format trip section informations then print & store in db
Args:
db_connector (psycopg 'connection'): connection instance to the database. Used to keep all requests in the same transaction
from_city_insee (str): from city insee code
to_city_insee (str): to city insee code
best_travel (json): data about trip section
"""
from_city_name = all_waypoints.loc[all_waypoints[0]
== from_city_insee].values[0][2]
to_city_name = all_waypoints.loc[all_waypoints[0]
== to_city_insee].values[0][2]
from_city_gps = all_waypoints.loc[all_waypoints[0]
== to_city_insee].values[0][3]
to_city_gps = all_waypoints.loc[all_waypoints[0]
== to_city_insee].values[0][3]
store_section(db_connector, 'Voyage de {} à {}. Départ le {} - Arrivée le {} après {} transferts '.format(from_city_name, to_city_name, datetime_str_to_datetime_str(best_travel['departure_date_time']), datetime_str_to_datetime_str(best_travel['arrival_date_time']), best_travel['nb_transfers']),
None,
None,
'SECTION',
best_travel['duration'],
best_travel['co2_emission']["value"]
)
for section in best_travel['sections']:
if 'from' in section:
if not section['type'] == 'crow_fly':
# vilaine faute d'orthographe sur transfer_type
if not 'transfer_type' in section or not section['transfer_type'] == 'walking':
store_section(db_connector, '{} - {} ({})'.format(section['from']['name'], section['to']['name'], section['display_informations']['physical_mode']),
from_city_gps,
to_city_gps,
'SUB_SECTION')
# else : initiale section, not used
else:
store_section(db_connector, 'Waiting {} minutes'.format(section['duration']/60),
None,
None,
'DELAY')
def run_travel_optimisation(trip_start_date, is_min_co2_search = False, is_force_compute = False):
"""run the treatment to find the best optimized trip
Args:
trip_start_date (datetime): trip start date in format "%Y%m%dT%H%M%S"
is_min_co2_search (bool): specify is optimisation is based on co2 emission or duration
is_force_compute (bool): force the re-calculation of trips betweens all prefecture (very slow)
"""
waypoint_co2 = {}
waypoint_durations = {}
# get all prefectures referential
db_connector = Connector()
with db_connector:
results = db_connector.execute_query(sql.SQL_GET_ALL_PREFECTURE)
all_waypoints = pd.DataFrame(results.fetchall())
# Vérification si les trajets péfecture à préfecture ont été déjà calculés
db_connector = Connector()
with db_connector:
saved_waypoints = db_connector.execute_query(sql.SQL_GET_WAYPOINTS)
# Dans le précalcul des trajets optimaux, utilisation de la date courante
travel_date = datetime.now().strftime("%Y%m%dT%H%M%S")
bad_waypoints = []
if saved_waypoints.rowcount > 0 and not is_force_compute:
print("le référentiel des voyage existe déjà")
else:
try:
bdd_management.truncate_journey()
for (from_city, to_city) in combinations(all_waypoints[0].values, 2):
try:
if int(from_city) in bad_waypoints or int(to_city) in bad_waypoints:
continue
route = requests.get(API_NAVITIA.format(
int(from_city), int(to_city), travel_date, API_KEY))
response = json.loads(route.text)
mid_duration = 0
mid_co2 = 0
for journey in response["journeys"]:
mid_duration += journey["duration"]
mid_co2 += journey["co2_emission"]["value"]
waypoint_co2[frozenset([from_city, to_city])
] = mid_co2/len(response["journeys"])
waypoint_durations[frozenset(
[from_city, to_city])] = mid_duration/len(response["journeys"])
except Exception as e:
print("Error with finding the route between %s and %s : %s" %
(from_city, to_city, response["error"]["message"]))
if 'no destination point' == response["error"]["message"]:
bad_waypoints.append(int(to_city))
if 'no origin point' == response["error"]["message"]:
bad_waypoints.append(int(from_city))
for bad_insee_code in re.findall('The entry point: admin:fr:([0-9]+) is not valid', response["error"]["message"]):
if not int(bad_insee_code) in bad_waypoints:
bad_waypoints.append(int(bad_insee_code))
# Enregistrement des trajets point à point (préfecture à préfecture)
db_connector = Connector()
with db_connector:
for (waypoint1, waypoint2) in waypoint_co2.keys():
waypoint = [waypoint1,
waypoint2,
str(waypoint_co2[frozenset([waypoint1, waypoint2])]),
str(int(waypoint_durations[frozenset([waypoint1, waypoint2])]))]
db_connector.execute_nonquery(sql.SQL_INSERT_WAYPOINT, waypoint)
# commit trajets unitaires dans la bdd
db_connector.commit()
# enregistrement des préfectures non trouvée (pas de gare)
print(bad_waypoints)
db_connector = Connector()
with db_connector:
for bad_city in bad_waypoints:
db_connector.execute_nonquery(
sql.SQL_INSERT_CITY_WITHOUT_STATION, str(bad_city))
#db_connector.commit()
except Exception as e:
print('Erreur durant la génération des trajets de préfecture en préfecture. Rollback effectué')
waypoint_co2 = {}
waypoint_durations = {}
processed_waypoints = set()
db_connector = Connector()
with db_connector:
waypoints = db_connector.execute_query(sql.SQL_GET_WAYPOINTS)
for row in waypoints:
waypoint_co2[frozenset([int(row[0]), int(row[1])])] = row[2]
waypoint_durations[frozenset([int(row[0]), int(row[1])])] = row[3]
processed_waypoints.update([row[0], row[1]])
travel_results = algorithms.run_genetic_algorithm(waypoints = list(processed_waypoints), is_min_co2_search = is_min_co2_search, generations=300, population_size=100 )
# take most represented trip order
journey_groups = Counter(chain(*travel_results))
top_journeys = journey_groups.most_common(1)[0][0]
print('Le voyage le plus représentatif est :')
print(top_journeys)
# calcul des horaires de voyage réels pour le trajet le plus optimisé
print('Départ du calcul du voyage le %s' %
(datetime_str_to_datetime_str(trip_start_date)))
travel_date = trip_start_date
db_connector = Connector()
with db_connector:
try:
#vidage de la table contenant les informations du voyage
bdd_management.truncate_roadtrip()
for i in range(len(top_journeys)-1):
try:
from_city_insee = top_journeys[i]
to_city_insee = top_journeys[i+1]
route = requests.get(API_NAVITIA.format(
int(from_city_insee), int(to_city_insee), travel_date, API_KEY))
travels = json.loads(route.text)
# Contrôle des voyage reçus pour identifier le plus adapté à recherche
best_travel = travels["journeys"][0]
for travel in travels["journeys"]:
if is_min_co2_search and float(best_travel['co2_emission']['value']) > float(travel['co2_emission']['value']):
best_travel = travel
if best_travel['arrival_date_time'] > travel['arrival_date_time']:
best_travel = travel
# sauvegarde du trajet 'i' en base
save_trip_section(db_connector, all_waypoints, from_city_insee, to_city_insee, best_travel)
# le prochain trajet devra avoir une date de départ > à la date de ce trajet
travel_date = best_travel['arrival_date_time']
except Exception as e:
print("!! Erreur durant le calcul du trajet entre '%s' et '%s'" %
(from_city_insee, to_city_insee))
#Ecriture du résumé du voyage
resume = db_connector.execute_query(sql.SQL_GET_C02_CONSUMPTION_RESUME)
resume = resume.fetchone()
resume_description = """Début du voyage le {} . Arrivée le {}.
Le voyage à durée {} pour un total de {:d} kgeC""".format(
datetime_str_to_datetime_str(trip_start_date),
datetime_str_to_datetime_str(travel_date),
str(timedelta(seconds=resume[0])) ,
trunc( resume[1]/1000))
store_section(db_connector, resume_description, None, None, 'INFO', resume[0], resume[1])
db_connector.commit()
except Exception as e:
db_connector.rollback()
print('Erreur durant la création du voyage. rollback effectué!!!')
print('print map with road-trip data')
visualization.generate_visualization()
print('Travel complete. Have nive trip!!!')
| trip_section(db_c | identifier_name |
roadtrip_compute.py | import pandas as pd
from itertools import combinations
import requests
import json
from math import trunc
from datetime import datetime, timedelta
import re
from collections import Counter
from itertools import chain
from DatabaseAccess.Connector import Connector
import DatabaseAccess.sql_requests as sql
import bdd_management
import algorithms
import visualization
#Liste des clefs générées pour utiliser l'API
#API_KEY = '6cf28a3a-59c3-4c82-8cbf-8fa5e64b01da'
#API_KEY = '3fd6041b-beda-4a79-9f1a-09bc263a1dfd'
#API_KEY = 'd3f69ecb-68f5-477e-b1bb-d58208f936c5'
#API_KEY = '78cc6f8e-68d6-450d-89d0-8a085b6c5af5'
API_KEY = 'b84ebebd-476c-4204-b195-7ffeb67043e7'
#API_KEY = 'cc3bc7b1-4c27-4176-aefd-15017c363178'
#API_KEY = '57f195e9-78a9-4fd7-a10c-312f0502d659'
#constantes
API_NAVITIA = "https://api.sncf.com/v1/coverage/sncf/journeys?key={3}&from=admin:fr:{0}&to=admin:fr:{1}&datetime={2}&count=20"
all_waypoints = None
def datetime_str_to_datetime_str(datetime_str, fromFormat="%Y%m%dT%H%M%S", toFormat="%d/%m/%Y à %H:%M"):
"""Convert datetime in string format to another datetime string
Args:
datetime_str (str): input string
fromFormat (str, optional): input datetime format. Defaults to "%Y%m%dT%H%M%S".
toFormat (str, optional): output datetime format. Defaults to "%d/%m/%Y à %H:%M".
Returns:
str: output datetime to string formated in the given format
"""
date_time = datetime.strptime(datetime_str, fromFormat)
return date_time.strftime(toFormat)
def store_section(db_connector, description, geo_point_from, geo_point_to, section_type, duration=None, co2=None):
"""store trip section information in db
Args:
db_connector (psycopg 'connection'): connection instance to the database. Used to keep all requests in the same transaction
description (str): trip section resume
geo_point_from (str): start city coord (lat;long)
geo_point_to (float): end city coord (lat;long)
section_type (int): type of trip section [INFO, SECTION, SUB_SECTION, DELAY]
duration (int, optional): duration of the travel. Defaults to None.
co2 (float, optional): co2 emission for the travel. Defaults to None.
"""
indentation = ''
if section_type == 'DELAY' or section_type == 'SUB_SECTION':
indentation = ' -> '
print(indentation + description)
db_connector.execute_nonquery(sql.SQL_INSERT_FRENCH_TRIP_SECTION, [
geo_point_from, geo_point_to, description, section_type, duration, co2])
def save_trip_section(db_connector, all_waypoints, from_city_insee, to_city_insee, best_travel):
"""format trip section informations then print & store in db
Args:
db_connector (psycopg 'connection'): connection instance to the database. Used to keep all requests in the same transaction
from_city_insee (str): from city insee code
to_city_insee (str): to city insee code
best_travel (json): data about trip section
"""
from_city_name = all_waypoints.loc[all_waypoints[0]
== from_city_insee].values[0][2]
to_city_name = all_waypoints.loc[all_waypoints[0]
== to_city_insee].values[0][2]
from_city_gps = all_waypoints.loc[all_waypoints[0]
== to_city_insee].values[0][3]
to_city_gps = all_waypoints.loc[all_waypoints[0]
== to_city_insee].values[0][3]
store_section(db_connector, 'Voyage de {} à {}. Départ le {} - Arrivée le {} après {} transferts '.format(from_city_name, to_city_name, datetime_str_to_datetime_str(best_travel['departure_date_time']), datetime_str_to_datetime_str(best_travel['arrival_date_time']), best_travel['nb_transfers']),
None,
None,
'SECTION',
best_travel['duration'],
best_travel['co2_emission']["value"]
)
for section in best_travel['sections']:
if 'from' in section:
if not section['type'] == 'crow_fly':
# vilaine faute d'orthographe sur transfer_type
if not 'transfer_type' in section or not section['transfer_type'] == 'walking':
store_section(db_connector, '{} - {} ({})'.format(section['from']['name'], section['to']['name'], section['display_informations']['physical_mode']),
from_city_gps,
to_city_gps,
'SUB_SECTION')
# else : initiale section, not used
else:
store_section(db_connector, 'Waiting {} minutes'.format(section['duration']/60),
None,
None,
'DELAY')
def run_travel_optimisation(trip_start_date, is_min_co2_search = False, is_force_compute = False):
"""run the treatment to find the best optimized trip
Args:
trip_start_date (datetime): trip start date in format "%Y%m%dT%H%M%S"
is_min_co2_search (bool): specify is optimisation is based on co2 emission or duration
is_force_compute (bool): force the re-calculation of trips betweens all prefecture (very slow)
"""
waypoint_co2 = {}
waypoint_durations = {}
# get all prefectures referential
db_connector = Connector()
with db_connector:
results = db_connector.execute_query(sql.SQL_GET_ALL_PREFECTURE)
all_waypoints = pd.DataFrame(results.fetchall())
# Vérification si les trajets péfecture à préfecture ont été déjà calculés
db_connector = Connector()
with db_connector:
saved_waypoints = db_connector.execute_query(sql.SQL_GET_WAYPOINTS)
# Dans le précalcul des trajets optimaux, utilisation de la date courante
travel_date = datetime.now().strftime("%Y%m%dT%H%M%S")
bad_waypoints = []
if saved_waypoints.rowcount > 0 and not is_force_compute:
print("le référentiel des voyage existe déjà")
else:
try:
bdd_management.truncate_journey()
for (from_city, to_city) in combinations(all_waypoints[0].values, 2):
try:
if int(from_city) in bad_waypoints or int(to_city) in bad_waypoints:
continue
| route = requests.get(API_NAVITIA.format(
int(from_city), int(to_city), travel_date, API_KEY))
response = json.loads(route.text)
mid_duration = 0
mid_co2 = 0
for journey in response["journeys"]:
mid_duration += journey["duration"]
mid_co2 += journey["co2_emission"]["value"]
waypoint_co2[frozenset([from_city, to_city])
] = mid_co2/len(response["journeys"])
waypoint_durations[frozenset(
[from_city, to_city])] = mid_duration/len(response["journeys"])
except Exception as e:
print("Error with finding the route between %s and %s : %s" %
(from_city, to_city, response["error"]["message"]))
if 'no destination point' == response["error"]["message"]:
bad_waypoints.append(int(to_city))
if 'no origin point' == response["error"]["message"]:
bad_waypoints.append(int(from_city))
for bad_insee_code in re.findall('The entry point: admin:fr:([0-9]+) is not valid', response["error"]["message"]):
if not int(bad_insee_code) in bad_waypoints:
bad_waypoints.append(int(bad_insee_code))
# Enregistrement des trajets point à point (préfecture à préfecture)
db_connector = Connector()
with db_connector:
for (waypoint1, waypoint2) in waypoint_co2.keys():
waypoint = [waypoint1,
waypoint2,
str(waypoint_co2[frozenset([waypoint1, waypoint2])]),
str(int(waypoint_durations[frozenset([waypoint1, waypoint2])]))]
db_connector.execute_nonquery(sql.SQL_INSERT_WAYPOINT, waypoint)
# commit trajets unitaires dans la bdd
db_connector.commit()
# enregistrement des préfectures non trouvée (pas de gare)
print(bad_waypoints)
db_connector = Connector()
with db_connector:
for bad_city in bad_waypoints:
db_connector.execute_nonquery(
sql.SQL_INSERT_CITY_WITHOUT_STATION, str(bad_city))
#db_connector.commit()
except Exception as e:
print('Erreur durant la génération des trajets de préfecture en préfecture. Rollback effectué')
waypoint_co2 = {}
waypoint_durations = {}
processed_waypoints = set()
db_connector = Connector()
with db_connector:
waypoints = db_connector.execute_query(sql.SQL_GET_WAYPOINTS)
for row in waypoints:
waypoint_co2[frozenset([int(row[0]), int(row[1])])] = row[2]
waypoint_durations[frozenset([int(row[0]), int(row[1])])] = row[3]
processed_waypoints.update([row[0], row[1]])
travel_results = algorithms.run_genetic_algorithm(waypoints = list(processed_waypoints), is_min_co2_search = is_min_co2_search, generations=300, population_size=100 )
# take most represented trip order
journey_groups = Counter(chain(*travel_results))
top_journeys = journey_groups.most_common(1)[0][0]
print('Le voyage le plus représentatif est :')
print(top_journeys)
# calcul des horaires de voyage réels pour le trajet le plus optimisé
print('Départ du calcul du voyage le %s' %
(datetime_str_to_datetime_str(trip_start_date)))
travel_date = trip_start_date
db_connector = Connector()
with db_connector:
try:
#vidage de la table contenant les informations du voyage
bdd_management.truncate_roadtrip()
for i in range(len(top_journeys)-1):
try:
from_city_insee = top_journeys[i]
to_city_insee = top_journeys[i+1]
route = requests.get(API_NAVITIA.format(
int(from_city_insee), int(to_city_insee), travel_date, API_KEY))
travels = json.loads(route.text)
# Contrôle des voyage reçus pour identifier le plus adapté à recherche
best_travel = travels["journeys"][0]
for travel in travels["journeys"]:
if is_min_co2_search and float(best_travel['co2_emission']['value']) > float(travel['co2_emission']['value']):
best_travel = travel
if best_travel['arrival_date_time'] > travel['arrival_date_time']:
best_travel = travel
# sauvegarde du trajet 'i' en base
save_trip_section(db_connector, all_waypoints, from_city_insee, to_city_insee, best_travel)
# le prochain trajet devra avoir une date de départ > à la date de ce trajet
travel_date = best_travel['arrival_date_time']
except Exception as e:
print("!! Erreur durant le calcul du trajet entre '%s' et '%s'" %
(from_city_insee, to_city_insee))
#Ecriture du résumé du voyage
resume = db_connector.execute_query(sql.SQL_GET_C02_CONSUMPTION_RESUME)
resume = resume.fetchone()
resume_description = """Début du voyage le {} . Arrivée le {}.
Le voyage à durée {} pour un total de {:d} kgeC""".format(
datetime_str_to_datetime_str(trip_start_date),
datetime_str_to_datetime_str(travel_date),
str(timedelta(seconds=resume[0])) ,
trunc( resume[1]/1000))
store_section(db_connector, resume_description, None, None, 'INFO', resume[0], resume[1])
db_connector.commit()
except Exception as e:
db_connector.rollback()
print('Erreur durant la création du voyage. rollback effectué!!!')
print('print map with road-trip data')
visualization.generate_visualization()
print('Travel complete. Have nive trip!!!')
| conditional_block | |
roadtrip_compute.py | import pandas as pd
from itertools import combinations
import requests
import json
from math import trunc
from datetime import datetime, timedelta
import re
from collections import Counter
from itertools import chain
from DatabaseAccess.Connector import Connector
import DatabaseAccess.sql_requests as sql
import bdd_management
import algorithms
import visualization
#Liste des clefs générées pour utiliser l'API
#API_KEY = '6cf28a3a-59c3-4c82-8cbf-8fa5e64b01da'
#API_KEY = '3fd6041b-beda-4a79-9f1a-09bc263a1dfd'
#API_KEY = 'd3f69ecb-68f5-477e-b1bb-d58208f936c5'
#API_KEY = '78cc6f8e-68d6-450d-89d0-8a085b6c5af5'
API_KEY = 'b84ebebd-476c-4204-b195-7ffeb67043e7'
#API_KEY = 'cc3bc7b1-4c27-4176-aefd-15017c363178'
#API_KEY = '57f195e9-78a9-4fd7-a10c-312f0502d659'
#constantes
API_NAVITIA = "https://api.sncf.com/v1/coverage/sncf/journeys?key={3}&from=admin:fr:{0}&to=admin:fr:{1}&datetime={2}&count=20"
all_waypoints = None
def datetime_str_to_datetime_str(datetime_str, fromFormat="%Y%m%dT%H%M%S", toFormat="%d/%m/%Y à %H:%M"):
"""Convert datetime in string format to another datetime string
Args:
datetime_str (str): input string
fromFormat (str, optional): input datetime format. Defaults to "%Y%m%dT%H%M%S".
toFormat (str, optional): output datetime format. Defaults to "%d/%m/%Y à %H:%M".
Returns:
str: output datetime to string formated in the given format
"""
date_time = datetime.strptime(datetime_str, fromFormat)
return date_time.strftime(toFormat)
def store_section(db_connector, description, geo_point_from, geo_point_to, section_type, duration=None, co2=None):
"""st | save_trip_section(db_connector, all_waypoints, from_city_insee, to_city_insee, best_travel):
"""format trip section informations then print & store in db
Args:
db_connector (psycopg 'connection'): connection instance to the database. Used to keep all requests in the same transaction
from_city_insee (str): from city insee code
to_city_insee (str): to city insee code
best_travel (json): data about trip section
"""
from_city_name = all_waypoints.loc[all_waypoints[0]
== from_city_insee].values[0][2]
to_city_name = all_waypoints.loc[all_waypoints[0]
== to_city_insee].values[0][2]
from_city_gps = all_waypoints.loc[all_waypoints[0]
== to_city_insee].values[0][3]
to_city_gps = all_waypoints.loc[all_waypoints[0]
== to_city_insee].values[0][3]
store_section(db_connector, 'Voyage de {} à {}. Départ le {} - Arrivée le {} après {} transferts '.format(from_city_name, to_city_name, datetime_str_to_datetime_str(best_travel['departure_date_time']), datetime_str_to_datetime_str(best_travel['arrival_date_time']), best_travel['nb_transfers']),
None,
None,
'SECTION',
best_travel['duration'],
best_travel['co2_emission']["value"]
)
for section in best_travel['sections']:
if 'from' in section:
if not section['type'] == 'crow_fly':
# vilaine faute d'orthographe sur transfer_type
if not 'transfer_type' in section or not section['transfer_type'] == 'walking':
store_section(db_connector, '{} - {} ({})'.format(section['from']['name'], section['to']['name'], section['display_informations']['physical_mode']),
from_city_gps,
to_city_gps,
'SUB_SECTION')
# else : initiale section, not used
else:
store_section(db_connector, 'Waiting {} minutes'.format(section['duration']/60),
None,
None,
'DELAY')
def run_travel_optimisation(trip_start_date, is_min_co2_search = False, is_force_compute = False):
"""run the treatment to find the best optimized trip
Args:
trip_start_date (datetime): trip start date in format "%Y%m%dT%H%M%S"
is_min_co2_search (bool): specify is optimisation is based on co2 emission or duration
is_force_compute (bool): force the re-calculation of trips betweens all prefecture (very slow)
"""
waypoint_co2 = {}
waypoint_durations = {}
# get all prefectures referential
db_connector = Connector()
with db_connector:
results = db_connector.execute_query(sql.SQL_GET_ALL_PREFECTURE)
all_waypoints = pd.DataFrame(results.fetchall())
# Vérification si les trajets péfecture à préfecture ont été déjà calculés
db_connector = Connector()
with db_connector:
saved_waypoints = db_connector.execute_query(sql.SQL_GET_WAYPOINTS)
# Dans le précalcul des trajets optimaux, utilisation de la date courante
travel_date = datetime.now().strftime("%Y%m%dT%H%M%S")
bad_waypoints = []
if saved_waypoints.rowcount > 0 and not is_force_compute:
print("le référentiel des voyage existe déjà")
else:
try:
bdd_management.truncate_journey()
for (from_city, to_city) in combinations(all_waypoints[0].values, 2):
try:
if int(from_city) in bad_waypoints or int(to_city) in bad_waypoints:
continue
route = requests.get(API_NAVITIA.format(
int(from_city), int(to_city), travel_date, API_KEY))
response = json.loads(route.text)
mid_duration = 0
mid_co2 = 0
for journey in response["journeys"]:
mid_duration += journey["duration"]
mid_co2 += journey["co2_emission"]["value"]
waypoint_co2[frozenset([from_city, to_city])
] = mid_co2/len(response["journeys"])
waypoint_durations[frozenset(
[from_city, to_city])] = mid_duration/len(response["journeys"])
except Exception as e:
print("Error with finding the route between %s and %s : %s" %
(from_city, to_city, response["error"]["message"]))
if 'no destination point' == response["error"]["message"]:
bad_waypoints.append(int(to_city))
if 'no origin point' == response["error"]["message"]:
bad_waypoints.append(int(from_city))
for bad_insee_code in re.findall('The entry point: admin:fr:([0-9]+) is not valid', response["error"]["message"]):
if not int(bad_insee_code) in bad_waypoints:
bad_waypoints.append(int(bad_insee_code))
# Enregistrement des trajets point à point (préfecture à préfecture)
db_connector = Connector()
with db_connector:
for (waypoint1, waypoint2) in waypoint_co2.keys():
waypoint = [waypoint1,
waypoint2,
str(waypoint_co2[frozenset([waypoint1, waypoint2])]),
str(int(waypoint_durations[frozenset([waypoint1, waypoint2])]))]
db_connector.execute_nonquery(sql.SQL_INSERT_WAYPOINT, waypoint)
# commit trajets unitaires dans la bdd
db_connector.commit()
# enregistrement des préfectures non trouvée (pas de gare)
print(bad_waypoints)
db_connector = Connector()
with db_connector:
for bad_city in bad_waypoints:
db_connector.execute_nonquery(
sql.SQL_INSERT_CITY_WITHOUT_STATION, str(bad_city))
#db_connector.commit()
except Exception as e:
print('Erreur durant la génération des trajets de préfecture en préfecture. Rollback effectué')
waypoint_co2 = {}
waypoint_durations = {}
processed_waypoints = set()
db_connector = Connector()
with db_connector:
waypoints = db_connector.execute_query(sql.SQL_GET_WAYPOINTS)
for row in waypoints:
waypoint_co2[frozenset([int(row[0]), int(row[1])])] = row[2]
waypoint_durations[frozenset([int(row[0]), int(row[1])])] = row[3]
processed_waypoints.update([row[0], row[1]])
travel_results = algorithms.run_genetic_algorithm(waypoints = list(processed_waypoints), is_min_co2_search = is_min_co2_search, generations=300, population_size=100 )
# take most represented trip order
journey_groups = Counter(chain(*travel_results))
top_journeys = journey_groups.most_common(1)[0][0]
print('Le voyage le plus représentatif est :')
print(top_journeys)
# calcul des horaires de voyage réels pour le trajet le plus optimisé
print('Départ du calcul du voyage le %s' %
(datetime_str_to_datetime_str(trip_start_date)))
travel_date = trip_start_date
db_connector = Connector()
with db_connector:
try:
#vidage de la table contenant les informations du voyage
bdd_management.truncate_roadtrip()
for i in range(len(top_journeys)-1):
try:
from_city_insee = top_journeys[i]
to_city_insee = top_journeys[i+1]
route = requests.get(API_NAVITIA.format(
int(from_city_insee), int(to_city_insee), travel_date, API_KEY))
travels = json.loads(route.text)
# Contrôle des voyage reçus pour identifier le plus adapté à recherche
best_travel = travels["journeys"][0]
for travel in travels["journeys"]:
if is_min_co2_search and float(best_travel['co2_emission']['value']) > float(travel['co2_emission']['value']):
best_travel = travel
if best_travel['arrival_date_time'] > travel['arrival_date_time']:
best_travel = travel
# sauvegarde du trajet 'i' en base
save_trip_section(db_connector, all_waypoints, from_city_insee, to_city_insee, best_travel)
# le prochain trajet devra avoir une date de départ > à la date de ce trajet
travel_date = best_travel['arrival_date_time']
except Exception as e:
print("!! Erreur durant le calcul du trajet entre '%s' et '%s'" %
(from_city_insee, to_city_insee))
#Ecriture du résumé du voyage
resume = db_connector.execute_query(sql.SQL_GET_C02_CONSUMPTION_RESUME)
resume = resume.fetchone()
resume_description = """Début du voyage le {} . Arrivée le {}.
Le voyage à durée {} pour un total de {:d} kgeC""".format(
datetime_str_to_datetime_str(trip_start_date),
datetime_str_to_datetime_str(travel_date),
str(timedelta(seconds=resume[0])) ,
trunc( resume[1]/1000))
store_section(db_connector, resume_description, None, None, 'INFO', resume[0], resume[1])
db_connector.commit()
except Exception as e:
db_connector.rollback()
print('Erreur durant la création du voyage. rollback effectué!!!')
print('print map with road-trip data')
visualization.generate_visualization()
print('Travel complete. Have nive trip!!!')
| ore trip section information in db
Args:
db_connector (psycopg 'connection'): connection instance to the database. Used to keep all requests in the same transaction
description (str): trip section resume
geo_point_from (str): start city coord (lat;long)
geo_point_to (float): end city coord (lat;long)
section_type (int): type of trip section [INFO, SECTION, SUB_SECTION, DELAY]
duration (int, optional): duration of the travel. Defaults to None.
co2 (float, optional): co2 emission for the travel. Defaults to None.
"""
indentation = ''
if section_type == 'DELAY' or section_type == 'SUB_SECTION':
indentation = ' -> '
print(indentation + description)
db_connector.execute_nonquery(sql.SQL_INSERT_FRENCH_TRIP_SECTION, [
geo_point_from, geo_point_to, description, section_type, duration, co2])
def | identifier_body |
http.go | package util
import (
"bytes"
"errors"
"fmt"
"io"
"io/ioutil"
"net"
"net/http"
"net/url"
"regexp"
"strconv"
"strings"
. "github.com/gofiber/fiber/v2"
jsoniter "github.com/json-iterator/go"
"github.com/valyala/fasthttp"
)
//获取真实的IP 1.1.1.1, 2.2.2.2, 3.3.3.3
func CtxClientIP(ctx *fasthttp.RequestCtx) string {
clientIP := string(ctx.Request.Header.Peek("X-Forwarded-For"))
if index := strings.IndexByte(clientIP, ','); index >= 0 {
clientIP = clientIP[0:index]
//获取最开始的一个 即 1.1.1.1
}
clientIP = strings.TrimSpace(clientIP)
if len(clientIP) > 0 {
return clientIP
}
clientIP = strings.TrimSpace(string(ctx.Request.Header.Peek("X-Real-Ip")))
if len(clientIP) > 0 {
return clientIP
}
return ctx.RemoteIP().String()
}
func GetIp(r *http.Request) string {
// var r *http.Request
ip := ClientPublicIP(r)
if ip == "" {
ip = ClientIP(r)
}
return ip
}
// DoURL 请求URL并且解析JSON格式的返回数据
func DoURL(method, url string, body []byte) ([]byte, error) {
req, err := http.NewRequest(method, url, bytes.NewBuffer(body))
if err != nil {
return nil, err
}
resp, err := (&http.Client{}).Do(req)
if err != nil {
return nil, err
}
defer resp.Body.Close()
return ioutil.ReadAll(resp.Body)
}
// GetURL 请求URL
func GetURL(URL string) ([]byte, error) {
resp, err := http.Get(URL)
if err != nil {
return nil, err
}
defer resp.Body.Close()
return ioutil.ReadAll(resp.Body)
}
// GetURL 请求URL
func CtxGetURL(URL string) ([]byte, error) {
req := fasthttp.AcquireRequest()
defer fasthttp.ReleaseRequest(req) // 用完需要释放资源
// 默认是application/x-www-form-urlencoded
req.Header.SetMethod("GET")
req.SetRequestURI(URL)
resp := fasthttp.AcquireResponse()
defer fasthttp.ReleaseResponse(resp) // 用完需要释放资源
if err := fasthttp.Do(req, resp); err != nil {
return nil, err
}
b := resp.Body()
return b, nil
}
// GetValueURL 请求URL 附带参数
func GetValueURL(URL string, params url.Values) ([]byte, error) {
if params == nil {
return GetURL(URL)
}
resp, err := http.Get(fmt.Sprint(URL, "?", params.Encode()))
if err != nil {
return nil, err
}
defer resp.Body.Close()
return ioutil.ReadAll(resp.Body)
}
func CtxGetValueURL(URL string, params url.Values) ([]byte, error) {
if params == nil {
return CtxGetURL(URL)
}
req := fasthttp.AcquireRequest()
defer fasthttp.ReleaseRequest(req) // 用完需要释放资源
// 默认是application/x-www-form-urlencoded
req.Header.SetMethod("GET")
req.SetRequestURI(fmt.Sprint(URL, "?", params.Encode()))
resp := fasthttp.AcquireResponse()
defer fasthttp.ReleaseResponse(resp) // 用完需要释放资源
if err := fasthttp.Do(req, resp); err != nil {
return nil, err
}
b := resp.Body()
return b, nil
}
// GetURLReceiveJSON GET请求 自动解析JSON
func GetURLReceiveJSON(URL string, params url.Values, receive interface{}) error {
body, err := GetValueURL(URL, params)
if err != nil {
return err
}
var json = jsoniter.ConfigCompatibleWithStandardLibrary
err = json.Unmarshal(body, receive)
if err != nil {
return fmt.Errorf("json.Unmarshal failed: %s, %v", body, err)
}
return nil
}
func CtxGetURLReceiveJSON(URL string, params url.Values, receive interface{}) error {
body, err := CtxGetValueURL(URL, params)
if err != nil {
return err
}
var json = jsoniter.ConfigCompatibleWithStandardLibrary
err = json.Unmarshal(body, receive)
if err != nil {
return fmt.Errorf("json.Unmarshal failed: %s, %v", body, err)
}
return nil
}
// PostURL 请求URL
func PostURL(URL string, params url.Values) ([]byte, error) {
resp, err := http.PostForm(URL, params)
if err != nil {
return nil, err
}
defer resp.Body.Close()
return ioutil.ReadAll(resp.Body)
}
// 检查http请求中是否包含所需参数
// Deprecated: 使用CheckNotNil代替
func CheckParam(hr *http.Request, args ...string) string {
if strings.ToUpper(hr.Method) == "GET" {
for _, val := range args {
rs := hr.FormValue(val)
if StringIsEmpty(rs) {
return val
}
}
return ""
} else if strings.ToUpper(hr.Method) == "POST" { //post
for _, val := range args {
rs := hr.PostFormValue(val)
if StringIsEmpty(rs) {
return val
}
}
return ""
} else {
return hr.Method
}
}
// PostURLReceiveJSON POST请求 自动解析JSON
func PostURLReceiveJSON(URL string, params url.Values, receive interface{}) error {
body, err := PostURL(URL, params)
if err != nil {
return err
}
var json = jsoniter.ConfigCompatibleWithStandardLibrary
err = json.Unmarshal(body, receive)
if err != nil {
return fmt.Errorf("body:%v,err:%v", string(body), err)
}
return nil
}
// PostURLReceiveJSON POST请求 自动解析JSON
func PostMapReceiveJSON(URL string, maps map[string]string, receive interface{}) error {
params := url.Values{}
for k, v := range maps {
params.Set(k, v)
}
body, err := PostURL(URL, params)
if err != nil {
return err
}
var json = jsoniter.ConfigCompatibleWithStandardLibrary
err = json.Unmarshal(body, receive)
if err != nil {
return fmt.Errorf("body:%v,err:%v", string(body), err)
}
return nil
}
// PostJSON POST请求 BODY为JSON格式 ContentType=application/json
func PostJSON(URL string, v interface{}) ([]byte, error) {
var json = jsoniter.ConfigCompatibleWithStandardLibrary
b, err := json.Marshal(v)
if err != nil {
return nil, err
}
resp, err := http.Post(URL, "application/json", bytes.NewReader(b))
if err != nil {
return nil, err
}
defer resp.Body.Close()
return ioutil.ReadAll(resp.Body)
}
// PostJSON POST请求 BODY为JSON格式 ContentType=application/json
func GetJSON(URL string, v interface{}) ([]byte, error) {
var json = jsoniter.ConfigCompatibleWithStandardLibrary
b, err := json.Marshal(v)
if err != nil {
return nil, err
}
client := &http.Client{}
reqest, err := http.NewRequest("GET", URL, bytes.NewReader(b))
reqest.Header.Add("Content-Type", "application/json")
resp, err := client.Do(reqest)
//resp, err := http.Post(URL, "application/json", bytes.NewReader(b))
if err != nil {
return nil, err
}
defer resp.Body.Close()
return ioutil.ReadAll(resp.Body)
}
// PostJSONReceiveJSON POST请求 BODY为JSON格式 ContentType=application/json 自动解析JSON
func PostJSONReceiveJSON(URL string, send, receive interface{}) error {
body, err := PostJSON(URL, send)
if err != nil {
return err
}
var json = jsoniter.ConfigCompatibleWithStandardLibrary
err = json.Unmarshal(body, receive)
if err != nil {
return fmt.Errorf("error:%v,body{%s}", err, body)
}
return nil
}
// PostToJSON POST请求 BODY为json格式
// Deprecated: Please use PostJSON to replace
func PostToJSON(URL string, v interface{}) ([]byte, error) {
var json = jsoniter.ConfigCompatibleWithStandardLibrary
b, err := json.Marshal(v)
if err != nil {
return nil, err
}
req, err := http.NewRequest("POST", URL, bytes.NewBuffer(b))
if err != nil {
return nil, err
}
resp, err := (&http.Client{}).Do(req)
if err != nil {
return nil, err
}
defer resp.Body.Close()
return ioutil.ReadAll(resp.Body)
}
// CheckNotNil 检查HTTP参数是否为空
func CheckNotNil(r *http.Request, args ...string) error {
if args == nil || r == nil {
return nil
}
switch r.Method {
case "GET":
query := r.URL.Query()
for _, v := range args {
if strings.TrimSpace(query.Get(v)) == "" {
return fmt.Errorf("param(%s) is invalid", v)
}
}
case "POST":
for _, v := range args {
if strings.TrimSpace(r.PostFormValue(v)) == "" {
return fmt.Errorf("param(%s) is invalid", v)
}
}
default:
return errors.New("r.Method is not GET or POST")
}
return nil
}
// StringIsEmpty 判断是否有值为空或null或(null)
func StringIsEmpty(s ...string) bool {
var str string
for _, v := range s {
str = strings.TrimSpace(v)
if v == "" || strings.EqualFold(str, "(null)") || strings.EqualFold(str, "null") {
return true
}
}
return false
}
// WriteJSON 写入json字符串
func WriteJSON(w io.Writer, v interface{}) (int, error) {
var json = jsoniter.ConfigCompat | urn ""
}
var ip = strings.TrimSpace(r.Header.Get("X-Real-IP"))
if ip == "" {
ip, _, _ = net.SplitHostPort(strings.TrimSpace(r.RemoteAddr))
}
return ip
}
// CheckRemoteIP 验证IP
// in ips return true
func CheckRemoteIP(r *http.Request, ips ...string) bool {
if r == nil {
return false
}
var ip = GetRemoteIP(r)
for _, v := range ips {
if ip == v {
return true
}
}
return false
}
var regIPv4 = regexp.MustCompile(
`^(((\d{1,2})|(1\d{1,2})|(2[0-4]\d)|(25[0-5]))\.){3}((\d{1,2})|(1\d{1,2})|(2[0-4]\d)|(25[0-5]))$`,
)
// IsValidIPv4 验证是否为合法的ipv4
func IsValidIPv4(ip string) bool {
return regIPv4.MatchString(ip)
}
// FormIntDefault 获取Form参数 如果出错则返回默认值
func FormIntDefault(r *http.Request, key string, def int) int {
i, err := strconv.Atoi(r.FormValue(key))
if err != nil {
return def
}
return i
}
// FormIntDefault 获取Form参数 如果出错则返回默认值
func CtxFormIntDefault(ctx *Ctx, key string, def int) int {
i, err := strconv.Atoi(ctx.FormValue(key))
if err != nil {
return def
}
return i
}
// FormIntSliceDefault 获取Form参数 如果出错则返回默认值
func FormIntSliceDefault(r *http.Request, key, sep string, def []int) []int {
var i int
var err error
var rlt []int
for _, v := range strings.Split(r.FormValue(key), sep) {
i, err = strconv.Atoi(v)
if err != nil {
continue
}
rlt = append(rlt, i)
}
if rlt == nil {
return def
}
return rlt
}
// FormFileValue 快速获取表单提交的文件
// 也用于处理同表单一起提交的信息
func FormFileValue(r *http.Request, key string) (string, error) {
f, _, err := r.FormFile(key)
if err != nil {
return "", err
}
defer f.Close()
b, err := ioutil.ReadAll(f)
if err != nil {
return "", err
}
return string(b), nil
}
// FormFileValues 快速获取表单提交的文件
// 也用于处理同表单一起提交的信息
func FormFileValues(r *http.Request, key string) ([]string, error) {
if r.MultipartForm == nil {
err := r.ParseMultipartForm(32 << 20)
if err != nil {
return nil, err
}
}
if r.MultipartForm != nil && r.MultipartForm.File != nil {
if fhs := r.MultipartForm.File[key]; len(fhs) > 0 {
var rlt = make([]string, 0, len(fhs))
for i := range fhs {
f, err := fhs[i].Open()
if err != nil {
return nil, err
}
b, err := ioutil.ReadAll(f)
f.Close()
if err != nil {
return nil, err
}
rlt = append(rlt, string(b))
}
return rlt, nil
}
}
return nil, http.ErrMissingFile
}
func GetToken(ctx *Ctx) (token string) {
return ctx.Get("X-Token")
}
| ibleWithStandardLibrary
b, err := json.Marshal(v)
if err != nil {
return 0, err
}
return w.Write(b)
}
// GetRemoteIP 获取IP
func GetRemoteIP(r *http.Request) string {
if r == nil {
ret | identifier_body |
http.go | package util
import (
"bytes"
"errors"
"fmt"
"io"
"io/ioutil"
"net"
"net/http"
"net/url"
"regexp"
"strconv"
"strings"
. "github.com/gofiber/fiber/v2"
jsoniter "github.com/json-iterator/go"
"github.com/valyala/fasthttp"
)
//获取真实的IP 1.1.1.1, 2.2.2.2, 3.3.3.3
func CtxClientIP(ctx *fasthttp.RequestCtx) string {
clientIP := string(ctx.Request.Header.Peek("X-Forwarded-For"))
if index := strings.IndexByte(clientIP, ','); index >= 0 {
clientIP = clientIP[0:index]
//获取最开始的一个 即 1.1.1.1
}
clientIP = strings.TrimSpace(clientIP)
if len(clientIP) > 0 {
return clientIP
}
clientIP = strings.TrimSpace(string(ctx.Request.Header.Peek("X-Real-Ip")))
if len(clientIP) > 0 {
return clientIP
}
return ctx.RemoteIP().String()
}
func GetIp(r *http.Request) string {
// var r *http.Request
ip := ClientPublicIP(r)
if ip == "" {
ip = ClientIP(r)
}
return ip
}
// DoURL 请求URL并且解析JSON格式的返回数据
func DoURL(method, url string, body []byte) ([]byte, error) {
req, err := http.NewRequest(method, url, bytes.NewBuffer(body))
if err != nil {
return nil, err
}
resp, err := (&http.Client{}).Do(req)
if err != nil {
return nil, err
}
defer resp.Body.Close()
return ioutil.ReadAll(resp.Body)
}
// GetURL 请求URL
func GetURL(URL string) ([]byte, error) {
resp, err := http.Get(URL)
if err != nil {
return nil, err
}
defer resp.Body.Close()
return ioutil.ReadAll(resp.Body)
}
// GetURL 请求URL
func CtxGetURL(URL string) ([]byte, error) {
req := fasthttp.AcquireRequest()
defer fasthttp.ReleaseRequest(req) // 用完需要释放资源
// 默认是application/x-www-form-urlencoded
req.Header.SetMethod("GET")
req.SetRequestURI(URL)
resp := fasthttp.AcquireResponse()
defer fasthttp.ReleaseResponse(resp) // 用完需要释放资源
if err := fasthttp.Do(req, resp); err != nil {
return nil, err
}
b := resp.Body()
return b, nil
}
// GetValueURL 请求URL 附带参数
func GetValueURL(URL string, params url.Values) ([]byte, error) {
if params == nil {
return GetURL(URL)
}
resp, err := http.Get(fmt.Sprint(URL, "?", params.Encode()))
if err != nil {
return nil, err
}
defer resp.Body.Close()
return ioutil.ReadAll(resp.Body)
}
func CtxGetValueURL(URL string, params url.Values) ([]byte, error) {
if params == nil {
return CtxGetURL(URL)
}
req := fasthttp.AcquireRequest()
defer fasthttp.ReleaseRequest(req) // 用完需要释放资源
// 默认是application/x-www-form-urlencoded
req.Header.SetMethod("GET")
req.SetRequestURI(fmt.Sprint(URL, "?", params.Encode()))
resp := fasthttp.AcquireResponse()
defer fasthttp.ReleaseResponse(resp) // 用完需要释放资源
if err := fasthttp.Do(req, resp); err != nil {
return nil, err
}
b := resp.Body()
return b, nil
}
// GetURLReceiveJSON GET请求 自动解析JSON
func GetURLReceiveJSON(URL string, params url.Values, receive interface{}) error {
body, err := GetValueURL(URL, params)
if err != nil {
return err
}
var json = jsoniter.ConfigCompatibleWithStandardLibrary
err = json.Unmarshal(body, receive)
if err != nil {
return fmt.Errorf("json.Unmarshal failed: %s, %v", body, err)
}
return nil
}
func CtxGetURLReceiveJSON(URL string, params url.Values, receive interface{}) error {
body, err := CtxGetValueURL(URL, params)
if err != nil {
return err
}
var json = jsoniter.ConfigCompatibleWithStandardLibrary
err = json.Unmarshal(body, receive)
if err != nil {
return fmt.Errorf("json.Unmarshal failed: %s, %v", body, err)
}
return nil
}
// PostURL 请求URL
func PostURL(URL string, params url.Values) ([]byte, error) {
resp, err := http.PostForm(URL, params)
if err != nil {
return nil, err
}
defer resp.Body.Close()
return ioutil.ReadAll(resp.Body)
}
// 检查http请求中是否包含所需参数
// Deprecated: 使用CheckNotNil代替
func CheckParam(hr *http.Request, args ...string) string {
if strings.ToUpper(hr.Method) == "GET" {
for _, val := range args {
rs := hr.FormValue(val)
if StringIsEmpty(rs) {
return val
}
}
return ""
} else if strings.ToUpper(hr.Method) == "POST" { //post
for _, val := range args {
rs := hr.PostFormValue(val)
if StringIsEmpty(rs) {
return val
}
}
return ""
} else {
return hr.Method
}
}
// PostURLReceiveJSON POST请求 自动解析JSON
func PostURLReceiveJSON(URL string, params url.Values, receive interface{}) error {
body, err := PostURL(URL, params)
if err != nil {
return err
}
var json = jsoniter.ConfigCompatibleWithStandardLibrary
err = json.Unmarshal(body, receive)
if err != nil {
return fmt.Errorf("body:%v,err:%v", string(body), err)
}
return nil
}
// PostURLReceiveJSON POST请求 自动解析JSON
func PostMapReceiveJSON(URL string, maps map[string]string, receive interface{}) error {
params := url.Values{}
for k, v := range maps {
params.Set(k, v)
}
body, err := PostURL(URL, params)
if err != nil {
return err
}
var json = jsoniter.ConfigCompatibleWithStandardLibrary
err = json.Unmarshal(body, receive)
if err != nil {
return fmt.Errorf("body:%v,err:%v", string(body), err)
}
return nil
}
// PostJSON POST请求 BODY为JSON格式 ContentType=application/json
func PostJSON(URL string, v interface{}) ([]byte, error) {
var json = jsoniter.ConfigCompatibleWithStandardLibrary
b, err := json.Marshal(v)
if err != nil {
return nil, err
}
resp, err := http.Post(URL, "application/json", bytes.NewReader(b))
if err != nil {
return nil, err
}
defer resp.Body.Close()
return ioutil.ReadAll(resp.Body)
}
// PostJSON POST请求 BODY为JSON格式 ContentType=application/json
func GetJSON(URL string, v interface{}) ([]byte, error) {
var json = jsoniter.ConfigCompatibleWithStandardLibrary
b, err := json.Marshal(v)
if err != nil {
return nil, err
}
client := &http.Client{}
reqest, err := http.NewRequest("GET", URL, bytes.NewReader(b))
reqest.Header.Add("Content-Type", "application/json")
resp, err := client.Do(reqest)
//resp, err := http.Post(URL, "application/json", bytes.NewReader(b))
if err != nil {
return nil, err
}
defer resp.Body.Close()
return ioutil.ReadAll(resp.Body)
}
// PostJSONReceiveJSON POST请求 BODY为JSON格式 ContentType=application/json 自动解析JSON
func PostJSONReceiveJSON(URL string, send, receive interface{}) error {
body, err := PostJSON(URL, send)
if err != nil {
return err
}
var json = jsoniter.ConfigCompatibleWithStandardLibrary
err = json.Unmarshal(body, receive)
if err != nil {
return fmt.Errorf("error:%v,body{%s}", err, body)
}
return nil
}
// PostToJSON POST请求 BODY为json格式
// Deprecated: Please use PostJSON to replace
func PostToJSON(URL string, v interface{}) ([]byte, error) {
var json = jsoniter.ConfigCompatibleWithStandardLibrary
b, err := json.Marshal(v)
if err != nil {
return nil, err
}
req, err := http.NewRequest("POST", URL, bytes.NewBuffer(b))
if err != nil {
return nil, err
}
resp, err := (&http.Client{}).Do(req)
if err != nil {
return nil, err
}
defer resp.Body.Close()
return ioutil.ReadAll(resp.Body)
}
// CheckNotNil 检查HTTP参数是否为空
func CheckNotNil(r *http.Request, args ...string) error {
if args == nil || r == nil {
return nil
}
switch r.Method {
case "GET":
query := r.URL.Query()
for _, v := range args {
if strings.TrimSpace(query.Get(v)) == "" {
return fmt.Errorf("param(%s) is invalid", v)
}
}
case "POST":
for _, v := range args {
if strings.TrimSpace(r.PostFormValue(v)) == "" {
return fmt.Errorf("param(%s) is invalid", v)
}
}
default:
return errors.New("r.Method is not GET or POST")
}
return nil
}
// StringIsEmpty 判断是否有值为空或null或(null)
func StringIsEmpty(s ...string) bool {
var str string
for _, v := range s {
str = strings.TrimSpace(v)
if v == "" || strings.EqualFold(str, "(null)") || strings.EqualFold(str, "null") {
return true
}
}
return false
}
// WriteJSON 写入json字符串
func WriteJSON(w io.Writer, v interface{}) (int, error) {
var json = jsoniter.ConfigCompatibleWithStandardLibrary
b, err := json.Marshal(v)
if err != nil {
return 0, err
}
return w.Write(b)
}
// GetRemoteIP 获取IP
func GetRemoteIP(r *http.Request) string {
if r == nil {
return ""
}
var ip = strings.TrimSpace(r.Header.Get("X-Real-IP"))
if ip == "" {
ip, _, _ = net.SplitHostPort(strings.TrimSpace(r.RemoteAddr))
}
return ip
}
// CheckRemoteIP 验证IP
// in ips return true
func CheckRemoteIP(r *http.Request, ips ...string) bool {
if r == nil {
return false
}
var ip = GetRemoteIP(r)
for _, v := range ips {
if ip == v {
return true
}
}
return false
}
var regIPv4 = regexp.MustCompile(
`^(((\d{1,2})|(1\d{1,2})|(2[0-4]\d)|(25[0-5]))\.){3}((\d{1,2})|(1\d{1,2})|(2[0-4]\d)|(25[0-5]))$`,
)
| v4 验证是否为合法的ipv4
func IsValidIPv4(ip string) bool {
return regIPv4.MatchString(ip)
}
// FormIntDefault 获取Form参数 如果出错则返回默认值
func FormIntDefault(r *http.Request, key string, def int) int {
i, err := strconv.Atoi(r.FormValue(key))
if err != nil {
return def
}
return i
}
// FormIntDefault 获取Form参数 如果出错则返回默认值
func CtxFormIntDefault(ctx *Ctx, key string, def int) int {
i, err := strconv.Atoi(ctx.FormValue(key))
if err != nil {
return def
}
return i
}
// FormIntSliceDefault 获取Form参数 如果出错则返回默认值
func FormIntSliceDefault(r *http.Request, key, sep string, def []int) []int {
var i int
var err error
var rlt []int
for _, v := range strings.Split(r.FormValue(key), sep) {
i, err = strconv.Atoi(v)
if err != nil {
continue
}
rlt = append(rlt, i)
}
if rlt == nil {
return def
}
return rlt
}
// FormFileValue 快速获取表单提交的文件
// 也用于处理同表单一起提交的信息
func FormFileValue(r *http.Request, key string) (string, error) {
f, _, err := r.FormFile(key)
if err != nil {
return "", err
}
defer f.Close()
b, err := ioutil.ReadAll(f)
if err != nil {
return "", err
}
return string(b), nil
}
// FormFileValues 快速获取表单提交的文件
// 也用于处理同表单一起提交的信息
func FormFileValues(r *http.Request, key string) ([]string, error) {
if r.MultipartForm == nil {
err := r.ParseMultipartForm(32 << 20)
if err != nil {
return nil, err
}
}
if r.MultipartForm != nil && r.MultipartForm.File != nil {
if fhs := r.MultipartForm.File[key]; len(fhs) > 0 {
var rlt = make([]string, 0, len(fhs))
for i := range fhs {
f, err := fhs[i].Open()
if err != nil {
return nil, err
}
b, err := ioutil.ReadAll(f)
f.Close()
if err != nil {
return nil, err
}
rlt = append(rlt, string(b))
}
return rlt, nil
}
}
return nil, http.ErrMissingFile
}
func GetToken(ctx *Ctx) (token string) {
return ctx.Get("X-Token")
}
|
// IsValidIP | identifier_name |
http.go | package util
import (
"bytes"
"errors"
"fmt"
"io"
"io/ioutil"
"net"
"net/http"
"net/url"
"regexp"
"strconv"
"strings"
. "github.com/gofiber/fiber/v2"
jsoniter "github.com/json-iterator/go"
"github.com/valyala/fasthttp"
)
//获取真实的IP 1.1.1.1, 2.2.2.2, 3.3.3.3
func CtxClientIP(ctx *fasthttp.RequestCtx) string {
clientIP := string(ctx.Request.Header.Peek("X-Forwarded-For"))
if index := strings.IndexByte(clientIP, ','); index >= 0 {
clientIP = clientIP[0:index]
//获取最开始的一个 即 1.1.1.1
}
clientIP = strings.TrimSpace(clientIP)
if len(clientIP) > 0 {
return clientIP
}
clientIP = strings.TrimSpace(string(ctx.Request.Header.Peek("X-Real-Ip")))
if len(clientIP) > 0 {
return clientIP
}
return ctx.RemoteIP().String()
}
func GetIp(r *http.Request) string {
// var r *http.Request
ip := ClientPublicIP(r)
if ip == "" {
ip = ClientIP(r)
}
return ip
}
// DoURL 请求URL并且解析JSON格式的返回数据
func DoURL(method, url string, body []byte) ([]byte, error) {
req, err := http.NewRequest(method, url, bytes.NewBuffer(body))
if err != nil {
return nil, err
}
resp, err := (&http.Client{}).Do(req)
if err != nil {
return nil, err
}
defer resp.Body.Close()
return ioutil.ReadAll(resp.Body) |
// GetURL 请求URL
func GetURL(URL string) ([]byte, error) {
resp, err := http.Get(URL)
if err != nil {
return nil, err
}
defer resp.Body.Close()
return ioutil.ReadAll(resp.Body)
}
// GetURL 请求URL
func CtxGetURL(URL string) ([]byte, error) {
req := fasthttp.AcquireRequest()
defer fasthttp.ReleaseRequest(req) // 用完需要释放资源
// 默认是application/x-www-form-urlencoded
req.Header.SetMethod("GET")
req.SetRequestURI(URL)
resp := fasthttp.AcquireResponse()
defer fasthttp.ReleaseResponse(resp) // 用完需要释放资源
if err := fasthttp.Do(req, resp); err != nil {
return nil, err
}
b := resp.Body()
return b, nil
}
// GetValueURL 请求URL 附带参数
func GetValueURL(URL string, params url.Values) ([]byte, error) {
if params == nil {
return GetURL(URL)
}
resp, err := http.Get(fmt.Sprint(URL, "?", params.Encode()))
if err != nil {
return nil, err
}
defer resp.Body.Close()
return ioutil.ReadAll(resp.Body)
}
func CtxGetValueURL(URL string, params url.Values) ([]byte, error) {
if params == nil {
return CtxGetURL(URL)
}
req := fasthttp.AcquireRequest()
defer fasthttp.ReleaseRequest(req) // 用完需要释放资源
// 默认是application/x-www-form-urlencoded
req.Header.SetMethod("GET")
req.SetRequestURI(fmt.Sprint(URL, "?", params.Encode()))
resp := fasthttp.AcquireResponse()
defer fasthttp.ReleaseResponse(resp) // 用完需要释放资源
if err := fasthttp.Do(req, resp); err != nil {
return nil, err
}
b := resp.Body()
return b, nil
}
// GetURLReceiveJSON GET请求 自动解析JSON
func GetURLReceiveJSON(URL string, params url.Values, receive interface{}) error {
body, err := GetValueURL(URL, params)
if err != nil {
return err
}
var json = jsoniter.ConfigCompatibleWithStandardLibrary
err = json.Unmarshal(body, receive)
if err != nil {
return fmt.Errorf("json.Unmarshal failed: %s, %v", body, err)
}
return nil
}
func CtxGetURLReceiveJSON(URL string, params url.Values, receive interface{}) error {
body, err := CtxGetValueURL(URL, params)
if err != nil {
return err
}
var json = jsoniter.ConfigCompatibleWithStandardLibrary
err = json.Unmarshal(body, receive)
if err != nil {
return fmt.Errorf("json.Unmarshal failed: %s, %v", body, err)
}
return nil
}
// PostURL 请求URL
func PostURL(URL string, params url.Values) ([]byte, error) {
resp, err := http.PostForm(URL, params)
if err != nil {
return nil, err
}
defer resp.Body.Close()
return ioutil.ReadAll(resp.Body)
}
// 检查http请求中是否包含所需参数
// Deprecated: 使用CheckNotNil代替
func CheckParam(hr *http.Request, args ...string) string {
if strings.ToUpper(hr.Method) == "GET" {
for _, val := range args {
rs := hr.FormValue(val)
if StringIsEmpty(rs) {
return val
}
}
return ""
} else if strings.ToUpper(hr.Method) == "POST" { //post
for _, val := range args {
rs := hr.PostFormValue(val)
if StringIsEmpty(rs) {
return val
}
}
return ""
} else {
return hr.Method
}
}
// PostURLReceiveJSON POST请求 自动解析JSON
func PostURLReceiveJSON(URL string, params url.Values, receive interface{}) error {
body, err := PostURL(URL, params)
if err != nil {
return err
}
var json = jsoniter.ConfigCompatibleWithStandardLibrary
err = json.Unmarshal(body, receive)
if err != nil {
return fmt.Errorf("body:%v,err:%v", string(body), err)
}
return nil
}
// PostURLReceiveJSON POST请求 自动解析JSON
func PostMapReceiveJSON(URL string, maps map[string]string, receive interface{}) error {
params := url.Values{}
for k, v := range maps {
params.Set(k, v)
}
body, err := PostURL(URL, params)
if err != nil {
return err
}
var json = jsoniter.ConfigCompatibleWithStandardLibrary
err = json.Unmarshal(body, receive)
if err != nil {
return fmt.Errorf("body:%v,err:%v", string(body), err)
}
return nil
}
// PostJSON POST请求 BODY为JSON格式 ContentType=application/json
func PostJSON(URL string, v interface{}) ([]byte, error) {
var json = jsoniter.ConfigCompatibleWithStandardLibrary
b, err := json.Marshal(v)
if err != nil {
return nil, err
}
resp, err := http.Post(URL, "application/json", bytes.NewReader(b))
if err != nil {
return nil, err
}
defer resp.Body.Close()
return ioutil.ReadAll(resp.Body)
}
// PostJSON POST请求 BODY为JSON格式 ContentType=application/json
func GetJSON(URL string, v interface{}) ([]byte, error) {
var json = jsoniter.ConfigCompatibleWithStandardLibrary
b, err := json.Marshal(v)
if err != nil {
return nil, err
}
client := &http.Client{}
reqest, err := http.NewRequest("GET", URL, bytes.NewReader(b))
reqest.Header.Add("Content-Type", "application/json")
resp, err := client.Do(reqest)
//resp, err := http.Post(URL, "application/json", bytes.NewReader(b))
if err != nil {
return nil, err
}
defer resp.Body.Close()
return ioutil.ReadAll(resp.Body)
}
// PostJSONReceiveJSON POST请求 BODY为JSON格式 ContentType=application/json 自动解析JSON
func PostJSONReceiveJSON(URL string, send, receive interface{}) error {
body, err := PostJSON(URL, send)
if err != nil {
return err
}
var json = jsoniter.ConfigCompatibleWithStandardLibrary
err = json.Unmarshal(body, receive)
if err != nil {
return fmt.Errorf("error:%v,body{%s}", err, body)
}
return nil
}
// PostToJSON POST请求 BODY为json格式
// Deprecated: Please use PostJSON to replace
func PostToJSON(URL string, v interface{}) ([]byte, error) {
var json = jsoniter.ConfigCompatibleWithStandardLibrary
b, err := json.Marshal(v)
if err != nil {
return nil, err
}
req, err := http.NewRequest("POST", URL, bytes.NewBuffer(b))
if err != nil {
return nil, err
}
resp, err := (&http.Client{}).Do(req)
if err != nil {
return nil, err
}
defer resp.Body.Close()
return ioutil.ReadAll(resp.Body)
}
// CheckNotNil 检查HTTP参数是否为空
func CheckNotNil(r *http.Request, args ...string) error {
if args == nil || r == nil {
return nil
}
switch r.Method {
case "GET":
query := r.URL.Query()
for _, v := range args {
if strings.TrimSpace(query.Get(v)) == "" {
return fmt.Errorf("param(%s) is invalid", v)
}
}
case "POST":
for _, v := range args {
if strings.TrimSpace(r.PostFormValue(v)) == "" {
return fmt.Errorf("param(%s) is invalid", v)
}
}
default:
return errors.New("r.Method is not GET or POST")
}
return nil
}
// StringIsEmpty 判断是否有值为空或null或(null)
func StringIsEmpty(s ...string) bool {
var str string
for _, v := range s {
str = strings.TrimSpace(v)
if v == "" || strings.EqualFold(str, "(null)") || strings.EqualFold(str, "null") {
return true
}
}
return false
}
// WriteJSON 写入json字符串
func WriteJSON(w io.Writer, v interface{}) (int, error) {
var json = jsoniter.ConfigCompatibleWithStandardLibrary
b, err := json.Marshal(v)
if err != nil {
return 0, err
}
return w.Write(b)
}
// GetRemoteIP 获取IP
func GetRemoteIP(r *http.Request) string {
if r == nil {
return ""
}
var ip = strings.TrimSpace(r.Header.Get("X-Real-IP"))
if ip == "" {
ip, _, _ = net.SplitHostPort(strings.TrimSpace(r.RemoteAddr))
}
return ip
}
// CheckRemoteIP 验证IP
// in ips return true
func CheckRemoteIP(r *http.Request, ips ...string) bool {
if r == nil {
return false
}
var ip = GetRemoteIP(r)
for _, v := range ips {
if ip == v {
return true
}
}
return false
}
var regIPv4 = regexp.MustCompile(
`^(((\d{1,2})|(1\d{1,2})|(2[0-4]\d)|(25[0-5]))\.){3}((\d{1,2})|(1\d{1,2})|(2[0-4]\d)|(25[0-5]))$`,
)
// IsValidIPv4 验证是否为合法的ipv4
func IsValidIPv4(ip string) bool {
return regIPv4.MatchString(ip)
}
// FormIntDefault 获取Form参数 如果出错则返回默认值
func FormIntDefault(r *http.Request, key string, def int) int {
i, err := strconv.Atoi(r.FormValue(key))
if err != nil {
return def
}
return i
}
// FormIntDefault 获取Form参数 如果出错则返回默认值
func CtxFormIntDefault(ctx *Ctx, key string, def int) int {
i, err := strconv.Atoi(ctx.FormValue(key))
if err != nil {
return def
}
return i
}
// FormIntSliceDefault 获取Form参数 如果出错则返回默认值
func FormIntSliceDefault(r *http.Request, key, sep string, def []int) []int {
var i int
var err error
var rlt []int
for _, v := range strings.Split(r.FormValue(key), sep) {
i, err = strconv.Atoi(v)
if err != nil {
continue
}
rlt = append(rlt, i)
}
if rlt == nil {
return def
}
return rlt
}
// FormFileValue 快速获取表单提交的文件
// 也用于处理同表单一起提交的信息
func FormFileValue(r *http.Request, key string) (string, error) {
f, _, err := r.FormFile(key)
if err != nil {
return "", err
}
defer f.Close()
b, err := ioutil.ReadAll(f)
if err != nil {
return "", err
}
return string(b), nil
}
// FormFileValues 快速获取表单提交的文件
// 也用于处理同表单一起提交的信息
func FormFileValues(r *http.Request, key string) ([]string, error) {
if r.MultipartForm == nil {
err := r.ParseMultipartForm(32 << 20)
if err != nil {
return nil, err
}
}
if r.MultipartForm != nil && r.MultipartForm.File != nil {
if fhs := r.MultipartForm.File[key]; len(fhs) > 0 {
var rlt = make([]string, 0, len(fhs))
for i := range fhs {
f, err := fhs[i].Open()
if err != nil {
return nil, err
}
b, err := ioutil.ReadAll(f)
f.Close()
if err != nil {
return nil, err
}
rlt = append(rlt, string(b))
}
return rlt, nil
}
}
return nil, http.ErrMissingFile
}
func GetToken(ctx *Ctx) (token string) {
return ctx.Get("X-Token")
} | } | random_line_split |
http.go | package util
import (
"bytes"
"errors"
"fmt"
"io"
"io/ioutil"
"net"
"net/http"
"net/url"
"regexp"
"strconv"
"strings"
. "github.com/gofiber/fiber/v2"
jsoniter "github.com/json-iterator/go"
"github.com/valyala/fasthttp"
)
//获取真实的IP 1.1.1.1, 2.2.2.2, 3.3.3.3
func CtxClientIP(ctx *fasthttp.RequestCtx) string {
clientIP := string(ctx.Request.Header.Peek("X-Forwarded-For"))
if index := strings.IndexByte(clientIP, ','); index >= 0 {
clientIP = clientIP[0:index]
//获取最开始的一个 即 1.1.1.1
}
clientIP = strings.TrimSpace(clientIP)
if len(clientIP) > 0 {
return clientIP
}
clientIP = strings.TrimSpace(string(ctx.Request.Header.Peek("X-Real-Ip")))
if len(clientIP) > 0 {
return clientIP
}
return ctx.RemoteIP().String()
}
func GetIp(r *http.Request) string {
// var r *http.Request
ip := ClientPublicIP(r)
if ip == "" {
ip = ClientIP(r)
}
return ip
}
// DoURL 请求URL并且解析JSON格式的返回数据
func DoURL(method, url string, body []byte) ([]byte, error) {
req, err := http.NewRequest(method, url, bytes.NewBuffer(body))
if err != nil {
return nil, err
}
resp, err := (&http.Client{}).Do(req)
if err != nil {
return nil, err
}
defer resp.Body.Close()
return ioutil.ReadAll(resp.Body)
}
// GetURL 请求URL
func GetURL(URL string) ([]byte, error) {
resp, err := http.Get(URL)
if err != nil {
return nil, err
}
defer resp.Body.Close()
return i | y)
}
// GetURL 请求URL
func CtxGetURL(URL string) ([]byte, error) {
req := fasthttp.AcquireRequest()
defer fasthttp.ReleaseRequest(req) // 用完需要释放资源
// 默认是application/x-www-form-urlencoded
req.Header.SetMethod("GET")
req.SetRequestURI(URL)
resp := fasthttp.AcquireResponse()
defer fasthttp.ReleaseResponse(resp) // 用完需要释放资源
if err := fasthttp.Do(req, resp); err != nil {
return nil, err
}
b := resp.Body()
return b, nil
}
// GetValueURL 请求URL 附带参数
func GetValueURL(URL string, params url.Values) ([]byte, error) {
if params == nil {
return GetURL(URL)
}
resp, err := http.Get(fmt.Sprint(URL, "?", params.Encode()))
if err != nil {
return nil, err
}
defer resp.Body.Close()
return ioutil.ReadAll(resp.Body)
}
func CtxGetValueURL(URL string, params url.Values) ([]byte, error) {
if params == nil {
return CtxGetURL(URL)
}
req := fasthttp.AcquireRequest()
defer fasthttp.ReleaseRequest(req) // 用完需要释放资源
// 默认是application/x-www-form-urlencoded
req.Header.SetMethod("GET")
req.SetRequestURI(fmt.Sprint(URL, "?", params.Encode()))
resp := fasthttp.AcquireResponse()
defer fasthttp.ReleaseResponse(resp) // 用完需要释放资源
if err := fasthttp.Do(req, resp); err != nil {
return nil, err
}
b := resp.Body()
return b, nil
}
// GetURLReceiveJSON GET请求 自动解析JSON
func GetURLReceiveJSON(URL string, params url.Values, receive interface{}) error {
body, err := GetValueURL(URL, params)
if err != nil {
return err
}
var json = jsoniter.ConfigCompatibleWithStandardLibrary
err = json.Unmarshal(body, receive)
if err != nil {
return fmt.Errorf("json.Unmarshal failed: %s, %v", body, err)
}
return nil
}
func CtxGetURLReceiveJSON(URL string, params url.Values, receive interface{}) error {
body, err := CtxGetValueURL(URL, params)
if err != nil {
return err
}
var json = jsoniter.ConfigCompatibleWithStandardLibrary
err = json.Unmarshal(body, receive)
if err != nil {
return fmt.Errorf("json.Unmarshal failed: %s, %v", body, err)
}
return nil
}
// PostURL 请求URL
func PostURL(URL string, params url.Values) ([]byte, error) {
resp, err := http.PostForm(URL, params)
if err != nil {
return nil, err
}
defer resp.Body.Close()
return ioutil.ReadAll(resp.Body)
}
// 检查http请求中是否包含所需参数
// Deprecated: 使用CheckNotNil代替
func CheckParam(hr *http.Request, args ...string) string {
if strings.ToUpper(hr.Method) == "GET" {
for _, val := range args {
rs := hr.FormValue(val)
if StringIsEmpty(rs) {
return val
}
}
return ""
} else if strings.ToUpper(hr.Method) == "POST" { //post
for _, val := range args {
rs := hr.PostFormValue(val)
if StringIsEmpty(rs) {
return val
}
}
return ""
} else {
return hr.Method
}
}
// PostURLReceiveJSON POST请求 自动解析JSON
func PostURLReceiveJSON(URL string, params url.Values, receive interface{}) error {
body, err := PostURL(URL, params)
if err != nil {
return err
}
var json = jsoniter.ConfigCompatibleWithStandardLibrary
err = json.Unmarshal(body, receive)
if err != nil {
return fmt.Errorf("body:%v,err:%v", string(body), err)
}
return nil
}
// PostURLReceiveJSON POST请求 自动解析JSON
func PostMapReceiveJSON(URL string, maps map[string]string, receive interface{}) error {
params := url.Values{}
for k, v := range maps {
params.Set(k, v)
}
body, err := PostURL(URL, params)
if err != nil {
return err
}
var json = jsoniter.ConfigCompatibleWithStandardLibrary
err = json.Unmarshal(body, receive)
if err != nil {
return fmt.Errorf("body:%v,err:%v", string(body), err)
}
return nil
}
// PostJSON POST请求 BODY为JSON格式 ContentType=application/json
func PostJSON(URL string, v interface{}) ([]byte, error) {
var json = jsoniter.ConfigCompatibleWithStandardLibrary
b, err := json.Marshal(v)
if err != nil {
return nil, err
}
resp, err := http.Post(URL, "application/json", bytes.NewReader(b))
if err != nil {
return nil, err
}
defer resp.Body.Close()
return ioutil.ReadAll(resp.Body)
}
// PostJSON POST请求 BODY为JSON格式 ContentType=application/json
func GetJSON(URL string, v interface{}) ([]byte, error) {
var json = jsoniter.ConfigCompatibleWithStandardLibrary
b, err := json.Marshal(v)
if err != nil {
return nil, err
}
client := &http.Client{}
reqest, err := http.NewRequest("GET", URL, bytes.NewReader(b))
reqest.Header.Add("Content-Type", "application/json")
resp, err := client.Do(reqest)
//resp, err := http.Post(URL, "application/json", bytes.NewReader(b))
if err != nil {
return nil, err
}
defer resp.Body.Close()
return ioutil.ReadAll(resp.Body)
}
// PostJSONReceiveJSON POST请求 BODY为JSON格式 ContentType=application/json 自动解析JSON
func PostJSONReceiveJSON(URL string, send, receive interface{}) error {
body, err := PostJSON(URL, send)
if err != nil {
return err
}
var json = jsoniter.ConfigCompatibleWithStandardLibrary
err = json.Unmarshal(body, receive)
if err != nil {
return fmt.Errorf("error:%v,body{%s}", err, body)
}
return nil
}
// PostToJSON POST请求 BODY为json格式
// Deprecated: Please use PostJSON to replace
func PostToJSON(URL string, v interface{}) ([]byte, error) {
var json = jsoniter.ConfigCompatibleWithStandardLibrary
b, err := json.Marshal(v)
if err != nil {
return nil, err
}
req, err := http.NewRequest("POST", URL, bytes.NewBuffer(b))
if err != nil {
return nil, err
}
resp, err := (&http.Client{}).Do(req)
if err != nil {
return nil, err
}
defer resp.Body.Close()
return ioutil.ReadAll(resp.Body)
}
// CheckNotNil 检查HTTP参数是否为空
func CheckNotNil(r *http.Request, args ...string) error {
if args == nil || r == nil {
return nil
}
switch r.Method {
case "GET":
query := r.URL.Query()
for _, v := range args {
if strings.TrimSpace(query.Get(v)) == "" {
return fmt.Errorf("param(%s) is invalid", v)
}
}
case "POST":
for _, v := range args {
if strings.TrimSpace(r.PostFormValue(v)) == "" {
return fmt.Errorf("param(%s) is invalid", v)
}
}
default:
return errors.New("r.Method is not GET or POST")
}
return nil
}
// StringIsEmpty 判断是否有值为空或null或(null)
func StringIsEmpty(s ...string) bool {
var str string
for _, v := range s {
str = strings.TrimSpace(v)
if v == "" || strings.EqualFold(str, "(null)") || strings.EqualFold(str, "null") {
return true
}
}
return false
}
// WriteJSON 写入json字符串
func WriteJSON(w io.Writer, v interface{}) (int, error) {
var json = jsoniter.ConfigCompatibleWithStandardLibrary
b, err := json.Marshal(v)
if err != nil {
return 0, err
}
return w.Write(b)
}
// GetRemoteIP 获取IP
func GetRemoteIP(r *http.Request) string {
if r == nil {
return ""
}
var ip = strings.TrimSpace(r.Header.Get("X-Real-IP"))
if ip == "" {
ip, _, _ = net.SplitHostPort(strings.TrimSpace(r.RemoteAddr))
}
return ip
}
// CheckRemoteIP 验证IP
// in ips return true
func CheckRemoteIP(r *http.Request, ips ...string) bool {
if r == nil {
return false
}
var ip = GetRemoteIP(r)
for _, v := range ips {
if ip == v {
return true
}
}
return false
}
var regIPv4 = regexp.MustCompile(
`^(((\d{1,2})|(1\d{1,2})|(2[0-4]\d)|(25[0-5]))\.){3}((\d{1,2})|(1\d{1,2})|(2[0-4]\d)|(25[0-5]))$`,
)
// IsValidIPv4 验证是否为合法的ipv4
func IsValidIPv4(ip string) bool {
return regIPv4.MatchString(ip)
}
// FormIntDefault 获取Form参数 如果出错则返回默认值
func FormIntDefault(r *http.Request, key string, def int) int {
i, err := strconv.Atoi(r.FormValue(key))
if err != nil {
return def
}
return i
}
// FormIntDefault 获取Form参数 如果出错则返回默认值
func CtxFormIntDefault(ctx *Ctx, key string, def int) int {
i, err := strconv.Atoi(ctx.FormValue(key))
if err != nil {
return def
}
return i
}
// FormIntSliceDefault 获取Form参数 如果出错则返回默认值
func FormIntSliceDefault(r *http.Request, key, sep string, def []int) []int {
var i int
var err error
var rlt []int
for _, v := range strings.Split(r.FormValue(key), sep) {
i, err = strconv.Atoi(v)
if err != nil {
continue
}
rlt = append(rlt, i)
}
if rlt == nil {
return def
}
return rlt
}
// FormFileValue 快速获取表单提交的文件
// 也用于处理同表单一起提交的信息
func FormFileValue(r *http.Request, key string) (string, error) {
f, _, err := r.FormFile(key)
if err != nil {
return "", err
}
defer f.Close()
b, err := ioutil.ReadAll(f)
if err != nil {
return "", err
}
return string(b), nil
}
// FormFileValues 快速获取表单提交的文件
// 也用于处理同表单一起提交的信息
func FormFileValues(r *http.Request, key string) ([]string, error) {
if r.MultipartForm == nil {
err := r.ParseMultipartForm(32 << 20)
if err != nil {
return nil, err
}
}
if r.MultipartForm != nil && r.MultipartForm.File != nil {
if fhs := r.MultipartForm.File[key]; len(fhs) > 0 {
var rlt = make([]string, 0, len(fhs))
for i := range fhs {
f, err := fhs[i].Open()
if err != nil {
return nil, err
}
b, err := ioutil.ReadAll(f)
f.Close()
if err != nil {
return nil, err
}
rlt = append(rlt, string(b))
}
return rlt, nil
}
}
return nil, http.ErrMissingFile
}
func GetToken(ctx *Ctx) (token string) {
return ctx.Get("X-Token")
}
| outil.ReadAll(resp.Bod | conditional_block |
segment.ts | /**
* Created by user on 2018/5/16/016.
*/
import path = require("upath2");
import { crossSpawnSync } from '../index';
import { freeGC } from '../lib/util';
import ProjectConfig, { MAX_SCRIPT_TIMEOUT } from '../project.config';
import fs = require('fs-iconv');
import { useDefault, getDefaultModList } from 'novel-segment/lib';
import Segment from 'novel-segment/lib/Segment';
import TableDict from 'novel-segment/lib/table/dict';
import FastGlob = require('@bluelovers/fast-glob');
import Promise = require('bluebird');
import { crlf } from 'crlf-normalize';
import console from '../lib/log';
import Bluebird = require('bluebird');
// @ts-ignore
import BluebirdCancellation from 'bluebird-cancellation';
import { CancellationError, TimeoutError } from 'bluebird';
import { tw2cn_min, cn2tw_min, tableCn2TwDebug, tableTw2CnDebug } from 'cjk-conv/lib/zh/convert/min';
import { do_cn2tw_min } from '../lib/conv';
import { array_unique_overwrite } from 'array-hyper-unique';
export let DIST_NOVEL = ProjectConfig.novel_root;
export let CACHE_TIMEOUT = 3600;
export let _segmentObject: Segment;
export const ERROR_MSG_001 = `沒有搜尋到任何檔案 請檢查搜尋條件`;
export const CACHE_FILE = path.join(ProjectConfig.cache_root, 'cache.db');
export type IOptions = {
pathMain: string,
pathMain_out?: string,
novelID: string,
segment?: Segment,
novel_root?: string,
globPattern?: string[],
files?: string[],
hideLog?: boolean,
callback?(done_list: string[], file: string, index: number, length: number),
};
export function doSegmentGlob(options: IOptions)
{
const novel_root = options.novel_root || ProjectConfig.novel_root;
const segment = options.segment = getSegment(options.segment);
options.pathMain_out = options.pathMain_out || options.pathMain;
let CWD_IN = _path(options.pathMain, options.novelID, novel_root);
let CWD_OUT = _path(options.pathMain_out, options.novelID, novel_root);
let globPattern = options.globPattern || [
'**/*.txt',
];
console.info('[do]', options.pathMain, options.novelID);
return Promise.resolve(options.files || FastGlob(globPattern, {
cwd: CWD_IN,
//absolute: true,
}) as any as Promise<string[]>)
.then(function (ls)
{
return _doSegmentGlob(ls, options);
})
;
}
export function _doSegmentGlob(ls: string[], options: IOptions)
{
const novel_root = options.novel_root || ProjectConfig.novel_root;
const segment = options.segment = getSegment(options.segment);
options.pathMain_out = options.pathMain_out || options.pathMain;
let CWD_IN = _path(options.pathMain, options.novelID, novel_root);
let CWD_OUT = _path(options.pathMain_out, options.novelID, novel_root);
return Promise
.resolve(ls)
.tap(function (ls)
{
if (ls.length == 0)
{
//console.log(CWD_IN);
return Promise.reject(ERROR_MSG_001);
}
})
.then(async function (ls)
{
let label = `all file ${ls.length}`;
console.time(label);
console.log(`all file ${ls.length}`);
let count_changed = 0;
let done_list = [] as string[];
let rs = await Promise.mapSeries(ls, async function (file, index, length)
{
let label = file;
//console.time(label);
// console.log('[start]', label);
let fillpath = path.join(CWD_IN, file);
let fillpath_out = path.join(CWD_OUT, file);
// console.log(fillpath);
// console.log(fillpath_out);
if (!fs.pathExistsSync(fillpath))
{
done_list.push(file);
if (options.callback)
{
await options.callback(done_list, file, index, length);
}
return {
file,
changed: false,
exists: false,
};
}
else if (!file.match(/\.txt$/i))
{
done_list.push(file);
return {
file,
changed: false,
exists: true,
};
}
let text = await fs.loadFile(fillpath, {
autoDecode: true,
})
.then(v => crlf(v.toString()))
;
if (!text.replace(/\s+/g, ''))
{
//console.warn('[skip]', label);
done_list.push(file);
if (options.callback)
{
await options.callback(done_list, file, index, length);
}
return {
file,
changed: false,
exists: true,
};
}
let _now = Date.now();
let ks = await segment.doSegment(text);
let timeuse = Date.now() - _now;
let text_new = await segment.stringify(ks);
let changed = text_new != text;
if (changed)
{
// console.warn('[changed]', label);
await fs.outputFile(fillpath_out, text_new);
count_changed++;
}
if (changed)
{
}
else
{
//console.log('[done]', label);
}
done_list.push(file);
if (options.callback)
{
await options.callback(done_list, file, index, length);
}
ks = null;
text = undefined;
text_new = undefined;
return {
file,
changed,
exists: true,
};
});
console.timeEnd(label);
console[count_changed ? 'ok' : 'debug'](`file changed: ${count_changed}`);
return {
ls,
done_list,
count: {
file: ls.length,
changed: count_changed,
done: done_list.length,
},
}
})
;
}
export function _path(pathMain, novelID, novel_root = ProjectConfig.novel_root): string
{
let p: string;
try
{
p = path.resolve(novel_root, pathMain, novelID)
}
catch (e)
{
console.dir({
novel_root,
pathMain,
novelID,
});
throw e;
}
return p;
}
export function getSegment(segment?: Segment)
{
if (!segment)
{
if (!_segmentObject)
{
segment = _segmentObject = createSegment();
let db_dict = getDictMain(segment);
}
segment = _segmentObject;
}
return segment;
}
export function resetSegmentCache()
{
let cache_file = CACHE_FILE;
if (fs.existsSync(cache_file))
{
console.red(`[Segment] reset cache`);
fs.removeSync(cache_file);
}
}
export function createSegment(useCache: boolean = true)
{
const segment = new Segment({
autoCjk: true,
optionsDoSegment: {
convertSynonym: true,
},
});
let cache_file = CACHE_FILE;
let options = {
/**
* 開啟 all_mod 才會在自動載入時包含 ZhtSynonymOptimizer
*/
all_mod: true,
};
console.time(`讀取模組與字典`);
/**
* 使用緩存的字典檔範例
*/
if (useCache && fs.existsSync(cache_file))
{
//console.log(`發現 cache.db`);
let st = fs.statSync(cache_file);
let md = (Date.now() - st.mtimeMs) / 1000;
//console.log(`距離上次緩存已過 ${md}s`);
if (md < CACHE_TIMEOUT)
{
//console.log(st, md);
//console.log(`開始載入緩存字典`);
let data = JSON.parse(fs.readFileSync(cache_file).toString());
useDefault(segment, {
...options,
nodict: true,
});
segment.DICT = data.DICT;
segment.inited = true;
cache_file = null;
data = undefined;
}
}
if (!segment.inited)
{
//console.log(`重新載入分析字典`);
segment.autoInit(options);
// 簡轉繁專用
//segment.loadSynonymDict('zht.synonym.txt');
}
let db_dict = segment.getDictDatabase('TABLE', true);
db_dict.TABLE = segment.DICT['TABLE'];
db_dict.TABLE2 = segment.DICT['TABLE2'];
db_dict.options.autoCjk = true;
//console.log('主字典總數', db_dict.size());
console.timeEnd(`讀取模組與字典`);
if (useCache && cache_file)
{
//console.log(`緩存字典於 cache.db`);
fs.outputFileSync(cache_file, JSON.stringify({
DICT: segment.DICT,
}));
}
freeGC();
return segment;
}
export function getDictMain(segment: Segment)
{
return segment.getDictDatabase('TABLE');
}
export function runSegment()
{
let _cache_file_segment = path.join(ProjectConfig.cache_root, '.segment');
let _cache_segment: {
s_ver?: string,
d_ver?: string,
last_s_ver?: string,
last_d_ver?: string,
list: {
[k: string]: {
[k: string]: {
s_ver?: string,
d_ver?: string,
last_s_ver?: string,
last_d_ver?: string, |
let _s_ver: string = String(require("novel-segment").version || '1');
let _d_ver: string = String(require("segment-dict").version || '1');
if (fs.existsSync(_cache_file_segment))
{
try
{
_cache_segment = fs.readJSONSync(_cache_file_segment);
}
catch (e)
{
}
}
// @ts-ignore
_cache_segment = _cache_segment || {};
_cache_segment.list = _cache_segment.list || {};
{
let { last_s_ver, last_d_ver, s_ver, d_ver } = _cache_segment;
console.debug({
_s_ver,
_d_ver,
s_ver,
d_ver,
});
if (s_ver != _s_ver || d_ver != _d_ver)
{
resetSegmentCache();
}
}
const startTime = Date.now();
const MAX_SCRIPT_TIMEOUT = 20 * 60 * 1000;
let cancellablePromise = Bluebird
.mapSeries(FastGlob([
'*/*.json',
], {
cwd: path.join(ProjectConfig.cache_root, 'files'),
}), async function (id: string)
{
let [pathMain, novelID] = id.split(/[\\\/]/);
novelID = path.basename(novelID, '.json');
if ((Date.now() - startTime) > MAX_SCRIPT_TIMEOUT)
{
return Bluebird.reject(new CancellationError(`任務已取消 本次將不會執行 ${pathMain}, ${novelID}`))
}
let np = _path(pathMain, novelID);
if (!fs.existsSync(np))
{
console.error(pathMain, novelID);
await fs.remove(path.join(ProjectConfig.cache_root, 'files', id));
return -1;
}
let bin = path.join(ProjectConfig.project_root, 'bin/_do_segment.js');
let _run_all: boolean = false;
_cache_segment.list[novelID] = _cache_segment.list[novelID] || {};
let _current_data = _cache_segment.list[novelID][novelID] = _cache_segment.list[novelID][novelID] || {};
let _handle_list: string[] = [];
{
let dir = path.join(ProjectConfig.cache_root, 'files', pathMain);
let jsonfile = path.join(dir, novelID + '.json');
await fs.readJSON(jsonfile)
.then(function (ls)
{
_handle_list.push(...ls);
})
.catch(e => null)
;
}
if (_current_data.d_ver != _d_ver || _current_data.s_ver != _s_ver)
{
console.debug({
pathMain,
novelID,
s_ver: _current_data.s_ver,
d_ver: _current_data.d_ver,
});
_run_all = true;
}
let cp = crossSpawnSync('node', [
'--max-old-space-size=2048',
//'--expose-gc',
bin,
'--pathMain',
pathMain,
'--novelID',
novelID,
'--runAll',
String(_run_all),
], {
stdio: 'inherit',
cwd: DIST_NOVEL,
});
if (cp.status > 0)
{
crossSpawnSync('git', [
'commit',
'-a',
'-m',
`[Segment] ${pathMain} ${novelID}`,
], {
stdio: 'inherit',
cwd: DIST_NOVEL,
});
await fs.outputJSON(_cache_file_segment, _cache_segment, {
spaces: "\t",
});
}
{
let dir = path.join(ProjectConfig.cache_root, 'files', pathMain);
let jsonfile = path.join(dir, novelID + '.json');
let jsonfile_done = jsonfile + '.done';
await fs.readJSON(jsonfile_done)
.then(async function (ls: string[])
{
let CWD_IN = _path(pathMain, novelID);
let cjk_changed: boolean = false;
if (!fs.pathExistsSync(CWD_IN))
{
return;
}
ls = (ls || [])
.concat(_handle_list)
;
ls = array_unique_overwrite(ls);
if (!ls.length || !ls)
{
return;
}
return Bluebird
.mapSeries(ls, async function (file)
{
if (path.extname(file) == '.txt')
{
let fullpath = path.join(CWD_IN, file);
return fs.loadFile(fullpath, {
autoDecode: true,
})
.then(function (buf)
{
if (buf && buf.length)
{
let txt_old = String(buf);
let txt_new = do_cn2tw_min(txt_old)
.replace(/^\s*\n/, '')
.replace(/(?<=\n)\s*\n\s*$/, '')
;
if (txt_old != txt_new && txt_new)
{
cjk_changed = true;
return fs.writeFile(fullpath, txt_new)
.then(function ()
{
console.success(`[cjk-conv]`, file);
return fullpath;
})
}
return null;
}
return Promise.reject(buf);
})
.catch(e => {
console.error(e.message);
return null;
})
;
}
})
.mapSeries(function (fullpath)
{
fullpath && crossSpawnSync('git', [
'add',
fullpath,
], {
stdio: 'inherit',
cwd: CWD_IN,
});
return fullpath
})
.tap(function ()
{
if (cjk_changed)
{
crossSpawnSync('git', [
'commit',
'-m',
`[cjk-conv] ${pathMain} ${novelID}`,
], {
stdio: 'inherit',
cwd: CWD_IN,
});
}
})
})
.catch(e => {
console.error(e.message);
})
;
}
_current_data.last_s_ver = _current_data.s_ver;
_current_data.last_d_ver = _current_data.d_ver;
_current_data.s_ver = _s_ver;
_current_data.d_ver = _d_ver;
return cp.status;
})
.then(() => true)
.catch(CancellationError, (e: CancellationError) => {
console.error(e.message);
return false;
})
.tap(async function ()
{
_cache_segment.last_s_ver = _cache_segment.s_ver;
_cache_segment.last_d_ver = _cache_segment.d_ver;
_cache_segment.s_ver = _s_ver;
_cache_segment.d_ver = _d_ver;
await fs.outputJSON(_cache_file_segment, _cache_segment, {
spaces: "\t",
});
})
;
return cancellablePromise
.catch(CancellationError, (e) => {
return console.error(e.message);
});
} | },
}
},
}; | random_line_split |
segment.ts | /**
* Created by user on 2018/5/16/016.
*/
import path = require("upath2");
import { crossSpawnSync } from '../index';
import { freeGC } from '../lib/util';
import ProjectConfig, { MAX_SCRIPT_TIMEOUT } from '../project.config';
import fs = require('fs-iconv');
import { useDefault, getDefaultModList } from 'novel-segment/lib';
import Segment from 'novel-segment/lib/Segment';
import TableDict from 'novel-segment/lib/table/dict';
import FastGlob = require('@bluelovers/fast-glob');
import Promise = require('bluebird');
import { crlf } from 'crlf-normalize';
import console from '../lib/log';
import Bluebird = require('bluebird');
// @ts-ignore
import BluebirdCancellation from 'bluebird-cancellation';
import { CancellationError, TimeoutError } from 'bluebird';
import { tw2cn_min, cn2tw_min, tableCn2TwDebug, tableTw2CnDebug } from 'cjk-conv/lib/zh/convert/min';
import { do_cn2tw_min } from '../lib/conv';
import { array_unique_overwrite } from 'array-hyper-unique';
export let DIST_NOVEL = ProjectConfig.novel_root;
export let CACHE_TIMEOUT = 3600;
export let _segmentObject: Segment;
export const ERROR_MSG_001 = `沒有搜尋到任何檔案 請檢查搜尋條件`;
export const CACHE_FILE = path.join(ProjectConfig.cache_root, 'cache.db');
export type IOptions = {
pathMain: string,
pathMain_out?: string,
novelID: string,
segment?: Segment,
novel_root?: string,
globPattern?: string[],
files?: string[],
hideLog?: boolean,
callback?(done_list: string[], file: string, index: number, length: number),
};
export function doSegmentGlob(options: IOptions)
{
const novel_root = options.novel_root || ProjectConfig.novel_root;
const segment = options.segment = getSegment(options.segment);
options.pathMain_out = options.pathMain_out || options.pathMain;
let CWD_IN = _path(options.pathMain, options.novelID, novel_root);
let CWD_OUT = _path(options.pathMain_out, options.novelID, novel_root);
let globPattern = options.globPattern || [
'**/*.txt',
];
console.info('[do]', options.pathMain, options.novelID);
return Promise.resolve(options.files || FastGlob(globPattern, {
cwd: CWD_IN,
//absolute: true,
}) as any as Promise<string[]>)
.then(function (ls)
{
return _doSegmentGlob(ls, options);
})
;
}
export function _doSegmentGlob(ls: string[], options: IOptions)
{
const novel_root = options.novel_root || ProjectConfig.novel_root;
const segment = options.segment = getSegment(options.segment);
options.pathMain_out = options.pathMain_out || options.pathMain;
let CWD_IN = _path(options.pathMain, options.novelID, novel_root);
let CWD_OUT = _path(options.pathMain_out, options.novelID, novel_root);
return Promise
.resolve(ls)
.tap(function (ls)
{
if (ls.length == 0)
{
//console.log(CWD_IN);
return Promise.reject(ERROR_MSG_001);
}
})
.then(async function (ls)
{
let label = `all file ${ls.length}`;
console.time(label);
console.log(`all file ${ls.length}`);
let count_changed = 0;
let done_list = [] as string[];
let rs = await Promise.mapSeries(ls, async function (file, index, length)
{
let label = file;
//console.time(label);
// console.log('[start]', label);
let fillpath = path.join(CWD_IN, file);
let fillpath_out = path.join(CWD_OUT, file);
// console.log(fillpath);
// console.log(fillpath_out);
if (!fs.pathExistsSync(fillpath))
{
done_list.push(file);
if (options.callback)
{
await options.callback(done_list, file, index, length);
}
return {
file,
changed: false,
exists: false,
};
}
else if (!file.match(/\.txt$/i))
{
done_list.push(file);
return {
file,
changed: false,
exists: true,
};
}
let text = await fs.loadFile(fillpath, {
autoDecode: true,
})
.then(v => crlf(v.toString()))
;
if (!text.replace(/\s+/g, ''))
{
//console.warn('[skip]', label);
done_list.push(file);
if (options.callback)
{
await options.callback(done_list, file, index, length);
}
return {
file,
changed: false,
exists: true,
};
}
let _now = Date.now();
let ks = await segment.doSegment(text);
let timeuse = Date.now() - _now;
let text_new = await segment.stringify(ks);
let changed = text_new != text;
if (changed)
{
// console.warn('[changed]', label);
await fs.outputFile(fillpath_out, text_new);
count_changed++;
}
if (changed)
{
}
else
{
//console.log('[done]', label);
}
done_list.push(file);
if (options.callback)
{
await options.callback(done_list, file, index, length);
}
ks = null;
text = undefined;
text_new = undefined;
return {
file,
changed,
exists: true,
};
});
console.timeEnd(label);
console[count_changed ? 'ok' : 'debug'](`file changed: ${count_changed}`);
return {
ls,
done_list,
count: {
file: ls.length,
changed: count_changed,
done: done_list.length,
},
}
})
;
}
export function _path(pathMain, novelID, novel_root = ProjectConfig.novel_root): string
{
let p: string;
try
{
p = path.resolve(novel_root, pathMain, novelID)
}
catch (e)
{
console.dir({
novel_root,
pathMain,
novelID,
});
throw e;
}
return p;
}
export function getSegment(segment?: Segment)
{
if (!segment)
{
if (!_segmentObject)
{
segment = _segmentObject = createSegment();
let db_dict = getDictMain(segment);
}
segment = _segmentObject;
}
return segment;
}
export function resetSegmentCache()
{
let cache_file = CACHE_FILE;
if (fs.existsSync(cache_file))
{
console.red(`[Segment] reset cache`);
fs.removeSync(cache_file);
}
}
export function createSegment(useCache: boolean = true)
{
const segment = new Segment({
autoCjk: true,
optionsDoSegment: {
convertSynonym: true,
},
});
let cache_file = CACHE_FILE;
let options = {
/**
* 開啟 all_mod 才會在自動載入時包含 ZhtSynonymOptimizer
*/
all_mod: true,
};
console.time(`讀取模組與字典`);
/**
* 使用緩存的字典檔範例
*/
if (useCache && fs.existsSync(cache_file))
{
//console.log(`發現 cache.db`);
let st = fs.statSync(cache_file);
let md = (Date.now() - st.mtimeMs) / 1000;
//console.log(`距離上次緩存已過 ${md}s`);
if (md < CACHE_TIMEOUT)
{
//console.log(st, md);
//console.log(`開始載入緩存字典`);
let data = JSON.parse(fs.readFileSync(cache_fil | t('zht.synonym.txt');
}
let db_dict = segment.getDictDatabase('TABLE', true);
db_dict.TABLE = segment.DICT['TABLE'];
db_dict.TABLE2 = segment.DICT['TABLE2'];
db_dict.options.autoCjk = true;
//console.log('主字典總數', db_dict.size());
console.timeEnd(`讀取模組與字典`);
if (useCache && cache_file)
{
//console.log(`緩存字典於 cache.db`);
fs.outputFileSync(cache_file, JSON.stringify({
DICT: segment.DICT,
}));
}
freeGC();
return segment;
}
export function getDictMain(segment: Segment)
{
return segment.getDictDatabase('TABLE');
}
export function runSegment()
{
let _cache_file_segment = path.join(ProjectConfig.cache_root, '.segment');
let _cache_segment: {
s_ver?: string,
d_ver?: string,
last_s_ver?: string,
last_d_ver?: string,
list: {
[k: string]: {
[k: string]: {
s_ver?: string,
d_ver?: string,
last_s_ver?: string,
last_d_ver?: string,
},
}
},
};
let _s_ver: string = String(require("novel-segment").version || '1');
let _d_ver: string = String(require("segment-dict").version || '1');
if (fs.existsSync(_cache_file_segment))
{
try
{
_cache_segment = fs.readJSONSync(_cache_file_segment);
}
catch (e)
{
}
}
// @ts-ignore
_cache_segment = _cache_segment || {};
_cache_segment.list = _cache_segment.list || {};
{
let { last_s_ver, last_d_ver, s_ver, d_ver } = _cache_segment;
console.debug({
_s_ver,
_d_ver,
s_ver,
d_ver,
});
if (s_ver != _s_ver || d_ver != _d_ver)
{
resetSegmentCache();
}
}
const startTime = Date.now();
const MAX_SCRIPT_TIMEOUT = 20 * 60 * 1000;
let cancellablePromise = Bluebird
.mapSeries(FastGlob([
'*/*.json',
], {
cwd: path.join(ProjectConfig.cache_root, 'files'),
}), async function (id: string)
{
let [pathMain, novelID] = id.split(/[\\\/]/);
novelID = path.basename(novelID, '.json');
if ((Date.now() - startTime) > MAX_SCRIPT_TIMEOUT)
{
return Bluebird.reject(new CancellationError(`任務已取消 本次將不會執行 ${pathMain}, ${novelID}`))
}
let np = _path(pathMain, novelID);
if (!fs.existsSync(np))
{
console.error(pathMain, novelID);
await fs.remove(path.join(ProjectConfig.cache_root, 'files', id));
return -1;
}
let bin = path.join(ProjectConfig.project_root, 'bin/_do_segment.js');
let _run_all: boolean = false;
_cache_segment.list[novelID] = _cache_segment.list[novelID] || {};
let _current_data = _cache_segment.list[novelID][novelID] = _cache_segment.list[novelID][novelID] || {};
let _handle_list: string[] = [];
{
let dir = path.join(ProjectConfig.cache_root, 'files', pathMain);
let jsonfile = path.join(dir, novelID + '.json');
await fs.readJSON(jsonfile)
.then(function (ls)
{
_handle_list.push(...ls);
})
.catch(e => null)
;
}
if (_current_data.d_ver != _d_ver || _current_data.s_ver != _s_ver)
{
console.debug({
pathMain,
novelID,
s_ver: _current_data.s_ver,
d_ver: _current_data.d_ver,
});
_run_all = true;
}
let cp = crossSpawnSync('node', [
'--max-old-space-size=2048',
//'--expose-gc',
bin,
'--pathMain',
pathMain,
'--novelID',
novelID,
'--runAll',
String(_run_all),
], {
stdio: 'inherit',
cwd: DIST_NOVEL,
});
if (cp.status > 0)
{
crossSpawnSync('git', [
'commit',
'-a',
'-m',
`[Segment] ${pathMain} ${novelID}`,
], {
stdio: 'inherit',
cwd: DIST_NOVEL,
});
await fs.outputJSON(_cache_file_segment, _cache_segment, {
spaces: "\t",
});
}
{
let dir = path.join(ProjectConfig.cache_root, 'files', pathMain);
let jsonfile = path.join(dir, novelID + '.json');
let jsonfile_done = jsonfile + '.done';
await fs.readJSON(jsonfile_done)
.then(async function (ls: string[])
{
let CWD_IN = _path(pathMain, novelID);
let cjk_changed: boolean = false;
if (!fs.pathExistsSync(CWD_IN))
{
return;
}
ls = (ls || [])
.concat(_handle_list)
;
ls = array_unique_overwrite(ls);
if (!ls.length || !ls)
{
return;
}
return Bluebird
.mapSeries(ls, async function (file)
{
if (path.extname(file) == '.txt')
{
let fullpath = path.join(CWD_IN, file);
return fs.loadFile(fullpath, {
autoDecode: true,
})
.then(function (buf)
{
if (buf && buf.length)
{
let txt_old = String(buf);
let txt_new = do_cn2tw_min(txt_old)
.replace(/^\s*\n/, '')
.replace(/(?<=\n)\s*\n\s*$/, '')
;
if (txt_old != txt_new && txt_new)
{
cjk_changed = true;
return fs.writeFile(fullpath, txt_new)
.then(function ()
{
console.success(`[cjk-conv]`, file);
return fullpath;
})
}
return null;
}
return Promise.reject(buf);
})
.catch(e => {
console.error(e.message);
return null;
})
;
}
})
.mapSeries(function (fullpath)
{
fullpath && crossSpawnSync('git', [
'add',
fullpath,
], {
stdio: 'inherit',
cwd: CWD_IN,
});
return fullpath
})
.tap(function ()
{
if (cjk_changed)
{
crossSpawnSync('git', [
'commit',
'-m',
`[cjk-conv] ${pathMain} ${novelID}`,
], {
stdio: 'inherit',
cwd: CWD_IN,
});
}
})
})
.catch(e => {
console.error(e.message);
})
;
}
_current_data.last_s_ver = _current_data.s_ver;
_current_data.last_d_ver = _current_data.d_ver;
_current_data.s_ver = _s_ver;
_current_data.d_ver = _d_ver;
return cp.status;
})
.then(() => true)
.catch(CancellationError, (e: CancellationError) => {
console.error(e.message);
return false;
})
.tap(async function ()
{
_cache_segment.last_s_ver = _cache_segment.s_ver;
_cache_segment.last_d_ver = _cache_segment.d_ver;
_cache_segment.s_ver = _s_ver;
_cache_segment.d_ver = _d_ver;
await fs.outputJSON(_cache_file_segment, _cache_segment, {
spaces: "\t",
});
})
;
return cancellablePromise
.catch(CancellationError, (e) => {
return console.error(e.message);
});
}
| e).toString());
useDefault(segment, {
...options,
nodict: true,
});
segment.DICT = data.DICT;
segment.inited = true;
cache_file = null;
data = undefined;
}
}
if (!segment.inited)
{
//console.log(`重新載入分析字典`);
segment.autoInit(options);
// 簡轉繁專用
//segment.loadSynonymDic | conditional_block |
segment.ts | /**
* Created by user on 2018/5/16/016.
*/
import path = require("upath2");
import { crossSpawnSync } from '../index';
import { freeGC } from '../lib/util';
import ProjectConfig, { MAX_SCRIPT_TIMEOUT } from '../project.config';
import fs = require('fs-iconv');
import { useDefault, getDefaultModList } from 'novel-segment/lib';
import Segment from 'novel-segment/lib/Segment';
import TableDict from 'novel-segment/lib/table/dict';
import FastGlob = require('@bluelovers/fast-glob');
import Promise = require('bluebird');
import { crlf } from 'crlf-normalize';
import console from '../lib/log';
import Bluebird = require('bluebird');
// @ts-ignore
import BluebirdCancellation from 'bluebird-cancellation';
import { CancellationError, TimeoutError } from 'bluebird';
import { tw2cn_min, cn2tw_min, tableCn2TwDebug, tableTw2CnDebug } from 'cjk-conv/lib/zh/convert/min';
import { do_cn2tw_min } from '../lib/conv';
import { array_unique_overwrite } from 'array-hyper-unique';
export let DIST_NOVEL = ProjectConfig.novel_root;
export let CACHE_TIMEOUT = 3600;
export let _segmentObject: Segment;
export const ERROR_MSG_001 = `沒有搜尋到任何檔案 請檢查搜尋條件`;
export const CACHE_FILE = path.join(ProjectConfig.cache_root, 'cache.db');
export type IOptions = {
pathMain: string,
pathMain_out?: string,
novelID: string,
segment?: Segment,
novel_root?: string,
globPattern?: string[],
files?: string[],
hideLog?: boolean,
callback?(done_list: string[], file: string, index: number, length: number),
};
export function doSegmentGlob(options: IOptions)
{
const novel_root = options.novel_root || ProjectConfig.novel_root;
const segment = options.segment = getSegment(options.segment);
options.pathMain_out = options.pathMain_out || options.pathMain;
let CWD_IN = _path(options.pathMain, options.novelID, novel_root);
let CWD_OUT = _path(options.pathMain_out, options.novelID, novel_root);
let globPattern = options.globPattern || [
'**/*.txt',
];
console.info('[do]', options.pathMain, options.novelID);
return Promise.resolve(options.files || FastGlob(globPattern, {
cwd: CWD_IN,
//absolute: true,
}) as any as Promise<string[]>)
.then(function (ls)
{
return _doSegmentGlob(ls, options);
})
;
}
export function _doSegmentGlob(ls: string[], options: IOptions)
{
const novel_root = options.novel_root || ProjectConfig.novel_root;
const segment = options.segment = getSegment(options.segment);
options.pathMain_out = options.pathMain_out || options.pathMain;
let CWD_IN = _path(options.pathMain, options.novelID, novel_root);
let CWD_OUT = _path(options.pathMain_out, options.novelID, novel_root);
return Promise
.resolve(ls)
.tap(function (ls)
{
if (ls.length == 0)
{
//console.log(CWD_IN);
return Promise.reject(ERROR_MSG_001);
}
})
.then(async function (ls)
{
let label = `all file ${ls.length}`;
console.time(label);
console.log(`all file ${ls.length}`);
let count_changed = 0;
let done_list = [] as string[];
let rs = await Promise.mapSeries(ls, async function (file, index, length)
{
let label = file;
//console.time(label);
// console.log('[start]', label);
let fillpath = path.join(CWD_IN, file);
let fillpath_out = path.join(CWD_OUT, file);
// console.log(fillpath);
// console.log(fillpath_out);
if (!fs.pathExistsSync(fillpath))
{
done_list.push(file);
if (options.callback)
{
await options.callback(done_list, file, index, length);
}
return {
file,
changed: false,
exists: false,
};
}
else if (!file.match(/\.txt$/i))
{
done_list.push(file);
return {
file,
changed: false,
exists: true,
};
}
let text = await fs.loadFile(fillpath, {
autoDecode: true,
})
.then(v => crlf(v.toString()))
;
if (!text.replace(/\s+/g, ''))
{
//console.warn('[skip]', label);
done_list.push(file);
if (options.callback)
{
await options.callback(done_list, file, index, length);
}
return {
file,
changed: false,
exists: true,
};
}
let _now = Date.now();
let ks = await segment.doSegment(text);
let timeuse = Date.now() - _now;
let text_new = await segment.stringify(ks);
let changed = text_new != text;
if (changed)
{
// console.warn('[changed]', label);
await fs.outputFile(fillpath_out, text_new);
count_changed++;
}
if (changed)
{
}
else
{
//console.log('[done]', label);
}
done_list.push(file);
if (options.callback)
{
await options.callback(done_list, file, index, length);
}
ks = null;
text = undefined;
text_new = undefined;
return {
file,
changed,
exists: true,
};
});
console.timeEnd(label);
console[count_changed ? 'ok' : 'debug'](`file changed: ${count_changed}`);
return {
ls,
done_list,
count: {
file: ls.length,
changed: count_changed,
done: done_list.length,
},
}
})
;
}
export function _path(pathMain, novelID, novel_root = ProjectConfig.novel_root): string
{
let p: string;
try
{
p = path.resolve(novel_root, pathMain, novelID)
}
catch (e)
{
console.dir({
novel_root,
pathMain,
novelID,
});
throw e;
}
return p;
}
export function getSegment(segment?: Segment)
{
if (!segment)
{
if (!_segmentObject)
{
segment = _segmentObject = createSegment();
let db_dict = getDictMain(segment);
}
segment = _segmentObject;
}
return segment;
}
export function resetSegmentCache()
{
let cache_file = CACHE_FILE;
if (fs.existsSync(cache_file))
{
console.red(`[Segment] reset cache`);
fs.removeSync(cache_file);
}
}
export function createSegment(useCache: boolean = true)
{
const segment = new Segment({
autoCjk: true,
optionsDoSegment: {
convertSynonym: true,
},
});
let cache_file = CACHE_FILE;
let options = {
/**
* 開啟 all_mod 才會在自動載入時包含 ZhtSynonymOptimizer
*/
all_mod: true,
};
console.time(`讀取模組與字典`);
/**
* 使用緩存的字典檔範例
*/
if (useCache && fs.existsSync(cache_file))
{
//console.log(`發現 cache.db`);
let st = fs.statSync(cache_file);
let md = (Date.now() - st.mtimeMs) / 1000;
//console.log(`距離上次緩存已過 ${md}s`);
if (md < CACHE_TIMEOUT)
{
//console.log(st, md);
//console.log(`開始載入緩存字典`);
let data = JSON.parse(fs.readFileSync(cache_file).toString());
useDefault(segment, {
...options,
nodict: true,
});
segment.DICT = data.DICT;
segment.inited = true;
cache_file = null;
data = undefined;
}
}
if (!segment.inited)
{
//console.log(`重新載入分析字典`);
segment.autoInit(options);
// 簡轉繁專用
//segment.loadSynonymDict('zht.synonym.txt');
}
let db_dict = segment.getDictDatabase('TABLE', true);
db_dict.TABLE = segment.DICT['TABLE'];
db_dict.TABLE2 = segment.DICT['TABLE2'];
db_dict.options.autoCjk = true;
//console.log('主字典總數', db_dict.size());
console.timeEnd(`讀取模組與字典`);
if (useCache && cache_file)
{
//console.log(`緩存字典於 cache.db`);
fs.outputFileSync(cache_file, JSON.stringify({
DICT: segment.DICT,
}));
}
freeGC();
return segment;
}
export function getDictMain(segment: Segment)
{
return segment.getDictDatabase('TABLE');
}
export function runSegment()
{
let _cache_file_segment = path.join(ProjectConfig.cache_root, '.segment');
let _cache_segment: {
s_ver?: string,
d_ver?: string,
last_s_ver?: string,
last_d_v | g,
list: {
[k: string]: {
[k: string]: {
s_ver?: string,
d_ver?: string,
last_s_ver?: string,
last_d_ver?: string,
},
}
},
};
let _s_ver: string = String(require("novel-segment").version || '1');
let _d_ver: string = String(require("segment-dict").version || '1');
if (fs.existsSync(_cache_file_segment))
{
try
{
_cache_segment = fs.readJSONSync(_cache_file_segment);
}
catch (e)
{
}
}
// @ts-ignore
_cache_segment = _cache_segment || {};
_cache_segment.list = _cache_segment.list || {};
{
let { last_s_ver, last_d_ver, s_ver, d_ver } = _cache_segment;
console.debug({
_s_ver,
_d_ver,
s_ver,
d_ver,
});
if (s_ver != _s_ver || d_ver != _d_ver)
{
resetSegmentCache();
}
}
const startTime = Date.now();
const MAX_SCRIPT_TIMEOUT = 20 * 60 * 1000;
let cancellablePromise = Bluebird
.mapSeries(FastGlob([
'*/*.json',
], {
cwd: path.join(ProjectConfig.cache_root, 'files'),
}), async function (id: string)
{
let [pathMain, novelID] = id.split(/[\\\/]/);
novelID = path.basename(novelID, '.json');
if ((Date.now() - startTime) > MAX_SCRIPT_TIMEOUT)
{
return Bluebird.reject(new CancellationError(`任務已取消 本次將不會執行 ${pathMain}, ${novelID}`))
}
let np = _path(pathMain, novelID);
if (!fs.existsSync(np))
{
console.error(pathMain, novelID);
await fs.remove(path.join(ProjectConfig.cache_root, 'files', id));
return -1;
}
let bin = path.join(ProjectConfig.project_root, 'bin/_do_segment.js');
let _run_all: boolean = false;
_cache_segment.list[novelID] = _cache_segment.list[novelID] || {};
let _current_data = _cache_segment.list[novelID][novelID] = _cache_segment.list[novelID][novelID] || {};
let _handle_list: string[] = [];
{
let dir = path.join(ProjectConfig.cache_root, 'files', pathMain);
let jsonfile = path.join(dir, novelID + '.json');
await fs.readJSON(jsonfile)
.then(function (ls)
{
_handle_list.push(...ls);
})
.catch(e => null)
;
}
if (_current_data.d_ver != _d_ver || _current_data.s_ver != _s_ver)
{
console.debug({
pathMain,
novelID,
s_ver: _current_data.s_ver,
d_ver: _current_data.d_ver,
});
_run_all = true;
}
let cp = crossSpawnSync('node', [
'--max-old-space-size=2048',
//'--expose-gc',
bin,
'--pathMain',
pathMain,
'--novelID',
novelID,
'--runAll',
String(_run_all),
], {
stdio: 'inherit',
cwd: DIST_NOVEL,
});
if (cp.status > 0)
{
crossSpawnSync('git', [
'commit',
'-a',
'-m',
`[Segment] ${pathMain} ${novelID}`,
], {
stdio: 'inherit',
cwd: DIST_NOVEL,
});
await fs.outputJSON(_cache_file_segment, _cache_segment, {
spaces: "\t",
});
}
{
let dir = path.join(ProjectConfig.cache_root, 'files', pathMain);
let jsonfile = path.join(dir, novelID + '.json');
let jsonfile_done = jsonfile + '.done';
await fs.readJSON(jsonfile_done)
.then(async function (ls: string[])
{
let CWD_IN = _path(pathMain, novelID);
let cjk_changed: boolean = false;
if (!fs.pathExistsSync(CWD_IN))
{
return;
}
ls = (ls || [])
.concat(_handle_list)
;
ls = array_unique_overwrite(ls);
if (!ls.length || !ls)
{
return;
}
return Bluebird
.mapSeries(ls, async function (file)
{
if (path.extname(file) == '.txt')
{
let fullpath = path.join(CWD_IN, file);
return fs.loadFile(fullpath, {
autoDecode: true,
})
.then(function (buf)
{
if (buf && buf.length)
{
let txt_old = String(buf);
let txt_new = do_cn2tw_min(txt_old)
.replace(/^\s*\n/, '')
.replace(/(?<=\n)\s*\n\s*$/, '')
;
if (txt_old != txt_new && txt_new)
{
cjk_changed = true;
return fs.writeFile(fullpath, txt_new)
.then(function ()
{
console.success(`[cjk-conv]`, file);
return fullpath;
})
}
return null;
}
return Promise.reject(buf);
})
.catch(e => {
console.error(e.message);
return null;
})
;
}
})
.mapSeries(function (fullpath)
{
fullpath && crossSpawnSync('git', [
'add',
fullpath,
], {
stdio: 'inherit',
cwd: CWD_IN,
});
return fullpath
})
.tap(function ()
{
if (cjk_changed)
{
crossSpawnSync('git', [
'commit',
'-m',
`[cjk-conv] ${pathMain} ${novelID}`,
], {
stdio: 'inherit',
cwd: CWD_IN,
});
}
})
})
.catch(e => {
console.error(e.message);
})
;
}
_current_data.last_s_ver = _current_data.s_ver;
_current_data.last_d_ver = _current_data.d_ver;
_current_data.s_ver = _s_ver;
_current_data.d_ver = _d_ver;
return cp.status;
})
.then(() => true)
.catch(CancellationError, (e: CancellationError) => {
console.error(e.message);
return false;
})
.tap(async function ()
{
_cache_segment.last_s_ver = _cache_segment.s_ver;
_cache_segment.last_d_ver = _cache_segment.d_ver;
_cache_segment.s_ver = _s_ver;
_cache_segment.d_ver = _d_ver;
await fs.outputJSON(_cache_file_segment, _cache_segment, {
spaces: "\t",
});
})
;
return cancellablePromise
.catch(CancellationError, (e) => {
return console.error(e.message);
});
}
| er?: strin | identifier_name |
segment.ts | /**
* Created by user on 2018/5/16/016.
*/
import path = require("upath2");
import { crossSpawnSync } from '../index';
import { freeGC } from '../lib/util';
import ProjectConfig, { MAX_SCRIPT_TIMEOUT } from '../project.config';
import fs = require('fs-iconv');
import { useDefault, getDefaultModList } from 'novel-segment/lib';
import Segment from 'novel-segment/lib/Segment';
import TableDict from 'novel-segment/lib/table/dict';
import FastGlob = require('@bluelovers/fast-glob');
import Promise = require('bluebird');
import { crlf } from 'crlf-normalize';
import console from '../lib/log';
import Bluebird = require('bluebird');
// @ts-ignore
import BluebirdCancellation from 'bluebird-cancellation';
import { CancellationError, TimeoutError } from 'bluebird';
import { tw2cn_min, cn2tw_min, tableCn2TwDebug, tableTw2CnDebug } from 'cjk-conv/lib/zh/convert/min';
import { do_cn2tw_min } from '../lib/conv';
import { array_unique_overwrite } from 'array-hyper-unique';
export let DIST_NOVEL = ProjectConfig.novel_root;
export let CACHE_TIMEOUT = 3600;
export let _segmentObject: Segment;
export const ERROR_MSG_001 = `沒有搜尋到任何檔案 請檢查搜尋條件`;
export const CACHE_FILE = path.join(ProjectConfig.cache_root, 'cache.db');
export type IOptions = {
pathMain: string,
pathMain_out?: string,
novelID: string,
segment?: Segment,
novel_root?: string,
globPattern?: string[],
files?: string[],
hideLog?: boolean,
callback?(done_list: string[], file: string, index: number, length: number),
};
export function doSegmentGlob(options: IOptions)
{
const novel_root = options.novel_root || ProjectConfig.novel_root;
const segment = options.segment = getSegment(options.segment);
options.pathMain_out = options.pathMain_out || options.pathMain;
let CWD_IN = _path(options.pathMain, options.novelID, novel_root);
let CWD_OUT = _path(options.pathMain_out, options.novelID, novel_root);
let globPattern = options.globPattern || [
'**/*.txt',
];
console.info('[do]', options.pathMain, options.novelID);
return Promise.resolve(options.files || FastGlob(globPattern, {
cwd: CWD_IN,
//absolute: true,
}) as any as Promise<string[]>)
.then(function (ls)
{
return _doSegmentGlob(ls, options);
})
;
}
export function _doSegmentGlob(ls: string[], options: IOptions)
{
const novel_root = options.novel_root || ProjectConfig.novel_root;
const segment = options.segment = getSegment(options.segment);
options.pathMain_out = options.pathMain_out || options.pathMain;
let CWD_IN = _path(options.pathMain, options.novelID, novel_root);
let CWD_OUT = _path(options.pathMain_out, options.novelID, novel_root);
return Promise
.resolve(ls)
.tap(function (ls)
{
if (ls.length == 0)
{
//console.log(CWD_IN);
return Promise.reject(ERROR_MSG_001);
}
})
.then(async function (ls)
{
let label = `all file ${ls.length}`;
console.time(label);
console.log(`all file ${ls.length}`);
let count_changed = 0;
let done_list = [] as string[];
let rs = await Promise.mapSeries(ls, async function (file, index, length)
{
let label = file;
//console.time(label);
// console.log('[start]', label);
let fillpath = path.join(CWD_IN, file);
let fillpath_out = path.join(CWD_OUT, file);
// console.log(fillpath);
// console.log(fillpath_out);
if (!fs.pathExistsSync(fillpath))
{
done_list.push(file);
if (options.callback)
{
await options.callback(done_list, file, index, length);
}
return {
file,
changed: false,
exists: false,
};
}
else if (!file.match(/\.txt$/i))
{
done_list.push(file);
return {
file,
changed: false,
exists: true,
};
}
let text = await fs.loadFile(fillpath, {
autoDecode: true,
})
.then(v => crlf(v.toString()))
;
if (!text.replace(/\s+/g, ''))
{
//console.warn('[skip]', label);
done_list.push(file);
if (options.callback)
{
await options.callback(done_list, file, index, length);
}
return {
file,
changed: false,
exists: true,
};
}
let _now = Date.now();
let ks = await segment.doSegment(text);
let timeuse = Date.now() - _now;
let text_new = await segment.stringify(ks);
let changed = text_new != text;
if (changed)
{
// console.warn('[changed]', label);
await fs.outputFile(fillpath_out, text_new);
count_changed++;
}
if (changed)
{
}
else
{
//console.log('[done]', label);
}
done_list.push(file);
if (options.callback)
{
await options.callback(done_list, file, index, length);
}
ks = null;
text = undefined;
text_new = undefined;
return {
file,
changed,
exists: true,
};
});
console.timeEnd(label);
console[count_changed ? 'ok' : 'debug'](`file changed: ${count_changed}`);
return {
ls,
done_list,
count: {
file: ls.length,
changed: count_changed,
done: done_list.length,
},
}
})
;
}
export function _path(pathMain, novelID, novel_root = ProjectConfig.novel_root): string
{
let p: string;
try
{
p = path.resolve(novel_root, pathMain, novelID)
}
catch (e)
{
console.dir({
novel_root,
pathMain,
novelID,
});
throw e;
}
return p;
}
export function getSegment(segment?: Segment)
{
if (!segment)
{
if (!_segmentObject)
{
segment = _segmentObject = createSegment();
let db_dict = getDictMain(segment);
}
segment = _segmentObject;
}
return segment;
}
export function resetSegmentCache()
{
let cache_file = CACHE_FILE;
if (fs.existsSync(cache_file))
{
console.red(`[Segment] reset cache`);
fs.removeSync(cache_file);
}
}
export function createSegment(useCache: boolean = true)
{
const segment = new Segment({
autoCjk: true,
optionsDoSegment: {
convertSynonym: true,
},
});
let cache_file = CACHE_FILE;
let options = {
/**
* 開啟 all_mod 才會在自動載入時包含 ZhtSynonymOptimizer
*/
all_mod: true,
};
console.time(`讀取模組與字典`);
/**
* 使用緩存的字典檔範例
*/
if (useCache && fs.existsSync(cache_file))
{
//console.log(`發現 cache.db`);
let st = fs.statSync(cache_file);
let md = (Date.now() - st.mtimeMs) / 1000;
//console.log(`距離上次緩存已過 ${md}s`);
if (md < CACHE_TIMEOUT)
{
//console.log(st, md);
//console.log(`開始載入緩存字典`);
let data = JSON.parse(fs.readFileSync(cache_file).toString());
useDefault(segment, {
...options,
nodict: true,
});
segment.DICT = data.DICT;
segment.inited = true;
cache_file = null;
data = undefined;
}
}
if (!segment.inited)
{
//console.log(`重新載入分析字典`);
segment.autoInit(options);
// 簡轉繁專用
//segment.loadSynonymDict('zht.synonym.txt');
}
let db_dict = segment.getDictDatabase('TABLE', true);
db_dict.TABLE = segment.DICT['TABLE'];
db_dict.TABLE2 = segment.DICT['TABLE2'];
db_dict.options.autoCjk = true;
//console.log('主字典總數', db_dict.size());
console.timeEnd(`讀取模組與字典`);
if (useCache && cache_file)
{
//console.log(`緩存字典於 cache.db`);
fs.outputFileSync(cache_file, JSON.stringify({
DICT: segment.DICT,
}));
}
freeGC();
return segment;
}
export function getDictMain(segment: Segment)
{
return segment.getDictDatabase('TABLE');
}
export function runSegment()
{
let _cache_file_segment = path.join(ProjectConfig.cache_root, '.segment');
let _cache_segment: {
s_ver | string,
last_d_ver?: string,
list: {
[k: string]: {
[k: string]: {
s_ver?: string,
d_ver?: string,
last_s_ver?: string,
last_d_ver?: string,
},
}
},
};
let _s_ver: string = String(require("novel-segment").version || '1');
let _d_ver: string = String(require("segment-dict").version || '1');
if (fs.existsSync(_cache_file_segment))
{
try
{
_cache_segment = fs.readJSONSync(_cache_file_segment);
}
catch (e)
{
}
}
// @ts-ignore
_cache_segment = _cache_segment || {};
_cache_segment.list = _cache_segment.list || {};
{
let { last_s_ver, last_d_ver, s_ver, d_ver } = _cache_segment;
console.debug({
_s_ver,
_d_ver,
s_ver,
d_ver,
});
if (s_ver != _s_ver || d_ver != _d_ver)
{
resetSegmentCache();
}
}
const startTime = Date.now();
const MAX_SCRIPT_TIMEOUT = 20 * 60 * 1000;
let cancellablePromise = Bluebird
.mapSeries(FastGlob([
'*/*.json',
], {
cwd: path.join(ProjectConfig.cache_root, 'files'),
}), async function (id: string)
{
let [pathMain, novelID] = id.split(/[\\\/]/);
novelID = path.basename(novelID, '.json');
if ((Date.now() - startTime) > MAX_SCRIPT_TIMEOUT)
{
return Bluebird.reject(new CancellationError(`任務已取消 本次將不會執行 ${pathMain}, ${novelID}`))
}
let np = _path(pathMain, novelID);
if (!fs.existsSync(np))
{
console.error(pathMain, novelID);
await fs.remove(path.join(ProjectConfig.cache_root, 'files', id));
return -1;
}
let bin = path.join(ProjectConfig.project_root, 'bin/_do_segment.js');
let _run_all: boolean = false;
_cache_segment.list[novelID] = _cache_segment.list[novelID] || {};
let _current_data = _cache_segment.list[novelID][novelID] = _cache_segment.list[novelID][novelID] || {};
let _handle_list: string[] = [];
{
let dir = path.join(ProjectConfig.cache_root, 'files', pathMain);
let jsonfile = path.join(dir, novelID + '.json');
await fs.readJSON(jsonfile)
.then(function (ls)
{
_handle_list.push(...ls);
})
.catch(e => null)
;
}
if (_current_data.d_ver != _d_ver || _current_data.s_ver != _s_ver)
{
console.debug({
pathMain,
novelID,
s_ver: _current_data.s_ver,
d_ver: _current_data.d_ver,
});
_run_all = true;
}
let cp = crossSpawnSync('node', [
'--max-old-space-size=2048',
//'--expose-gc',
bin,
'--pathMain',
pathMain,
'--novelID',
novelID,
'--runAll',
String(_run_all),
], {
stdio: 'inherit',
cwd: DIST_NOVEL,
});
if (cp.status > 0)
{
crossSpawnSync('git', [
'commit',
'-a',
'-m',
`[Segment] ${pathMain} ${novelID}`,
], {
stdio: 'inherit',
cwd: DIST_NOVEL,
});
await fs.outputJSON(_cache_file_segment, _cache_segment, {
spaces: "\t",
});
}
{
let dir = path.join(ProjectConfig.cache_root, 'files', pathMain);
let jsonfile = path.join(dir, novelID + '.json');
let jsonfile_done = jsonfile + '.done';
await fs.readJSON(jsonfile_done)
.then(async function (ls: string[])
{
let CWD_IN = _path(pathMain, novelID);
let cjk_changed: boolean = false;
if (!fs.pathExistsSync(CWD_IN))
{
return;
}
ls = (ls || [])
.concat(_handle_list)
;
ls = array_unique_overwrite(ls);
if (!ls.length || !ls)
{
return;
}
return Bluebird
.mapSeries(ls, async function (file)
{
if (path.extname(file) == '.txt')
{
let fullpath = path.join(CWD_IN, file);
return fs.loadFile(fullpath, {
autoDecode: true,
})
.then(function (buf)
{
if (buf && buf.length)
{
let txt_old = String(buf);
let txt_new = do_cn2tw_min(txt_old)
.replace(/^\s*\n/, '')
.replace(/(?<=\n)\s*\n\s*$/, '')
;
if (txt_old != txt_new && txt_new)
{
cjk_changed = true;
return fs.writeFile(fullpath, txt_new)
.then(function ()
{
console.success(`[cjk-conv]`, file);
return fullpath;
})
}
return null;
}
return Promise.reject(buf);
})
.catch(e => {
console.error(e.message);
return null;
})
;
}
})
.mapSeries(function (fullpath)
{
fullpath && crossSpawnSync('git', [
'add',
fullpath,
], {
stdio: 'inherit',
cwd: CWD_IN,
});
return fullpath
})
.tap(function ()
{
if (cjk_changed)
{
crossSpawnSync('git', [
'commit',
'-m',
`[cjk-conv] ${pathMain} ${novelID}`,
], {
stdio: 'inherit',
cwd: CWD_IN,
});
}
})
})
.catch(e => {
console.error(e.message);
})
;
}
_current_data.last_s_ver = _current_data.s_ver;
_current_data.last_d_ver = _current_data.d_ver;
_current_data.s_ver = _s_ver;
_current_data.d_ver = _d_ver;
return cp.status;
})
.then(() => true)
.catch(CancellationError, (e: CancellationError) => {
console.error(e.message);
return false;
})
.tap(async function ()
{
_cache_segment.last_s_ver = _cache_segment.s_ver;
_cache_segment.last_d_ver = _cache_segment.d_ver;
_cache_segment.s_ver = _s_ver;
_cache_segment.d_ver = _d_ver;
await fs.outputJSON(_cache_file_segment, _cache_segment, {
spaces: "\t",
});
})
;
return cancellablePromise
.catch(CancellationError, (e) => {
return console.error(e.message);
});
}
| ?: string,
d_ver?: string,
last_s_ver?: | identifier_body |
lib.rs | // use std::borrow::Cow;
use std::cmp::Ordering;
use std::rc::Rc;
use std::result::Result;
use std::vec::Vec;
#[derive(Debug)]
enum MastError {
InvalidNode,
StoreError(std::io::Error),
}
#[derive(Debug,Clone)]
struct Node {
key: Vec<i32>,
value: Vec<i32>,
link: Vec<Option<Link>>,
dirty: bool,
}
/*
// TODO
impl Clone for Node {
fn clone(&self) -> Node {
panic!("why are you doing this")
}
}
impl ToOwned for Node {
type Owned = Node;
fn to_owned(&self) -> Self::Owned {
return *(self.clone());
}
}*/
#[derive(Clone, Debug)]
enum Link {
// Empty,
MutableNode(Node, Option<Rc<Node>>),
SharedNode(Rc<Node>),
// Node(Cow<'a, Node<'a>>),
Stored(String),
}
pub struct Mast<'a> {
size: u64,
height: u8,
root_link: Link,
branch_factor: u16,
grow_after_size: u64,
shrink_below_size: u64,
key_order: fn(&i32, &i32) -> i8,
key_layer: fn(&i32, u16) -> u8,
_a: std::marker::PhantomData<&'a u32>,
// marshal:
// unmarshal:
// store: InMemoryNodeStore<'a>,
}
const default_branch_factor: u16 = 16;
fn default_order(a: &i32, b: &i32) -> i8 {
if *a < *b {
return -1;
} else if *a > *b {
return 1;
} else {
return 0;
}
}
fn default_layer(v: &i32, branch_factor: u16) -> u8 {
let mut layer = 0;
let mut v = *v;
if branch_factor == 16 | else {
while v != 0 && v % branch_factor as i32 == 0 {
v /= branch_factor as i32;
layer += 1;
}
}
return layer;
}
impl<'a> Mast<'a> {
pub fn newInMemory() -> Mast<'a> {
return Mast {
size: 0,
height: 0,
root_link: Link::MutableNode(Node::new(default_branch_factor as usize), None),
branch_factor: default_branch_factor,
grow_after_size: default_branch_factor as u64,
shrink_below_size: 1,
key_order: default_order,
key_layer: default_layer,
_a: std::marker::PhantomData,
// store: InMemoryNodeStore::new(),
};
}
fn insert(&mut self, key: i32, value: i32) -> Result<InsertResult, MastError> {
let key_layer = (self.key_layer)(&key, self.branch_factor);
let target_layer = std::cmp::min(key_layer, self.height);
let distance = self.height - target_layer;
let root = load_mut(&mut self.root_link)?;
let res = root.insert(key, value, distance, self.key_order)?;
match res {
InsertResult::Inserted => self.size += 1,
_ => return Ok(res),
};
if self.size > self.grow_after_size
&& root.can_grow(self.height, self.key_layer, self.branch_factor)
{
self.root_link = root
.grow(self.height, self.key_layer, self.branch_factor)
.unwrap();
self.height += 1;
self.shrink_below_size *= self.branch_factor as u64;
self.grow_after_size *= self.branch_factor as u64;
};
Ok(res)
}
fn get(&self, key: &i32) -> Result<Option<&i32>, MastError> {
let mut distance =
self.height - std::cmp::min((self.key_layer)(key, self.branch_factor), self.height);
if distance < 0 { panic!("goo") };
let mut node = load(&self.root_link)?;
loop {
let (equal, i) = get_index_for_key(key, &node.key, self.key_order);
if distance == 0 {
if equal {
return Ok(Some(&node.value[i]));
} else {
return Ok(None);
}
} else {
distance -= 1
}
match node.link[i] {
None => return Ok(None),
Some(ref link) => node = load(link)?,
}
}
}
}
fn load(link: &Link) -> Result<&Node, MastError> {
match link {
// Link::Empty => Ok(Cow::Borrowed(&Node::empty()).to_mut()),
Link::MutableNode(ref node, _) => Ok(node),
Link::SharedNode(ref rc) => Ok(rc),
Link::Stored(_) => unimplemented!("Link::Stored"),
}
}
fn load_mut(link: &mut Link) -> Result<&mut Node, MastError> {
match link {
// Link::Empty => Ok(Cow::Borrowed(&Node::empty()).to_mut()),
Link::MutableNode(ref mut node, _) => Ok(node),
Link::SharedNode(ref mut rc) => {
let mutable = Rc::make_mut(rc).to_owned();
*link = Link::MutableNode(mutable, Some(rc.clone()));
if let Link::MutableNode(ref mut scopey, _) = link {
Ok(scopey)
} else {
panic!("asdf")
}
}
Link::Stored(_) => unimplemented!("Link::Stored"),
}
}
// struct NodeAndSlot<'a>(&'a mut Node<'a>, usize);
/*
struct FindOptions<'a> {
mast: &'a mut Mast<'a>,
target_layer: u8,
current_height: u8,
create_missing_nodes: bool,
node_path: Vec<&'a mut Node>,
link_path: Vec<usize>,
}
*/
impl Node {
fn new(branch_factor: usize) -> Node {
let mut link = Vec::with_capacity(branch_factor + 1);
link.push(None);
Node {
key: Vec::with_capacity(branch_factor),
value: Vec::with_capacity(branch_factor),
link,
dirty: false,
}
}
/*
fn follow(
&'a mut self,
index: usize,
create_ok: bool,
m: &'a mut Mast<'a>,
) -> std::result::Result<&'a mut Node<'a>, std::io::Error> {
if let Some(ref mut links) = self.link {
return Ok(m.load(&mut links[index])?);
} else if !create_ok {
return Ok(self);
}
return Ok(&mut Node::empty());
}*/
fn insert(
&mut self,
key: i32,
value: i32,
distance: u8,
key_order: fn(&i32, &i32) -> i8,
) -> Result<InsertResult, MastError> {
let (equal, i) = get_index_for_key(&key, &self.key, key_order);
if distance != 0 {
let mut z = self.link.get_mut(i).unwrap();
let child = match &mut z {
Some(ref mut link) => load_mut(link)?,
None => {
*z = Some(Link::MutableNode(Node::new(self.key.capacity()), None));
match &mut z {
Some(ref mut link) => load_mut(link)?,
None => panic!("can't load just-set link"),
}
}
};
let res = child.insert(key, value, distance - 1, key_order)?;
match res {
InsertResult::NoChange => (),
_ => self.dirty = true,
};
return Ok(res);
}
if equal {
if value == self.value[i] {
return Ok(InsertResult::NoChange);
}
self.value[i] = value;
self.dirty = true;
return Ok(InsertResult::Updated);
}
let (left_link, right_link) = match self.link.get_mut(i).unwrap() {
Some(ref mut link) => {
let child = load_mut(link)?;
split(child, &key, key_order)?
}
None => (None, None),
};
self.key.insert(i, key);
self.value.insert(i, value);
self.link[i] = right_link;
self.link.insert(i, left_link);
self.dirty = true;
return Ok(InsertResult::Inserted);
}
fn can_grow(
&self,
current_height: u8,
key_layer: fn(&i32, u16) -> u8,
branch_factor: u16,
) -> bool {
for key in &self.key {
if key_layer(key, branch_factor) > current_height {
return true;
}
}
return false;
}
fn grow(
&mut self,
current_height: u8,
key_layer: fn(&i32, u16) -> u8,
branch_factor: u16,
) -> Option<Link> {
let mut new_parent = Node::new(self.key.capacity());
if !self.is_empty() {
for i in 0..self.key.len() {
let key = &self.key[i];
let layer = key_layer(key, branch_factor);
if layer <= current_height {
continue;
}
let new_left = self.extract(i);
new_parent.key.push(self.key[0]);
new_parent.value.push(self.value[0]);
new_parent.link.insert(new_parent.link.len() - 1, new_left);
}
}
let new_right = self.extract(self.key.len());
*new_parent.link.last_mut().unwrap() = new_right;
return new_parent.to_link();
}
fn extract(&mut self, end: usize) -> Option<Link> {
let mut node = Node::new(self.key.capacity());
node.key = self.key.drain(..end).collect();
node.key.reserve(self.key.capacity());
node.value = self.value.drain(..end).collect();
node.value.reserve(self.key.capacity());
node.link = self.link.drain(..=end).collect();
node.link.reserve(self.key.capacity() + 1);
self.link.insert(0, None);
return node.to_link();
}
fn to_link(self) -> Option<Link> {
if self.is_empty() {
return None;
}
return Some(Link::MutableNode(self, None));
}
fn is_empty(&self) -> bool {
return self.key.len() == 0
&& self.value.len() == 0
&& self.link.len() == 1
&& self.link[0].is_none();
}
}
#[derive(Debug)]
enum InsertResult {
Updated,
Inserted,
NoChange,
}
fn split(
node: &mut Node,
key: &i32,
key_order: fn(&i32, &i32) -> i8,
) -> Result<(Option<Link>, Option<Link>), MastError> {
let (equal, i) = get_index_for_key(key, &node.key, key_order);
if equal {
panic!("split not expecting existing key")
}
let mut left_node = Node::new(node.key.capacity());
let mut right_node = Node::new(node.key.capacity());
let (mut left, mut right) = node.key.split_at(i);
left_node.key.extend_from_slice(left);
right_node.key.extend_from_slice(right);
let (mut left, mut right) = node.value.split_at(i);
left_node.value.extend_from_slice(left);
right_node.value.extend_from_slice(left);
let (mut left, mut right) = node.link.split_at(i + 1);
left_node.link.remove(0);
left_node.link.extend_from_slice(left);
right_node.link.extend_from_slice(right);
// repartition left and right subtrees based on new key
if let Some(ref mut cur_left_max_link) = left_node.link[i] {
let left_max = load_mut(cur_left_max_link)?;
let (left_max_link, too_big_link) = split(left_max, key, key_order)?;
left_node.link[i] = left_max_link;
right_node.link[0] = too_big_link;
};
if let Some(ref mut cur_right_min_link) = right_node.link[0] {
let right_min = load_mut(cur_right_min_link)?;
let (too_small_link, right_min_link) = split(right_min, key, key_order)?;
if too_small_link.is_some() {
panic!("bad news!")
}
right_node.link[0] = right_min_link
};
return Ok((left_node.to_link(), right_node.to_link()));
}
fn get_index_for_key(key: &i32, keys: &Vec<i32>, key_order: fn(&i32, &i32) -> i8) -> (bool, usize) {
match keys.binary_search_by(|x| {
let r = key_order(x, key);
if r < 0 {
Ordering::Less
} else if r > 0 {
Ordering::Greater
} else {
Ordering::Equal
}
}) {
Ok(n) => (true, n),
Err(n) => (false, n),
}
}
fn bad_get_index_for_key(
key: &i32,
keys: &Vec<i32>,
key_order: fn(&i32, &i32) -> i8,
) -> (bool, usize) {
let mut cmp: i8 = 1;
let mut i: usize = 0;
while i < keys.len() {
cmp = (key_order)(&keys[i], key);
if cmp >= 0 {
break;
};
i += 1
}
return (cmp == 0, i);
}
/*
fn findNode<'a>(key: i32, options: &mut FindOptions<'a>) -> std::result::Result<(), MastError> {
let mut cmp: i8 = 1;
let mut i: usize = 0;
let keyOrder = options.mast.keyOrder;
let mut node = options.node_path.last().unwrap();
unimplemented!();
while i < node.key.len() {
cmp = (keyOrder)(node.key[i], key);
if cmp >= 0 {
break;
}
i += 1
}
if cmp == 0 || options.current_height == options.target_layer {
return Ok(());
};
let child_link = match node.link {
None => return Err(MastError::InvalidNode),
Some(ref mut link) => link.get_mut(i).unwrap(),
};
let child = load(child_link)?;
options.current_height -= 1;
options.node_path.push(child);
options.link_path.push(i);
return findNode(key, options);
}*/
/*
trait NodeStore<'a> {
fn load(&mut self, link: &'a mut Link) -> Result<&'a mut Node, std::io::Error>;
}
struct InMemoryNodeStore<'a> {
map: std::collections::HashMap<String, Node>,
}
impl<'a> InMemoryNodeStore<'a> {
fn new() -> InMemoryNodeStore<'a> {
InMemoryNodeStore {
map: std::collections::HashMap::new(),
}
}
}
impl<'a> NodeStore<'a> for InMemoryNodeStore<'a> {
fn load(&mut self, link: &'a mut Link) -> Result<&'a mut Node, std::io::Error> {
match link {
// Link::Empty => Ok(Cow::Borrowed(&Node::empty()).to_mut()),
Link::MutableNode(ref mut cow) => Ok(cow.to_mut()),
}
}
}
*/
#[test]
fn test_insert_accessibility() -> std::result::Result<(), MastError> {
let mut t = Mast::newInMemory();
let n = 16 * 16 + 2;
for i in 0..n {
t.insert(i, i)?;
for i in 0..=i {
let v = t.get(&i)?;
assert_eq!(v, Some(&i))
}
}
Ok(())
}
#[test]
fn test_bench_insert() -> std::result::Result<(), MastError> {
let mut t = Mast::newInMemory();
let parts = 4;
let mut n = 16 * 16 * 16;
let mut i = 0;
let mut start = std::time::Instant::now();
for p in 0..parts {
while i < n {
t.insert(i, i)?;
i += 1;
}
let end = std::time::Instant::now();
let diff = end - start;
println!(
"part {}/{}: height:{}, {}/s ({}ns/op) size:{}",
p + 1,
parts,
//diff.as_micros(), // {}μs,
t.height,
1_000_000_000 / (diff.as_nanos() / t.size as u128),
diff.as_nanos() / t.size as u128,
t.size,
);
n *= 16;
start = end;
}
Ok(())
}
#[test]
fn test_int_layer() {
assert_eq!(default_layer(&-528, 16), 1);
assert_eq!(default_layer(&-513, 16), 0);
assert_eq!(default_layer(&-512, 16), 2);
assert_eq!(default_layer(&-256, 16), 2);
assert_eq!(default_layer(&-16, 16), 1);
assert_eq!(default_layer(&-1, 16), 0);
assert_eq!(default_layer(&0, 16), 0);
assert_eq!(default_layer(&1, 16), 0);
assert_eq!(default_layer(&16, 16), 1);
assert_eq!(default_layer(&32, 16), 1);
}
| {
while v != 0 && v & 0xf == 0 {
v >>= 4;
layer += 1
}
} | conditional_block |
lib.rs | // use std::borrow::Cow;
use std::cmp::Ordering;
use std::rc::Rc;
use std::result::Result;
use std::vec::Vec;
#[derive(Debug)]
enum MastError {
InvalidNode,
StoreError(std::io::Error),
}
#[derive(Debug,Clone)]
struct Node {
key: Vec<i32>,
value: Vec<i32>,
link: Vec<Option<Link>>,
dirty: bool,
}
/*
// TODO
impl Clone for Node {
fn clone(&self) -> Node {
panic!("why are you doing this")
}
}
impl ToOwned for Node {
type Owned = Node;
fn to_owned(&self) -> Self::Owned {
return *(self.clone());
}
}*/
#[derive(Clone, Debug)]
enum Link {
// Empty,
MutableNode(Node, Option<Rc<Node>>),
SharedNode(Rc<Node>),
// Node(Cow<'a, Node<'a>>),
Stored(String),
}
pub struct Mast<'a> {
size: u64,
height: u8,
root_link: Link,
branch_factor: u16,
grow_after_size: u64,
shrink_below_size: u64,
key_order: fn(&i32, &i32) -> i8,
key_layer: fn(&i32, u16) -> u8,
_a: std::marker::PhantomData<&'a u32>,
// marshal:
// unmarshal:
// store: InMemoryNodeStore<'a>,
}
const default_branch_factor: u16 = 16;
fn default_order(a: &i32, b: &i32) -> i8 {
if *a < *b {
return -1;
} else if *a > *b {
return 1;
} else {
return 0;
}
}
fn default_layer(v: &i32, branch_factor: u16) -> u8 {
let mut layer = 0;
let mut v = *v;
if branch_factor == 16 {
while v != 0 && v & 0xf == 0 {
v >>= 4;
layer += 1
}
} else {
while v != 0 && v % branch_factor as i32 == 0 {
v /= branch_factor as i32;
layer += 1;
}
}
return layer;
}
impl<'a> Mast<'a> {
pub fn newInMemory() -> Mast<'a> {
return Mast {
size: 0,
height: 0,
root_link: Link::MutableNode(Node::new(default_branch_factor as usize), None),
branch_factor: default_branch_factor,
grow_after_size: default_branch_factor as u64,
shrink_below_size: 1,
key_order: default_order,
key_layer: default_layer,
_a: std::marker::PhantomData,
// store: InMemoryNodeStore::new(),
};
}
fn insert(&mut self, key: i32, value: i32) -> Result<InsertResult, MastError> {
let key_layer = (self.key_layer)(&key, self.branch_factor);
let target_layer = std::cmp::min(key_layer, self.height);
let distance = self.height - target_layer;
let root = load_mut(&mut self.root_link)?;
let res = root.insert(key, value, distance, self.key_order)?;
match res {
InsertResult::Inserted => self.size += 1,
_ => return Ok(res),
};
if self.size > self.grow_after_size
&& root.can_grow(self.height, self.key_layer, self.branch_factor)
{
self.root_link = root
.grow(self.height, self.key_layer, self.branch_factor)
.unwrap();
self.height += 1;
self.shrink_below_size *= self.branch_factor as u64;
self.grow_after_size *= self.branch_factor as u64;
};
Ok(res)
}
fn get(&self, key: &i32) -> Result<Option<&i32>, MastError> {
let mut distance =
self.height - std::cmp::min((self.key_layer)(key, self.branch_factor), self.height);
if distance < 0 { panic!("goo") };
let mut node = load(&self.root_link)?;
loop {
let (equal, i) = get_index_for_key(key, &node.key, self.key_order);
if distance == 0 {
if equal {
return Ok(Some(&node.value[i]));
} else {
return Ok(None);
}
} else {
distance -= 1
}
match node.link[i] {
None => return Ok(None),
Some(ref link) => node = load(link)?,
}
}
}
}
fn load(link: &Link) -> Result<&Node, MastError> {
match link {
// Link::Empty => Ok(Cow::Borrowed(&Node::empty()).to_mut()),
Link::MutableNode(ref node, _) => Ok(node),
Link::SharedNode(ref rc) => Ok(rc),
Link::Stored(_) => unimplemented!("Link::Stored"),
}
}
fn load_mut(link: &mut Link) -> Result<&mut Node, MastError> {
match link {
// Link::Empty => Ok(Cow::Borrowed(&Node::empty()).to_mut()),
Link::MutableNode(ref mut node, _) => Ok(node),
Link::SharedNode(ref mut rc) => {
let mutable = Rc::make_mut(rc).to_owned();
*link = Link::MutableNode(mutable, Some(rc.clone()));
if let Link::MutableNode(ref mut scopey, _) = link {
Ok(scopey)
} else {
panic!("asdf")
}
}
Link::Stored(_) => unimplemented!("Link::Stored"),
}
}
// struct NodeAndSlot<'a>(&'a mut Node<'a>, usize);
/*
struct FindOptions<'a> {
mast: &'a mut Mast<'a>,
target_layer: u8,
current_height: u8,
create_missing_nodes: bool,
node_path: Vec<&'a mut Node>,
link_path: Vec<usize>,
}
*/
impl Node {
fn new(branch_factor: usize) -> Node {
let mut link = Vec::with_capacity(branch_factor + 1);
link.push(None);
Node {
key: Vec::with_capacity(branch_factor),
value: Vec::with_capacity(branch_factor),
link,
dirty: false,
}
}
/*
fn follow(
&'a mut self,
index: usize,
create_ok: bool,
m: &'a mut Mast<'a>,
) -> std::result::Result<&'a mut Node<'a>, std::io::Error> {
if let Some(ref mut links) = self.link {
return Ok(m.load(&mut links[index])?);
} else if !create_ok {
return Ok(self);
}
return Ok(&mut Node::empty());
}*/
fn insert(
&mut self,
key: i32,
value: i32,
distance: u8,
key_order: fn(&i32, &i32) -> i8,
) -> Result<InsertResult, MastError> {
let (equal, i) = get_index_for_key(&key, &self.key, key_order);
if distance != 0 {
let mut z = self.link.get_mut(i).unwrap();
let child = match &mut z {
Some(ref mut link) => load_mut(link)?,
None => {
*z = Some(Link::MutableNode(Node::new(self.key.capacity()), None));
match &mut z {
Some(ref mut link) => load_mut(link)?,
None => panic!("can't load just-set link"),
}
} | };
return Ok(res);
}
if equal {
if value == self.value[i] {
return Ok(InsertResult::NoChange);
}
self.value[i] = value;
self.dirty = true;
return Ok(InsertResult::Updated);
}
let (left_link, right_link) = match self.link.get_mut(i).unwrap() {
Some(ref mut link) => {
let child = load_mut(link)?;
split(child, &key, key_order)?
}
None => (None, None),
};
self.key.insert(i, key);
self.value.insert(i, value);
self.link[i] = right_link;
self.link.insert(i, left_link);
self.dirty = true;
return Ok(InsertResult::Inserted);
}
fn can_grow(
&self,
current_height: u8,
key_layer: fn(&i32, u16) -> u8,
branch_factor: u16,
) -> bool {
for key in &self.key {
if key_layer(key, branch_factor) > current_height {
return true;
}
}
return false;
}
fn grow(
&mut self,
current_height: u8,
key_layer: fn(&i32, u16) -> u8,
branch_factor: u16,
) -> Option<Link> {
let mut new_parent = Node::new(self.key.capacity());
if !self.is_empty() {
for i in 0..self.key.len() {
let key = &self.key[i];
let layer = key_layer(key, branch_factor);
if layer <= current_height {
continue;
}
let new_left = self.extract(i);
new_parent.key.push(self.key[0]);
new_parent.value.push(self.value[0]);
new_parent.link.insert(new_parent.link.len() - 1, new_left);
}
}
let new_right = self.extract(self.key.len());
*new_parent.link.last_mut().unwrap() = new_right;
return new_parent.to_link();
}
fn extract(&mut self, end: usize) -> Option<Link> {
let mut node = Node::new(self.key.capacity());
node.key = self.key.drain(..end).collect();
node.key.reserve(self.key.capacity());
node.value = self.value.drain(..end).collect();
node.value.reserve(self.key.capacity());
node.link = self.link.drain(..=end).collect();
node.link.reserve(self.key.capacity() + 1);
self.link.insert(0, None);
return node.to_link();
}
fn to_link(self) -> Option<Link> {
if self.is_empty() {
return None;
}
return Some(Link::MutableNode(self, None));
}
fn is_empty(&self) -> bool {
return self.key.len() == 0
&& self.value.len() == 0
&& self.link.len() == 1
&& self.link[0].is_none();
}
}
#[derive(Debug)]
enum InsertResult {
Updated,
Inserted,
NoChange,
}
fn split(
node: &mut Node,
key: &i32,
key_order: fn(&i32, &i32) -> i8,
) -> Result<(Option<Link>, Option<Link>), MastError> {
let (equal, i) = get_index_for_key(key, &node.key, key_order);
if equal {
panic!("split not expecting existing key")
}
let mut left_node = Node::new(node.key.capacity());
let mut right_node = Node::new(node.key.capacity());
let (mut left, mut right) = node.key.split_at(i);
left_node.key.extend_from_slice(left);
right_node.key.extend_from_slice(right);
let (mut left, mut right) = node.value.split_at(i);
left_node.value.extend_from_slice(left);
right_node.value.extend_from_slice(left);
let (mut left, mut right) = node.link.split_at(i + 1);
left_node.link.remove(0);
left_node.link.extend_from_slice(left);
right_node.link.extend_from_slice(right);
// repartition left and right subtrees based on new key
if let Some(ref mut cur_left_max_link) = left_node.link[i] {
let left_max = load_mut(cur_left_max_link)?;
let (left_max_link, too_big_link) = split(left_max, key, key_order)?;
left_node.link[i] = left_max_link;
right_node.link[0] = too_big_link;
};
if let Some(ref mut cur_right_min_link) = right_node.link[0] {
let right_min = load_mut(cur_right_min_link)?;
let (too_small_link, right_min_link) = split(right_min, key, key_order)?;
if too_small_link.is_some() {
panic!("bad news!")
}
right_node.link[0] = right_min_link
};
return Ok((left_node.to_link(), right_node.to_link()));
}
fn get_index_for_key(key: &i32, keys: &Vec<i32>, key_order: fn(&i32, &i32) -> i8) -> (bool, usize) {
match keys.binary_search_by(|x| {
let r = key_order(x, key);
if r < 0 {
Ordering::Less
} else if r > 0 {
Ordering::Greater
} else {
Ordering::Equal
}
}) {
Ok(n) => (true, n),
Err(n) => (false, n),
}
}
fn bad_get_index_for_key(
key: &i32,
keys: &Vec<i32>,
key_order: fn(&i32, &i32) -> i8,
) -> (bool, usize) {
let mut cmp: i8 = 1;
let mut i: usize = 0;
while i < keys.len() {
cmp = (key_order)(&keys[i], key);
if cmp >= 0 {
break;
};
i += 1
}
return (cmp == 0, i);
}
/*
fn findNode<'a>(key: i32, options: &mut FindOptions<'a>) -> std::result::Result<(), MastError> {
let mut cmp: i8 = 1;
let mut i: usize = 0;
let keyOrder = options.mast.keyOrder;
let mut node = options.node_path.last().unwrap();
unimplemented!();
while i < node.key.len() {
cmp = (keyOrder)(node.key[i], key);
if cmp >= 0 {
break;
}
i += 1
}
if cmp == 0 || options.current_height == options.target_layer {
return Ok(());
};
let child_link = match node.link {
None => return Err(MastError::InvalidNode),
Some(ref mut link) => link.get_mut(i).unwrap(),
};
let child = load(child_link)?;
options.current_height -= 1;
options.node_path.push(child);
options.link_path.push(i);
return findNode(key, options);
}*/
/*
trait NodeStore<'a> {
fn load(&mut self, link: &'a mut Link) -> Result<&'a mut Node, std::io::Error>;
}
struct InMemoryNodeStore<'a> {
map: std::collections::HashMap<String, Node>,
}
impl<'a> InMemoryNodeStore<'a> {
fn new() -> InMemoryNodeStore<'a> {
InMemoryNodeStore {
map: std::collections::HashMap::new(),
}
}
}
impl<'a> NodeStore<'a> for InMemoryNodeStore<'a> {
fn load(&mut self, link: &'a mut Link) -> Result<&'a mut Node, std::io::Error> {
match link {
// Link::Empty => Ok(Cow::Borrowed(&Node::empty()).to_mut()),
Link::MutableNode(ref mut cow) => Ok(cow.to_mut()),
}
}
}
*/
#[test]
fn test_insert_accessibility() -> std::result::Result<(), MastError> {
let mut t = Mast::newInMemory();
let n = 16 * 16 + 2;
for i in 0..n {
t.insert(i, i)?;
for i in 0..=i {
let v = t.get(&i)?;
assert_eq!(v, Some(&i))
}
}
Ok(())
}
#[test]
fn test_bench_insert() -> std::result::Result<(), MastError> {
let mut t = Mast::newInMemory();
let parts = 4;
let mut n = 16 * 16 * 16;
let mut i = 0;
let mut start = std::time::Instant::now();
for p in 0..parts {
while i < n {
t.insert(i, i)?;
i += 1;
}
let end = std::time::Instant::now();
let diff = end - start;
println!(
"part {}/{}: height:{}, {}/s ({}ns/op) size:{}",
p + 1,
parts,
//diff.as_micros(), // {}μs,
t.height,
1_000_000_000 / (diff.as_nanos() / t.size as u128),
diff.as_nanos() / t.size as u128,
t.size,
);
n *= 16;
start = end;
}
Ok(())
}
#[test]
fn test_int_layer() {
assert_eq!(default_layer(&-528, 16), 1);
assert_eq!(default_layer(&-513, 16), 0);
assert_eq!(default_layer(&-512, 16), 2);
assert_eq!(default_layer(&-256, 16), 2);
assert_eq!(default_layer(&-16, 16), 1);
assert_eq!(default_layer(&-1, 16), 0);
assert_eq!(default_layer(&0, 16), 0);
assert_eq!(default_layer(&1, 16), 0);
assert_eq!(default_layer(&16, 16), 1);
assert_eq!(default_layer(&32, 16), 1);
} | };
let res = child.insert(key, value, distance - 1, key_order)?;
match res {
InsertResult::NoChange => (),
_ => self.dirty = true, | random_line_split |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.