file_name large_stringlengths 4 140 | prefix large_stringlengths 0 12.1k | suffix large_stringlengths 0 12k | middle large_stringlengths 0 7.51k | fim_type large_stringclasses 4
values |
|---|---|---|---|---|
keyword_plan.pb.go | (m *KeywordPlan) XXX_DiscardUnknown() {
xxx_messageInfo_KeywordPlan.DiscardUnknown(m)
}
var xxx_messageInfo_KeywordPlan proto.InternalMessageInfo
func (m *KeywordPlan) GetResourceName() string {
if m != nil {
return m.ResourceName
}
return ""
}
func (m *KeywordPlan) GetId() *wrappers.Int64Value {
if m != nil {
return m.Id
}
return nil
}
func (m *KeywordPlan) GetName() *wrappers.StringValue {
if m != nil {
return m.Name
}
return nil
}
func (m *KeywordPlan) GetForecastPeriod() *KeywordPlanForecastPeriod {
if m != nil |
return nil
}
// The forecasting period associated with the keyword plan.
type KeywordPlanForecastPeriod struct {
// Required. The date used for forecasting the Plan.
//
// Types that are valid to be assigned to Interval:
// *KeywordPlanForecastPeriod_DateInterval
// *KeywordPlanForecastPeriod_DateRange
Interval isKeywordPlanForecastPeriod_Interval `protobuf_oneof:"interval"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *KeywordPlanForecastPeriod) Reset() { *m = KeywordPlanForecastPeriod{} }
func (m *KeywordPlanForecastPeriod) String() string { return proto.CompactTextString(m) }
func (*KeywordPlanForecastPeriod) ProtoMessage() {}
func (*KeywordPlanForecastPeriod) Descriptor() ([]byte, []int) {
return fileDescriptor_db2ef87e79a4b462, []int{1}
}
func (m *KeywordPlanForecastPeriod) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_KeywordPlanForecastPeriod.Unmarshal(m, b)
}
func (m *KeywordPlanForecastPeriod) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_KeywordPlanForecastPeriod.Marshal(b, m, deterministic)
}
func (m *KeywordPlanForecastPeriod) XXX_Merge(src proto.Message) {
xxx_messageInfo_KeywordPlanForecastPeriod.Merge(m, src)
}
func (m *KeywordPlanForecastPeriod) XXX_Size() int {
return xxx_messageInfo_KeywordPlanForecastPeriod.Size(m)
}
func (m *KeywordPlanForecastPeriod) XXX_DiscardUnknown() {
xxx_messageInfo_KeywordPlanForecastPeriod.DiscardUnknown(m)
}
var xxx_messageInfo_KeywordPlanForecastPeriod proto.InternalMessageInfo
type isKeywordPlanForecastPeriod_Interval interface {
isKeywordPlanForecastPeriod_Interval()
}
type KeywordPlanForecastPeriod_DateInterval struct {
DateInterval enums.KeywordPlanForecastIntervalEnum_KeywordPlanForecastInterval `protobuf:"varint,1,opt,name=date_interval,json=dateInterval,proto3,enum=google.ads.googleads.v2.enums.KeywordPlanForecastIntervalEnum_KeywordPlanForecastInterval,oneof"`
}
type KeywordPlanForecastPeriod_DateRange struct {
DateRange *common.DateRange `protobuf:"bytes,2,opt,name=date_range,json=dateRange,proto3,oneof"`
}
func (*KeywordPlanForecastPeriod_DateInterval) isKeywordPlanForecastPeriod_Interval() {}
func (*KeywordPlanForecastPeriod_DateRange) isKeywordPlanForecastPeriod_Interval() {}
func (m *KeywordPlanForecastPeriod) GetInterval() isKeywordPlanForecastPeriod_Interval {
if m != nil {
return m.Interval
}
return nil
}
func (m *KeywordPlanForecastPeriod) GetDateInterval() enums.KeywordPlanForecastIntervalEnum_KeywordPlanForecastInterval {
if x, ok := m.GetInterval().(*KeywordPlanForecastPeriod_DateInterval); ok {
return x.DateInterval
}
return enums.KeywordPlanForecastIntervalEnum_UNSPECIFIED
}
func (m *KeywordPlanForecastPeriod) GetDateRange() *common.DateRange {
if x, ok := m.GetInterval().(*KeywordPlanForecastPeriod_DateRange); ok {
return x.DateRange
}
return nil
}
// XXX_OneofWrappers is for the internal use of the proto package.
func (*KeywordPlanForecastPeriod) XXX_OneofWrappers() []interface{} {
return []interface{}{
(*KeywordPlanForecastPeriod_DateInterval)(nil),
(*KeywordPlanForecastPeriod_DateRange)(nil),
}
}
func init() {
proto.RegisterType((*KeywordPlan)(nil), "google.ads.googleads.v2.resources.KeywordPlan")
proto.RegisterType((*KeywordPlanForecastPeriod)(nil), "google.ads.googleads.v2.resources.KeywordPlanForecastPeriod")
}
func init() {
proto.RegisterFile("google/ads/googleads/v2/resources/keyword_plan.proto", fileDescriptor_db2ef87e79a4b462)
}
var fileDescriptor_db2ef87e79a4b462 = []byte{
// 485 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x93, 0xdd, 0x6a, 0xd4, 0x40,
0x14, 0xc7, 0x9b, 0xb4, 0x88, 0x9d, 0x7e, 0x28, 0xb9, 0x5a, 0x6b, 0x91, 0xb6, 0x52, 0xa8, 0x0a,
0x13, 0x89, 0xc5, 0x8b, 0xe8, 0x4d, 0x16, 0xb5, 0x1f, 0x82, 0x2c, 0x11, 0xf6, 0xa2, 0x2c, 0x2c,
0xd3, 0x9d, 0xb3, 0x21, 0x98, 0xcc, 0x84, 0x99, 0xc9, 0x16, 0x2f, 0x7d, 0x15, 0x2f, 0x7d, 0x14,
0x1f, 0xc5, 0x17, 0xd0, 0x1b, 0x41, 0x32, 0x5f, 0xb4, 0xd8, 0x74, 0xef, 0xce, 0xd9, 0xf9, 0x9d,
0xff, 0xff, 0x7c, 0x64, 0xd1, 0x71, 0xc1, 0x79, 0x51, 0x41, 0x4c, 0xa8, 0x8c, 0x4d, 0xd8, 0x45,
0x8b, 0x24, 0x16, 0x20, 0x79, 0x2b, 0x66, 0x20, 0xe3, 0x2f, 0xf0, 0xf5, 0x8a, 0x0b, 0x3a, 0x6d,
0x2a, 0xc2, 0x70, 0x23, 0xb8, 0xe2, 0xd1, 0xbe, 0x41, 0x31, 0xa1, 0x12, 0xfb, 0x2a, 0xbc, 0x48,
0xb0, 0xaf, 0xda, 0x79, 0xde, 0x27, 0x3c, 0xe3, 0x75, 0xcd, 0x59, 0x4c, 0x89, 0x02, 0x69, 0xe4,
0x76, 0x86, 0x7d, 0x2c, 0xb0, 0xb6, 0xbe, 0xd9, 0xc0, 0x74, 0xce, 0x05, 0xcc, 0x88, 0x54, 0xd3,
0x92, 0x29, 0x10, 0x0b, 0x52, 0x59, 0x8d, 0x27, 0x56, 0x43, 0x67, 0x97, 0xed, 0x3c, 0xbe, 0x12,
0xa4, 0x | {
return m.ForecastPeriod
} | conditional_block |
keyword_plan.pb.go | func (m *KeywordPlan) XXX_DiscardUnknown() {
xxx_messageInfo_KeywordPlan.DiscardUnknown(m)
}
var xxx_messageInfo_KeywordPlan proto.InternalMessageInfo
func (m *KeywordPlan) GetResourceName() string {
if m != nil {
return m.ResourceName
}
return ""
}
func (m *KeywordPlan) GetId() *wrappers.Int64Value {
if m != nil {
return m.Id
}
return nil
}
func (m *KeywordPlan) GetName() *wrappers.StringValue {
if m != nil {
return m.Name
}
return nil
}
func (m *KeywordPlan) GetForecastPeriod() *KeywordPlanForecastPeriod {
if m != nil {
return m.ForecastPeriod
}
return nil
}
// The forecasting period associated with the keyword plan.
type KeywordPlanForecastPeriod struct {
// Required. The date used for forecasting the Plan.
//
// Types that are valid to be assigned to Interval:
// *KeywordPlanForecastPeriod_DateInterval
// *KeywordPlanForecastPeriod_DateRange
Interval isKeywordPlanForecastPeriod_Interval `protobuf_oneof:"interval"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *KeywordPlanForecastPeriod) Reset() { *m = KeywordPlanForecastPeriod{} }
func (m *KeywordPlanForecastPeriod) String() string { return proto.CompactTextString(m) }
func (*KeywordPlanForecastPeriod) ProtoMessage() {}
func (*KeywordPlanForecastPeriod) Descriptor() ([]byte, []int) {
return fileDescriptor_db2ef87e79a4b462, []int{1}
}
func (m *KeywordPlanForecastPeriod) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_KeywordPlanForecastPeriod.Unmarshal(m, b)
}
func (m *KeywordPlanForecastPeriod) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_KeywordPlanForecastPeriod.Marshal(b, m, deterministic)
}
func (m *KeywordPlanForecastPeriod) XXX_Merge(src proto.Message) {
xxx_messageInfo_KeywordPlanForecastPeriod.Merge(m, src)
}
func (m *KeywordPlanForecastPeriod) XXX_Size() int {
return xxx_messageInfo_KeywordPlanForecastPeriod.Size(m)
}
func (m *KeywordPlanForecastPeriod) XXX_DiscardUnknown() {
xxx_messageInfo_KeywordPlanForecastPeriod.DiscardUnknown(m)
}
var xxx_messageInfo_KeywordPlanForecastPeriod proto.InternalMessageInfo
type isKeywordPlanForecastPeriod_Interval interface {
isKeywordPlanForecastPeriod_Interval()
}
type KeywordPlanForecastPeriod_DateInterval struct {
DateInterval enums.KeywordPlanForecastIntervalEnum_KeywordPlanForecastInterval `protobuf:"varint,1,opt,name=date_interval,json=dateInterval,proto3,enum=google.ads.googleads.v2.enums.KeywordPlanForecastIntervalEnum_KeywordPlanForecastInterval,oneof"`
}
type KeywordPlanForecastPeriod_DateRange struct {
DateRange *common.DateRange `protobuf:"bytes,2,opt,name=date_range,json=dateRange,proto3,oneof"`
}
func (*KeywordPlanForecastPeriod_DateInterval) | () {}
func (*KeywordPlanForecastPeriod_DateRange) isKeywordPlanForecastPeriod_Interval() {}
func (m *KeywordPlanForecastPeriod) GetInterval() isKeywordPlanForecastPeriod_Interval {
if m != nil {
return m.Interval
}
return nil
}
func (m *KeywordPlanForecastPeriod) GetDateInterval() enums.KeywordPlanForecastIntervalEnum_KeywordPlanForecastInterval {
if x, ok := m.GetInterval().(*KeywordPlanForecastPeriod_DateInterval); ok {
return x.DateInterval
}
return enums.KeywordPlanForecastIntervalEnum_UNSPECIFIED
}
func (m *KeywordPlanForecastPeriod) GetDateRange() *common.DateRange {
if x, ok := m.GetInterval().(*KeywordPlanForecastPeriod_DateRange); ok {
return x.DateRange
}
return nil
}
// XXX_OneofWrappers is for the internal use of the proto package.
func (*KeywordPlanForecastPeriod) XXX_OneofWrappers() []interface{} {
return []interface{}{
(*KeywordPlanForecastPeriod_DateInterval)(nil),
(*KeywordPlanForecastPeriod_DateRange)(nil),
}
}
func init() {
proto.RegisterType((*KeywordPlan)(nil), "google.ads.googleads.v2.resources.KeywordPlan")
proto.RegisterType((*KeywordPlanForecastPeriod)(nil), "google.ads.googleads.v2.resources.KeywordPlanForecastPeriod")
}
func init() {
proto.RegisterFile("google/ads/googleads/v2/resources/keyword_plan.proto", fileDescriptor_db2ef87e79a4b462)
}
var fileDescriptor_db2ef87e79a4b462 = []byte{
// 485 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x93, 0xdd, 0x6a, 0xd4, 0x40,
0x14, 0xc7, 0x9b, 0xb4, 0x88, 0x9d, 0x7e, 0x28, 0xb9, 0x5a, 0x6b, 0x91, 0xb6, 0x52, 0xa8, 0x0a,
0x13, 0x89, 0xc5, 0x8b, 0xe8, 0x4d, 0x16, 0xb5, 0x1f, 0x82, 0x2c, 0x11, 0xf6, 0xa2, 0x2c, 0x2c,
0xd3, 0x9d, 0xb3, 0x21, 0x98, 0xcc, 0x84, 0x99, 0xc9, 0x16, 0x2f, 0x7d, 0x15, 0x2f, 0x7d, 0x14,
0x1f, 0xc5, 0x17, 0xd0, 0x1b, 0x41, 0x32, 0x5f, 0xb4, 0xd8, 0x74, 0xef, 0xce, 0xd9, 0xf9, 0x9d,
0xff, 0xff, 0x7c, 0x64, 0xd1, 0x71, 0xc1, 0x79, 0x51, 0x41, 0x4c, 0xa8, 0x8c, 0x4d, 0xd8, 0x45,
0x8b, 0x24, 0x16, 0x20, 0x79, 0x2b, 0x66, 0x20, 0xe3, 0x2f, 0xf0, 0xf5, 0x8a, 0x0b, 0x3a, 0x6d,
0x2a, 0xc2, 0x70, 0x23, 0xb8, 0xe2, 0xd1, 0xbe, 0x41, 0x31, 0xa1, 0x12, 0xfb, 0x2a, 0xbc, 0x48,
0xb0, 0xaf, 0xda, 0x79, 0xde, 0x27, 0x3c, 0xe3, 0x75, 0xcd, 0x59, 0x4c, 0x89, 0x02, 0x69, 0xe4,
0x76, 0x86, 0x7d, 0x2c, 0xb0, 0xb6, 0xbe, 0xd9, 0xc0, 0x74, 0xce, 0x05, 0xcc, 0x88, 0x54, 0xd3,
0x92, 0x29, 0x10, 0x0b, 0x52, 0x59, 0x8d, 0x27, 0x56, 0x43, 0x67, 0x97, 0xed, 0x3c, 0xbe, 0x12,
0xa4, 0x | isKeywordPlanForecastPeriod_Interval | identifier_name |
keyword_plan.pb.go | xxx_messageInfo_KeywordPlan.Merge(m, src)
}
func (m *KeywordPlan) XXX_Size() int {
return xxx_messageInfo_KeywordPlan.Size(m)
}
func (m *KeywordPlan) XXX_DiscardUnknown() {
xxx_messageInfo_KeywordPlan.DiscardUnknown(m)
}
var xxx_messageInfo_KeywordPlan proto.InternalMessageInfo
func (m *KeywordPlan) GetResourceName() string {
if m != nil {
return m.ResourceName
}
return ""
}
func (m *KeywordPlan) GetId() *wrappers.Int64Value {
if m != nil {
return m.Id
}
return nil
}
func (m *KeywordPlan) GetName() *wrappers.StringValue {
if m != nil {
return m.Name
}
return nil
}
func (m *KeywordPlan) GetForecastPeriod() *KeywordPlanForecastPeriod {
if m != nil {
return m.ForecastPeriod
}
return nil
}
// The forecasting period associated with the keyword plan.
type KeywordPlanForecastPeriod struct {
// Required. The date used for forecasting the Plan.
//
// Types that are valid to be assigned to Interval:
// *KeywordPlanForecastPeriod_DateInterval
// *KeywordPlanForecastPeriod_DateRange
Interval isKeywordPlanForecastPeriod_Interval `protobuf_oneof:"interval"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *KeywordPlanForecastPeriod) Reset() { *m = KeywordPlanForecastPeriod{} }
func (m *KeywordPlanForecastPeriod) String() string { return proto.CompactTextString(m) }
func (*KeywordPlanForecastPeriod) ProtoMessage() {}
func (*KeywordPlanForecastPeriod) Descriptor() ([]byte, []int) {
return fileDescriptor_db2ef87e79a4b462, []int{1}
}
func (m *KeywordPlanForecastPeriod) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_KeywordPlanForecastPeriod.Unmarshal(m, b)
}
func (m *KeywordPlanForecastPeriod) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_KeywordPlanForecastPeriod.Marshal(b, m, deterministic)
}
func (m *KeywordPlanForecastPeriod) XXX_Merge(src proto.Message) {
xxx_messageInfo_KeywordPlanForecastPeriod.Merge(m, src)
}
func (m *KeywordPlanForecastPeriod) XXX_Size() int {
return xxx_messageInfo_KeywordPlanForecastPeriod.Size(m)
}
func (m *KeywordPlanForecastPeriod) XXX_DiscardUnknown() {
xxx_messageInfo_KeywordPlanForecastPeriod.DiscardUnknown(m)
}
var xxx_messageInfo_KeywordPlanForecastPeriod proto.InternalMessageInfo
type isKeywordPlanForecastPeriod_Interval interface {
isKeywordPlanForecastPeriod_Interval()
}
type KeywordPlanForecastPeriod_DateInterval struct {
DateInterval enums.KeywordPlanForecastIntervalEnum_KeywordPlanForecastInterval `protobuf:"varint,1,opt,name=date_interval,json=dateInterval,proto3,enum=google.ads.googleads.v2.enums.KeywordPlanForecastIntervalEnum_KeywordPlanForecastInterval,oneof"`
}
type KeywordPlanForecastPeriod_DateRange struct {
DateRange *common.DateRange `protobuf:"bytes,2,opt,name=date_range,json=dateRange,proto3,oneof"`
}
func (*KeywordPlanForecastPeriod_DateInterval) isKeywordPlanForecastPeriod_Interval() {}
func (*KeywordPlanForecastPeriod_DateRange) isKeywordPlanForecastPeriod_Interval() {}
func (m *KeywordPlanForecastPeriod) GetInterval() isKeywordPlanForecastPeriod_Interval {
if m != nil {
return m.Interval
}
return nil
}
func (m *KeywordPlanForecastPeriod) GetDateInterval() enums.KeywordPlanForecastIntervalEnum_KeywordPlanForecastInterval {
if x, ok := m.GetInterval().(*KeywordPlanForecastPeriod_DateInterval); ok {
return x.DateInterval
}
return enums.KeywordPlanForecastIntervalEnum_UNSPECIFIED
}
func (m *KeywordPlanForecastPeriod) GetDateRange() *common.DateRange {
if x, ok := m.GetInterval().(*KeywordPlanForecastPeriod_DateRange); ok {
return x.DateRange
}
return nil
}
// XXX_OneofWrappers is for the internal use of the proto package.
func (*KeywordPlanForecastPeriod) XXX_OneofWrappers() []interface{} {
return []interface{}{
(*KeywordPlanForecastPeriod_DateInterval)(nil),
(*KeywordPlanForecastPeriod_DateRange)(nil),
}
}
func init() {
proto.RegisterType((*KeywordPlan)(nil), "google.ads.googleads.v2.resources.KeywordPlan")
proto.RegisterType((*KeywordPlanForecastPeriod)(nil), "google.ads.googleads.v2.resources.KeywordPlanForecastPeriod")
}
func init() {
proto.RegisterFile("google/ads/googleads/v2/resources/keyword_plan.proto", fileDescriptor_db2ef87e79a4b462)
}
var fileDescriptor_db2ef87e79a4b462 = []byte{
// 485 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x93, 0xdd, 0x6a, 0xd4, 0x40,
0x14, 0xc7, 0x9b, 0xb4, 0x88, 0x9d, 0x7e, 0x28, 0xb9, 0x5a, 0x6b, 0x91, 0xb6, 0x52, 0xa8, 0x0a,
0x13, 0x89, 0xc5, 0x8b, 0xe8, 0x4d, 0x16, 0xb5, 0x1f, 0x82, 0x2c, 0x11, 0xf6, 0xa2, 0x2c, 0x2c,
0xd3, 0x9d, 0xb3, 0x21, 0x98, 0xcc, 0x84, 0x99, 0xc9, 0x16, 0x2f, 0x7d, 0x15, 0x2f, 0x7d, 0x14,
0x1f, 0xc5, 0x17, 0xd0, 0x1b, 0x41, 0x32, 0x5f, 0xb4, 0xd8, 0x74, 0xef, 0xce, 0xd9, 0xf9, 0x9d,
0xff, 0xff, 0x7c, 0x64, 0xd1, 0x71, 0xc1, 0x79, 0x51, 0x41, 0x4c, 0xa8, 0x8c, 0x4d, 0xd8, 0x45,
0x8b, 0x24, 0x16, 0x20, 0x79, 0x2b, 0x66, 0x20, 0xe3, 0x2f, 0xf0, 0xf5, 0x8a, 0x0b, 0x3a, 0x6d,
0x2a, 0xc2, 0x70, 0x23, 0xb8, 0xe2, 0xd1, 0xbe, 0x41, 0x31, 0xa1, 0x12, 0xfb, 0x2a, 0xbc, 0x48,
0xb0, 0xaf, 0xda, 0x79, 0xde, 0x27, 0x3c, 0xe3, 0x75, 0xcd, 0x59, 0x4c, 0x89, 0x02, 0x69, 0xe4,
0x76, 0x86, 0x7d, 0x2c, 0xb0, 0xb6, 0xbe, 0xd9, 0xc0, 0x74, 0xce, 0x05, 0xcc, 0x88, 0x54, 0xd3,
0x92, 0x29, 0x10, 0x0b, 0x52, 0x5 | return xxx_messageInfo_KeywordPlan.Marshal(b, m, deterministic)
}
func (m *KeywordPlan) XXX_Merge(src proto.Message) {
| random_line_split | |
main.go | _workload(workload_path string) bool {
return strings.HasSuffix(workload_path, "bda.json")
}
func is_hpc_workload(workload_path string) bool {
return strings.HasSuffix(workload_path, "hpc.json")
}
func getWorkloadID(id string) string |
func recvBatsimMessage(socket *zmq.Socket) ([]byte, BatMessage) {
msg, err = socket.RecvBytes(0)
if err != nil {
panic("Error while receiving Batsim message: " + err.Error())
}
// reset message structure
jmsg = BatMessage{}
if err := json.Unmarshal(msg, &jmsg); err != nil {
panic(err)
}
return msg, jmsg
}
func removeEvents(to_remove_indexes []int, events *[]Event) {
// Do a reverse range to avoid index error
last := len(to_remove_indexes)-1
for i := range to_remove_indexes {
reverse_i := to_remove_indexes[last - i]
(*events) = append(
(*events)[:reverse_i],
(*events)[reverse_i+1:]...
)
}
}
func main() {
bat_host := "127.0.0.1"
bat_port := "28000"
bat_sock := NewReplySocket(bat_host, bat_port)
defer bat_sock.Close()
hpc_host := "127.0.0.1"
hpc_port := "28001"
hpc_sock := NewRequestSocket(hpc_host, hpc_port)
defer hpc_sock.Close()
bda_host := "127.0.0.1"
bda_port := "28002"
bda_sock := NewRequestSocket(bda_host, bda_port)
defer bda_sock.Close()
hpc_workload := "Not found"
bda_workload := "Not found"
var bda_reply BatMessage
var hpc_reply BatMessage
var bda_events []Event
var hpc_events []Event
var common_events []Event
// var epilog_blocked_hpc_events []Event
var to_remove_indexes []int
var now float64
var err error
this_is_the_end := false
resumited_bda_workload := "resubmit"
prolog_blocked_hpc_events := map[string]Event{}
// main loop
for !this_is_the_end {
// clean structures
hpc_events = []Event{}
bda_events = []Event{}
common_events = []Event{}
jmsg = BatMessage{}
bda_reply = BatMessage{}
hpc_reply = BatMessage{}
msg, err = bat_sock.RecvBytes(0)
if err != nil {
panic("Error while receiving Batsim message: " + err.Error())
}
if err := json.Unmarshal(msg, &jmsg); err != nil {
panic(err)
}
fmt.Println("Batsim -> Broker:\n", string(msg))
// BATSIM --> BROKER
// Inspect Batsim request
now = jmsg.Now
for _, event := range jmsg.Events {
switch event.Type {
case "SIMULATION_BEGINS":
{
fmt.Println("Hello Batsim!")
// get workload/scheduler mapping
for id, path := range event.Data["workloads"].(map[string]interface{}) {
if is_hpc_workload(path.(string)) {
hpc_workload = id
} else if is_bda_workload(path.(string)) {
bda_workload = id
}
}
fmt.Println("HPC Workload id is: ", hpc_workload)
fmt.Println("BDA Workload id is: ", bda_workload)
common_events = append(common_events, event)
}
case "JOB_SUBMITTED":
{
// Split message events using workload id
switch getWorkloadID(event.Data["job_id"].(string)) {
case hpc_workload:
{
hpc_events = append(hpc_events, event)
}
// WARN Dynamically submitted jobs are always given to BDA
case bda_workload, resumited_bda_workload:
{
bda_events = append(bda_events, event)
}
default:
panic("This event should go somewhere!")
}
}
case "JOB_KILLED":
{
// Split message events using first job workload id
// FIXME check if all jobs are from the same workload
switch getWorkloadID(event.Data["job_ids"].([]interface{})[0].(string)) {
case hpc_workload:
{
hpc_events = append(hpc_events, event)
}
case bda_workload, resumited_bda_workload:
{
bda_events = append(bda_events, event)
}
}
}
case "JOB_COMPLETED":
{
// Split message events using workload id
switch getWorkloadID(event.Data["job_id"].(string)) {
case hpc_workload:
{
// manage HPC jobs epilog here
fmt.Println("Trigger HPC job epilog for resources: ", event.Data["alloc"])
// Give back the allocated resources to BDA
new_event := Event{
Timestamp: now,
Type: "ADD_RESOURCES",
Data: map[string]interface{}{"resources": event.Data["alloc"]},
}
bda_events = append(bda_events, new_event)
// wait for the resources to be added to the BDA resource
// pool before notifiing the HPC scheduler that the job is
// complete
//epilog_blocked_hpc_events = append(epilog_blocked_hpc_events, event)
hpc_events = append(hpc_events, event)
}
case bda_workload, resumited_bda_workload:
{
bda_events = append(bda_events, event)
}
}
}
case "SIMULATION_ENDS":
{
fmt.Println("Bye Bye!")
common_events = append(common_events, event)
this_is_the_end = true
}
}
}
// Forward the message to one scheduler or both depending on the workload id
// And receive response from both (send empty event if nothing to send
// to sync time)
//
// /--- HPC
// BROKER --
// \--- BDA
// merge HPC specific events and common events
hpc_events = append(hpc_events, common_events...)
// create the message
msg, err = json.Marshal(BatMessage{Now: now, Events: hpc_events})
// send
hpc_sock.SendBytes(msg, 0)
fmt.Println("Broker -> HPC:\n", string(msg))
// get reply
hpc_reply_json, hpc_reply = recvBatsimMessage(hpc_sock)
fmt.Println("Broker <= HPC:\n", string(hpc_reply_json))
// Inspect HPC response
to_remove_indexes = []int{}
for index, event := range hpc_reply.Events {
switch event.Type {
case "EXECUTE_JOB":
{
// Trigger HPC job prolog here
// Run Bebida HPC job prolog
fmt.Println("Trigger HPC job prolog for resources: ", event.Data["alloc"])
// Ask BDA to remove allocated resources
new_event := Event{
Timestamp: hpc_reply.Now,
Type: "REMOVE_RESOURCES",
Data: map[string]interface{}{"resources": event.Data["alloc"]},
}
bda_events = append(bda_events, new_event)
prolog_blocked_hpc_events[event.Data["alloc"].(string)] = event
to_remove_indexes = append(to_remove_indexes, index)
}
}
}
// Hold events by removing them from events to forward
removeEvents(to_remove_indexes, &hpc_reply.Events)
// merge BDA specific events and common events
bda_events = append(bda_events, common_events...)
// create the message
msg, err = json.Marshal(BatMessage{Now: now, Events: bda_events})
// send
bda_sock.SendBytes(msg, 0)
fmt.Println("Broker -> BDA:\n", string(msg))
// get reply
bda_reply_json, bda_reply = recvBatsimMessage(bda_sock)
fmt.Println("Broker <= BDA:\n", string(bda_reply_json))
// Inspect BDA reply
to_remove_indexes = []int{}
for index, event := range bda_reply.Events {
switch event.Type {
case "RESOURCES_REMOVED":
{
// End of prolog: Resource removed event from BDA so release the message
// get blocked event from allocation
to_add_event := prolog_blocked_hpc_events[event.Data["resources"].(string)]
if to_add_event.Data["alloc"] == nil {
panic(fmt.Sprintf("Error in prolog: The resource removed ack (%s) seems to be already acknowledged", event.Data["resources"]))
| {
return strings.Split(id, "!")[0]
} | identifier_body |
main.go | _workload(workload_path string) bool {
return strings.HasSuffix(workload_path, "bda.json")
}
func is_hpc_workload(workload_path string) bool {
return strings.HasSuffix(workload_path, "hpc.json")
}
func getWorkloadID(id string) string {
return strings.Split(id, "!")[0]
}
func recvBatsimMessage(socket *zmq.Socket) ([]byte, BatMessage) {
msg, err = socket.RecvBytes(0)
if err != nil {
panic("Error while receiving Batsim message: " + err.Error())
}
// reset message structure
jmsg = BatMessage{}
if err := json.Unmarshal(msg, &jmsg); err != nil {
panic(err)
}
return msg, jmsg
}
func removeEvents(to_remove_indexes []int, events *[]Event) {
// Do a reverse range to avoid index error
last := len(to_remove_indexes)-1
for i := range to_remove_indexes {
reverse_i := to_remove_indexes[last - i]
(*events) = append(
(*events)[:reverse_i],
(*events)[reverse_i+1:]...
)
}
}
func main() {
bat_host := "127.0.0.1"
bat_port := "28000"
bat_sock := NewReplySocket(bat_host, bat_port)
defer bat_sock.Close()
hpc_host := "127.0.0.1"
hpc_port := "28001"
hpc_sock := NewRequestSocket(hpc_host, hpc_port)
defer hpc_sock.Close()
bda_host := "127.0.0.1"
bda_port := "28002"
bda_sock := NewRequestSocket(bda_host, bda_port)
defer bda_sock.Close()
hpc_workload := "Not found"
bda_workload := "Not found"
var bda_reply BatMessage
var hpc_reply BatMessage
var bda_events []Event
var hpc_events []Event
var common_events []Event
// var epilog_blocked_hpc_events []Event
var to_remove_indexes []int
var now float64
var err error
this_is_the_end := false
resumited_bda_workload := "resubmit"
prolog_blocked_hpc_events := map[string]Event{}
// main loop
for !this_is_the_end | // Inspect Batsim request
now = jmsg.Now
for _, event := range jmsg.Events {
switch event.Type {
case "SIMULATION_BEGINS":
{
fmt.Println("Hello Batsim!")
// get workload/scheduler mapping
for id, path := range event.Data["workloads"].(map[string]interface{}) {
if is_hpc_workload(path.(string)) {
hpc_workload = id
} else if is_bda_workload(path.(string)) {
bda_workload = id
}
}
fmt.Println("HPC Workload id is: ", hpc_workload)
fmt.Println("BDA Workload id is: ", bda_workload)
common_events = append(common_events, event)
}
case "JOB_SUBMITTED":
{
// Split message events using workload id
switch getWorkloadID(event.Data["job_id"].(string)) {
case hpc_workload:
{
hpc_events = append(hpc_events, event)
}
// WARN Dynamically submitted jobs are always given to BDA
case bda_workload, resumited_bda_workload:
{
bda_events = append(bda_events, event)
}
default:
panic("This event should go somewhere!")
}
}
case "JOB_KILLED":
{
// Split message events using first job workload id
// FIXME check if all jobs are from the same workload
switch getWorkloadID(event.Data["job_ids"].([]interface{})[0].(string)) {
case hpc_workload:
{
hpc_events = append(hpc_events, event)
}
case bda_workload, resumited_bda_workload:
{
bda_events = append(bda_events, event)
}
}
}
case "JOB_COMPLETED":
{
// Split message events using workload id
switch getWorkloadID(event.Data["job_id"].(string)) {
case hpc_workload:
{
// manage HPC jobs epilog here
fmt.Println("Trigger HPC job epilog for resources: ", event.Data["alloc"])
// Give back the allocated resources to BDA
new_event := Event{
Timestamp: now,
Type: "ADD_RESOURCES",
Data: map[string]interface{}{"resources": event.Data["alloc"]},
}
bda_events = append(bda_events, new_event)
// wait for the resources to be added to the BDA resource
// pool before notifiing the HPC scheduler that the job is
// complete
//epilog_blocked_hpc_events = append(epilog_blocked_hpc_events, event)
hpc_events = append(hpc_events, event)
}
case bda_workload, resumited_bda_workload:
{
bda_events = append(bda_events, event)
}
}
}
case "SIMULATION_ENDS":
{
fmt.Println("Bye Bye!")
common_events = append(common_events, event)
this_is_the_end = true
}
}
}
// Forward the message to one scheduler or both depending on the workload id
// And receive response from both (send empty event if nothing to send
// to sync time)
//
// /--- HPC
// BROKER --
// \--- BDA
// merge HPC specific events and common events
hpc_events = append(hpc_events, common_events...)
// create the message
msg, err = json.Marshal(BatMessage{Now: now, Events: hpc_events})
// send
hpc_sock.SendBytes(msg, 0)
fmt.Println("Broker -> HPC:\n", string(msg))
// get reply
hpc_reply_json, hpc_reply = recvBatsimMessage(hpc_sock)
fmt.Println("Broker <= HPC:\n", string(hpc_reply_json))
// Inspect HPC response
to_remove_indexes = []int{}
for index, event := range hpc_reply.Events {
switch event.Type {
case "EXECUTE_JOB":
{
// Trigger HPC job prolog here
// Run Bebida HPC job prolog
fmt.Println("Trigger HPC job prolog for resources: ", event.Data["alloc"])
// Ask BDA to remove allocated resources
new_event := Event{
Timestamp: hpc_reply.Now,
Type: "REMOVE_RESOURCES",
Data: map[string]interface{}{"resources": event.Data["alloc"]},
}
bda_events = append(bda_events, new_event)
prolog_blocked_hpc_events[event.Data["alloc"].(string)] = event
to_remove_indexes = append(to_remove_indexes, index)
}
}
}
// Hold events by removing them from events to forward
removeEvents(to_remove_indexes, &hpc_reply.Events)
// merge BDA specific events and common events
bda_events = append(bda_events, common_events...)
// create the message
msg, err = json.Marshal(BatMessage{Now: now, Events: bda_events})
// send
bda_sock.SendBytes(msg, 0)
fmt.Println("Broker -> BDA:\n", string(msg))
// get reply
bda_reply_json, bda_reply = recvBatsimMessage(bda_sock)
fmt.Println("Broker <= BDA:\n", string(bda_reply_json))
// Inspect BDA reply
to_remove_indexes = []int{}
for index, event := range bda_reply.Events {
switch event.Type {
case "RESOURCES_REMOVED":
{
// End of prolog: Resource removed event from BDA so release the message
// get blocked event from allocation
to_add_event := prolog_blocked_hpc_events[event.Data["resources"].(string)]
if to_add_event.Data["alloc"] == nil {
panic(fmt.Sprintf("Error in prolog: The resource removed ack (%s) seems to be already acknowledged", event.Data["resources"]))
| {
// clean structures
hpc_events = []Event{}
bda_events = []Event{}
common_events = []Event{}
jmsg = BatMessage{}
bda_reply = BatMessage{}
hpc_reply = BatMessage{}
msg, err = bat_sock.RecvBytes(0)
if err != nil {
panic("Error while receiving Batsim message: " + err.Error())
}
if err := json.Unmarshal(msg, &jmsg); err != nil {
panic(err)
}
fmt.Println("Batsim -> Broker:\n", string(msg))
// BATSIM --> BROKER | conditional_block |
main.go | _workload(workload_path string) bool {
return strings.HasSuffix(workload_path, "bda.json")
}
func is_hpc_workload(workload_path string) bool {
return strings.HasSuffix(workload_path, "hpc.json")
}
func getWorkloadID(id string) string {
return strings.Split(id, "!")[0]
}
func | (socket *zmq.Socket) ([]byte, BatMessage) {
msg, err = socket.RecvBytes(0)
if err != nil {
panic("Error while receiving Batsim message: " + err.Error())
}
// reset message structure
jmsg = BatMessage{}
if err := json.Unmarshal(msg, &jmsg); err != nil {
panic(err)
}
return msg, jmsg
}
func removeEvents(to_remove_indexes []int, events *[]Event) {
// Do a reverse range to avoid index error
last := len(to_remove_indexes)-1
for i := range to_remove_indexes {
reverse_i := to_remove_indexes[last - i]
(*events) = append(
(*events)[:reverse_i],
(*events)[reverse_i+1:]...
)
}
}
func main() {
bat_host := "127.0.0.1"
bat_port := "28000"
bat_sock := NewReplySocket(bat_host, bat_port)
defer bat_sock.Close()
hpc_host := "127.0.0.1"
hpc_port := "28001"
hpc_sock := NewRequestSocket(hpc_host, hpc_port)
defer hpc_sock.Close()
bda_host := "127.0.0.1"
bda_port := "28002"
bda_sock := NewRequestSocket(bda_host, bda_port)
defer bda_sock.Close()
hpc_workload := "Not found"
bda_workload := "Not found"
var bda_reply BatMessage
var hpc_reply BatMessage
var bda_events []Event
var hpc_events []Event
var common_events []Event
// var epilog_blocked_hpc_events []Event
var to_remove_indexes []int
var now float64
var err error
this_is_the_end := false
resumited_bda_workload := "resubmit"
prolog_blocked_hpc_events := map[string]Event{}
// main loop
for !this_is_the_end {
// clean structures
hpc_events = []Event{}
bda_events = []Event{}
common_events = []Event{}
jmsg = BatMessage{}
bda_reply = BatMessage{}
hpc_reply = BatMessage{}
msg, err = bat_sock.RecvBytes(0)
if err != nil {
panic("Error while receiving Batsim message: " + err.Error())
}
if err := json.Unmarshal(msg, &jmsg); err != nil {
panic(err)
}
fmt.Println("Batsim -> Broker:\n", string(msg))
// BATSIM --> BROKER
// Inspect Batsim request
now = jmsg.Now
for _, event := range jmsg.Events {
switch event.Type {
case "SIMULATION_BEGINS":
{
fmt.Println("Hello Batsim!")
// get workload/scheduler mapping
for id, path := range event.Data["workloads"].(map[string]interface{}) {
if is_hpc_workload(path.(string)) {
hpc_workload = id
} else if is_bda_workload(path.(string)) {
bda_workload = id
}
}
fmt.Println("HPC Workload id is: ", hpc_workload)
fmt.Println("BDA Workload id is: ", bda_workload)
common_events = append(common_events, event)
}
case "JOB_SUBMITTED":
{
// Split message events using workload id
switch getWorkloadID(event.Data["job_id"].(string)) {
case hpc_workload:
{
hpc_events = append(hpc_events, event)
}
// WARN Dynamically submitted jobs are always given to BDA
case bda_workload, resumited_bda_workload:
{
bda_events = append(bda_events, event)
}
default:
panic("This event should go somewhere!")
}
}
case "JOB_KILLED":
{
// Split message events using first job workload id
// FIXME check if all jobs are from the same workload
switch getWorkloadID(event.Data["job_ids"].([]interface{})[0].(string)) {
case hpc_workload:
{
hpc_events = append(hpc_events, event)
}
case bda_workload, resumited_bda_workload:
{
bda_events = append(bda_events, event)
}
}
}
case "JOB_COMPLETED":
{
// Split message events using workload id
switch getWorkloadID(event.Data["job_id"].(string)) {
case hpc_workload:
{
// manage HPC jobs epilog here
fmt.Println("Trigger HPC job epilog for resources: ", event.Data["alloc"])
// Give back the allocated resources to BDA
new_event := Event{
Timestamp: now,
Type: "ADD_RESOURCES",
Data: map[string]interface{}{"resources": event.Data["alloc"]},
}
bda_events = append(bda_events, new_event)
// wait for the resources to be added to the BDA resource
// pool before notifiing the HPC scheduler that the job is
// complete
//epilog_blocked_hpc_events = append(epilog_blocked_hpc_events, event)
hpc_events = append(hpc_events, event)
}
case bda_workload, resumited_bda_workload:
{
bda_events = append(bda_events, event)
}
}
}
case "SIMULATION_ENDS":
{
fmt.Println("Bye Bye!")
common_events = append(common_events, event)
this_is_the_end = true
}
}
}
// Forward the message to one scheduler or both depending on the workload id
// And receive response from both (send empty event if nothing to send
// to sync time)
//
// /--- HPC
// BROKER --
// \--- BDA
// merge HPC specific events and common events
hpc_events = append(hpc_events, common_events...)
// create the message
msg, err = json.Marshal(BatMessage{Now: now, Events: hpc_events})
// send
hpc_sock.SendBytes(msg, 0)
fmt.Println("Broker -> HPC:\n", string(msg))
// get reply
hpc_reply_json, hpc_reply = recvBatsimMessage(hpc_sock)
fmt.Println("Broker <= HPC:\n", string(hpc_reply_json))
// Inspect HPC response
to_remove_indexes = []int{}
for index, event := range hpc_reply.Events {
switch event.Type {
case "EXECUTE_JOB":
{
// Trigger HPC job prolog here
// Run Bebida HPC job prolog
fmt.Println("Trigger HPC job prolog for resources: ", event.Data["alloc"])
// Ask BDA to remove allocated resources
new_event := Event{
Timestamp: hpc_reply.Now,
Type: "REMOVE_RESOURCES",
Data: map[string]interface{}{"resources": event.Data["alloc"]},
}
bda_events = append(bda_events, new_event)
prolog_blocked_hpc_events[event.Data["alloc"].(string)] = event
to_remove_indexes = append(to_remove_indexes, index)
}
}
}
// Hold events by removing them from events to forward
removeEvents(to_remove_indexes, &hpc_reply.Events)
// merge BDA specific events and common events
bda_events = append(bda_events, common_events...)
// create the message
msg, err = json.Marshal(BatMessage{Now: now, Events: bda_events})
// send
bda_sock.SendBytes(msg, 0)
fmt.Println("Broker -> BDA:\n", string(msg))
// get reply
bda_reply_json, bda_reply = recvBatsimMessage(bda_sock)
fmt.Println("Broker <= BDA:\n", string(bda_reply_json))
// Inspect BDA reply
to_remove_indexes = []int{}
for index, event := range bda_reply.Events {
switch event.Type {
case "RESOURCES_REMOVED":
{
// End of prolog: Resource removed event from BDA so release the message
// get blocked event from allocation
to_add_event := prolog_blocked_hpc_events[event.Data["resources"].(string)]
if to_add_event.Data["alloc"] == nil {
panic(fmt.Sprintf("Error in prolog: The resource removed ack (%s) seems to be already acknowledged", event.Data["resources"]))
| recvBatsimMessage | identifier_name |
main.go | _workload(workload_path string) bool {
return strings.HasSuffix(workload_path, "bda.json")
}
func is_hpc_workload(workload_path string) bool {
return strings.HasSuffix(workload_path, "hpc.json")
}
func getWorkloadID(id string) string {
return strings.Split(id, "!")[0]
}
func recvBatsimMessage(socket *zmq.Socket) ([]byte, BatMessage) {
msg, err = socket.RecvBytes(0)
if err != nil {
panic("Error while receiving Batsim message: " + err.Error())
}
// reset message structure
jmsg = BatMessage{}
if err := json.Unmarshal(msg, &jmsg); err != nil {
panic(err)
}
return msg, jmsg
}
func removeEvents(to_remove_indexes []int, events *[]Event) {
// Do a reverse range to avoid index error
last := len(to_remove_indexes)-1
for i := range to_remove_indexes {
reverse_i := to_remove_indexes[last - i]
(*events) = append(
(*events)[:reverse_i],
(*events)[reverse_i+1:]...
)
}
}
func main() {
bat_host := "127.0.0.1"
bat_port := "28000"
bat_sock := NewReplySocket(bat_host, bat_port)
defer bat_sock.Close()
hpc_host := "127.0.0.1"
hpc_port := "28001"
hpc_sock := NewRequestSocket(hpc_host, hpc_port)
defer hpc_sock.Close()
bda_host := "127.0.0.1"
bda_port := "28002"
bda_sock := NewRequestSocket(bda_host, bda_port)
defer bda_sock.Close()
hpc_workload := "Not found"
bda_workload := "Not found"
var bda_reply BatMessage
var hpc_reply BatMessage
var bda_events []Event
var hpc_events []Event
var common_events []Event
// var epilog_blocked_hpc_events []Event
var to_remove_indexes []int
var now float64
var err error
this_is_the_end := false
resumited_bda_workload := "resubmit"
prolog_blocked_hpc_events := map[string]Event{}
// main loop
for !this_is_the_end {
// clean structures
hpc_events = []Event{}
bda_events = []Event{}
common_events = []Event{}
jmsg = BatMessage{}
bda_reply = BatMessage{}
hpc_reply = BatMessage{}
msg, err = bat_sock.RecvBytes(0)
if err != nil {
panic("Error while receiving Batsim message: " + err.Error())
}
if err := json.Unmarshal(msg, &jmsg); err != nil {
panic(err)
}
fmt.Println("Batsim -> Broker:\n", string(msg))
// BATSIM --> BROKER
// Inspect Batsim request
now = jmsg.Now
for _, event := range jmsg.Events {
switch event.Type {
case "SIMULATION_BEGINS":
{
fmt.Println("Hello Batsim!")
// get workload/scheduler mapping
for id, path := range event.Data["workloads"].(map[string]interface{}) {
if is_hpc_workload(path.(string)) {
hpc_workload = id | bda_workload = id
}
}
fmt.Println("HPC Workload id is: ", hpc_workload)
fmt.Println("BDA Workload id is: ", bda_workload)
common_events = append(common_events, event)
}
case "JOB_SUBMITTED":
{
// Split message events using workload id
switch getWorkloadID(event.Data["job_id"].(string)) {
case hpc_workload:
{
hpc_events = append(hpc_events, event)
}
// WARN Dynamically submitted jobs are always given to BDA
case bda_workload, resumited_bda_workload:
{
bda_events = append(bda_events, event)
}
default:
panic("This event should go somewhere!")
}
}
case "JOB_KILLED":
{
// Split message events using first job workload id
// FIXME check if all jobs are from the same workload
switch getWorkloadID(event.Data["job_ids"].([]interface{})[0].(string)) {
case hpc_workload:
{
hpc_events = append(hpc_events, event)
}
case bda_workload, resumited_bda_workload:
{
bda_events = append(bda_events, event)
}
}
}
case "JOB_COMPLETED":
{
// Split message events using workload id
switch getWorkloadID(event.Data["job_id"].(string)) {
case hpc_workload:
{
// manage HPC jobs epilog here
fmt.Println("Trigger HPC job epilog for resources: ", event.Data["alloc"])
// Give back the allocated resources to BDA
new_event := Event{
Timestamp: now,
Type: "ADD_RESOURCES",
Data: map[string]interface{}{"resources": event.Data["alloc"]},
}
bda_events = append(bda_events, new_event)
// wait for the resources to be added to the BDA resource
// pool before notifiing the HPC scheduler that the job is
// complete
//epilog_blocked_hpc_events = append(epilog_blocked_hpc_events, event)
hpc_events = append(hpc_events, event)
}
case bda_workload, resumited_bda_workload:
{
bda_events = append(bda_events, event)
}
}
}
case "SIMULATION_ENDS":
{
fmt.Println("Bye Bye!")
common_events = append(common_events, event)
this_is_the_end = true
}
}
}
// Forward the message to one scheduler or both depending on the workload id
// And receive response from both (send empty event if nothing to send
// to sync time)
//
// /--- HPC
// BROKER --
// \--- BDA
// merge HPC specific events and common events
hpc_events = append(hpc_events, common_events...)
// create the message
msg, err = json.Marshal(BatMessage{Now: now, Events: hpc_events})
// send
hpc_sock.SendBytes(msg, 0)
fmt.Println("Broker -> HPC:\n", string(msg))
// get reply
hpc_reply_json, hpc_reply = recvBatsimMessage(hpc_sock)
fmt.Println("Broker <= HPC:\n", string(hpc_reply_json))
// Inspect HPC response
to_remove_indexes = []int{}
for index, event := range hpc_reply.Events {
switch event.Type {
case "EXECUTE_JOB":
{
// Trigger HPC job prolog here
// Run Bebida HPC job prolog
fmt.Println("Trigger HPC job prolog for resources: ", event.Data["alloc"])
// Ask BDA to remove allocated resources
new_event := Event{
Timestamp: hpc_reply.Now,
Type: "REMOVE_RESOURCES",
Data: map[string]interface{}{"resources": event.Data["alloc"]},
}
bda_events = append(bda_events, new_event)
prolog_blocked_hpc_events[event.Data["alloc"].(string)] = event
to_remove_indexes = append(to_remove_indexes, index)
}
}
}
// Hold events by removing them from events to forward
removeEvents(to_remove_indexes, &hpc_reply.Events)
// merge BDA specific events and common events
bda_events = append(bda_events, common_events...)
// create the message
msg, err = json.Marshal(BatMessage{Now: now, Events: bda_events})
// send
bda_sock.SendBytes(msg, 0)
fmt.Println("Broker -> BDA:\n", string(msg))
// get reply
bda_reply_json, bda_reply = recvBatsimMessage(bda_sock)
fmt.Println("Broker <= BDA:\n", string(bda_reply_json))
// Inspect BDA reply
to_remove_indexes = []int{}
for index, event := range bda_reply.Events {
switch event.Type {
case "RESOURCES_REMOVED":
{
// End of prolog: Resource removed event from BDA so release the message
// get blocked event from allocation
to_add_event := prolog_blocked_hpc_events[event.Data["resources"].(string)]
if to_add_event.Data["alloc"] == nil {
panic(fmt.Sprintf("Error in prolog: The resource removed ack (%s) seems to be already acknowledged", event.Data["resources"]))
|
} else if is_bda_workload(path.(string)) { | random_line_split |
qutex.rs | derive(Debug)]
pub struct Guard<T> {
qutex: Qutex<T>,
}
impl<T> Guard<T> {
/// Releases the lock held by a `Guard` and returns the original `Qutex`.
pub fn unlock(guard: Guard<T>) -> Qutex<T> {
let qutex = unsafe { ::std::ptr::read(&guard.qutex) };
::std::mem::forget(guard);
unsafe { qutex.direct_unlock() }
qutex
}
}
impl<T> Deref for Guard<T> {
type Target = T;
fn deref(&self) -> &T {
unsafe { &*self.qutex.inner.cell.get() }
}
}
impl<T> DerefMut for Guard<T> {
fn deref_mut(&mut self) -> &mut T {
unsafe { &mut *self.qutex.inner.cell.get() }
}
}
impl<T> Drop for Guard<T> {
fn drop(&mut self) {
// unsafe { self.qutex.direct_unlock().expect("Error dropping Guard") };
unsafe { self.qutex.direct_unlock() }
}
}
/// A future which resolves to a `Guard`.
#[must_use = "futures do nothing unless polled"]
#[derive(Debug)]
pub struct FutureGuard<T> {
qutex: Option<Qutex<T>>,
rx: Receiver<()>,
}
impl<T> FutureGuard<T> {
/// Returns a new `FutureGuard`.
fn new(qutex: Qutex<T>, rx: Receiver<()>) -> FutureGuard<T> {
FutureGuard {
qutex: Some(qutex),
rx: rx,
}
}
/// Blocks the current thread until this future resolves.
#[inline]
pub fn wait(self) -> Result<Guard<T>, Canceled> {
<Self as Future>::wait(self)
}
}
impl<T> Future for FutureGuard<T> {
type Item = Guard<T>;
type Error = Canceled;
#[inline]
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
if self.qutex.is_some() {
unsafe { self.qutex.as_ref().unwrap().process_queue() }
match self.rx.poll() {
Ok(status) => Ok(status.map(|_| Guard {
qutex: self.qutex.take().unwrap(),
})),
Err(e) => Err(e.into()),
}
} else {
panic!("FutureGuard::poll: Task already completed.");
}
}
}
impl<T> Drop for FutureGuard<T> {
/// Gracefully unlock if this guard has a lock acquired but has not yet
/// been polled to completion.
fn drop(&mut self) {
if let Some(qutex) = self.qutex.take() {
self.rx.close();
match self.rx.try_recv() {
Ok(status) => {
if status.is_some() {
unsafe {
qutex.direct_unlock();
}
}
}
Err(_) => (),
}
}
}
}
/// A request to lock the qutex for exclusive access.
#[derive(Debug)]
pub struct Request {
tx: Sender<()>,
}
impl Request {
/// Returns a new `Request`.
pub fn new(tx: Sender<()>) -> Request {
Request { tx: tx }
}
}
#[derive(Debug)]
struct Inner<T> {
// TODO: Convert to `AtomicBool` if no additional states are needed:
state: AtomicUsize,
cell: UnsafeCell<T>,
queue: SegQueue<Request>,
}
impl<T> From<T> for Inner<T> {
#[inline]
fn from(val: T) -> Inner<T> {
Inner {
state: AtomicUsize::new(0),
cell: UnsafeCell::new(val),
queue: SegQueue::new(),
}
}
}
unsafe impl<T: Send> Send for Inner<T> {}
unsafe impl<T: Send> Sync for Inner<T> {}
/// A lock-free-queue-backed exclusive data lock.
#[derive(Debug)]
pub struct Qutex<T> {
inner: Arc<Inner<T>>,
}
impl<T> Qutex<T> {
/// Creates and returns a new `Qutex`.
#[inline]
pub fn new(val: T) -> Qutex<T> {
Qutex {
inner: Arc::new(Inner::from(val)),
}
}
/// Returns a new `FutureGuard` which can be used as a future and will
/// resolve into a `Guard`.
pub fn lock(self) -> FutureGuard<T> {
let (tx, rx) = oneshot::channel();
unsafe {
self.push_request(Request::new(tx));
}
FutureGuard::new(self, rx)
}
/// Pushes a lock request onto the queue.
///
//
// TODO: Evaluate unsafe-ness.
//
#[inline]
pub unsafe fn push_request(&self, req: Request) {
self.inner.queue.push(req);
}
/// Returns a mutable reference to the inner `Vec` if there are currently
/// no other copies of this `Qutex`.
///
/// Since this call borrows the inner lock mutably, no actual locking needs to
/// take place---the mutable borrow statically guarantees no locks exist.
///
#[inline]
pub fn get_mut(&mut self) -> Option<&mut T> {
Arc::get_mut(&mut self.inner).map(|inn| unsafe { &mut *inn.cell.get() })
}
/// Returns a reference to the inner value.
///
#[inline]
pub fn as_ptr(&self) -> *const T {
self.inner.cell.get()
}
/// Returns a mutable reference to the inner value.
///
#[inline]
pub fn as_mut_ptr(&self) -> *mut T {
self.inner.cell.get()
}
/// Pops the next lock request in the queue if this (the caller's) lock is
/// unlocked.
//
// TODO:
// * This is currently public due to 'derivers' (aka. sub-types). Evaluate.
// * Consider removing unsafe qualifier.
// * Return proper error type.
// * [performance] Determine whether or not `compare_exchange_weak` should be used instead.
// * [performance] Consider failure ordering.
//
pub unsafe fn process_queue(&self) {
match self.inner.state.compare_exchange(0, 1, SeqCst, SeqCst) {
// Unlocked:
Ok(0) => {
loop {
if let Some(req) = self.inner.queue.pop() {
// If there is a send error, a requester has dropped
// its receiver so just go to the next.
if req.tx.send(()).is_err() {
continue;
} else {
break;
}
} else {
self.inner.state.store(0, SeqCst);
break;
}
}
}
// Already locked, leave it alone:
Err(1) => (),
// Already locked, leave it alone:
//
// TODO: Remove this option. Should be unreachable.
//
Ok(1) => unreachable!(),
// Something else:
Ok(n) => panic!("Qutex::process_queue: inner.state: {}.", n),
Err(n) => panic!("Qutex::process_queue: error: {}.", n),
}
}
/// Unlocks this (the caller's) lock and wakes up the next task in the
/// queue.
//
// TODO:
// * Evaluate unsafe-ness.
// * Return proper error type
// pub unsafe fn direct_unlock(&self) -> Result<(), ()> {
pub unsafe fn | (&self) {
// TODO: Consider using `Ordering::Release`.
self.inner.state.store(0, SeqCst);
self.process_queue()
}
}
impl<T> From<T> for Qutex<T> {
#[inline]
fn from(val: T) -> Qutex<T> {
Qutex::new(val)
}
}
// Avoids needing `T: Clone`.
impl<T> Clone for Qutex<T> {
#[inline]
fn clone(&self) -> Qutex<T> {
Qutex {
inner: self.inner.clone(),
}
}
}
#[cfg(test)]
// Woefully incomplete:
mod tests {
use super::*;
use futures::Future;
#[test]
fn simple() {
let val = Qutex::from(999i32);
println!("Reading val...");
{
let future_guard = val.clone().lock();
let guard = future_guard.wait().unwrap();
println!("val: {}", *guard);
}
println!("Storing new val...");
{
let future_guard = val.clone().lock();
let mut guard = future_guard.wait().unwrap();
*guard = 5;
}
println!("Reading val...");
{
let future_guard = val.clone().lock();
let guard = future_guard.wait().unwrap();
println!("val: {}", *guard);
}
}
#[test]
fn concurrent() {
use std::thread;
let thread_count = 20;
let mut threads = Vec::with_capacity(thread_count);
let start_val = 0i32;
let qutex = Qutex::new(start_val);
for i in 0..thread_count {
let future_guard = qutex.clone().lock();
let future_write = future_guard.and_then(|mut guard| {
*guard += 1;
Ok(())
});
threads.push(
thread::Builder::new()
| direct_unlock | identifier_name |
qutex.rs | derive(Debug)]
pub struct Guard<T> {
qutex: Qutex<T>,
}
impl<T> Guard<T> {
/// Releases the lock held by a `Guard` and returns the original `Qutex`.
pub fn unlock(guard: Guard<T>) -> Qutex<T> {
let qutex = unsafe { ::std::ptr::read(&guard.qutex) };
::std::mem::forget(guard);
unsafe { qutex.direct_unlock() }
qutex
}
}
impl<T> Deref for Guard<T> {
type Target = T;
fn deref(&self) -> &T {
unsafe { &*self.qutex.inner.cell.get() }
}
}
impl<T> DerefMut for Guard<T> {
fn deref_mut(&mut self) -> &mut T {
unsafe { &mut *self.qutex.inner.cell.get() }
}
}
impl<T> Drop for Guard<T> {
fn drop(&mut self) {
// unsafe { self.qutex.direct_unlock().expect("Error dropping Guard") };
unsafe { self.qutex.direct_unlock() }
}
}
/// A future which resolves to a `Guard`.
#[must_use = "futures do nothing unless polled"]
#[derive(Debug)]
pub struct FutureGuard<T> {
qutex: Option<Qutex<T>>,
rx: Receiver<()>,
}
impl<T> FutureGuard<T> {
/// Returns a new `FutureGuard`.
fn new(qutex: Qutex<T>, rx: Receiver<()>) -> FutureGuard<T> {
FutureGuard {
qutex: Some(qutex),
rx: rx,
}
}
/// Blocks the current thread until this future resolves.
#[inline]
pub fn wait(self) -> Result<Guard<T>, Canceled> {
<Self as Future>::wait(self)
}
}
impl<T> Future for FutureGuard<T> {
type Item = Guard<T>;
type Error = Canceled;
#[inline]
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
if self.qutex.is_some() | else {
panic!("FutureGuard::poll: Task already completed.");
}
}
}
impl<T> Drop for FutureGuard<T> {
/// Gracefully unlock if this guard has a lock acquired but has not yet
/// been polled to completion.
fn drop(&mut self) {
if let Some(qutex) = self.qutex.take() {
self.rx.close();
match self.rx.try_recv() {
Ok(status) => {
if status.is_some() {
unsafe {
qutex.direct_unlock();
}
}
}
Err(_) => (),
}
}
}
}
/// A request to lock the qutex for exclusive access.
#[derive(Debug)]
pub struct Request {
tx: Sender<()>,
}
impl Request {
/// Returns a new `Request`.
pub fn new(tx: Sender<()>) -> Request {
Request { tx: tx }
}
}
#[derive(Debug)]
struct Inner<T> {
// TODO: Convert to `AtomicBool` if no additional states are needed:
state: AtomicUsize,
cell: UnsafeCell<T>,
queue: SegQueue<Request>,
}
impl<T> From<T> for Inner<T> {
#[inline]
fn from(val: T) -> Inner<T> {
Inner {
state: AtomicUsize::new(0),
cell: UnsafeCell::new(val),
queue: SegQueue::new(),
}
}
}
unsafe impl<T: Send> Send for Inner<T> {}
unsafe impl<T: Send> Sync for Inner<T> {}
/// A lock-free-queue-backed exclusive data lock.
#[derive(Debug)]
pub struct Qutex<T> {
inner: Arc<Inner<T>>,
}
impl<T> Qutex<T> {
/// Creates and returns a new `Qutex`.
#[inline]
pub fn new(val: T) -> Qutex<T> {
Qutex {
inner: Arc::new(Inner::from(val)),
}
}
/// Returns a new `FutureGuard` which can be used as a future and will
/// resolve into a `Guard`.
pub fn lock(self) -> FutureGuard<T> {
let (tx, rx) = oneshot::channel();
unsafe {
self.push_request(Request::new(tx));
}
FutureGuard::new(self, rx)
}
/// Pushes a lock request onto the queue.
///
//
// TODO: Evaluate unsafe-ness.
//
#[inline]
pub unsafe fn push_request(&self, req: Request) {
self.inner.queue.push(req);
}
/// Returns a mutable reference to the inner `Vec` if there are currently
/// no other copies of this `Qutex`.
///
/// Since this call borrows the inner lock mutably, no actual locking needs to
/// take place---the mutable borrow statically guarantees no locks exist.
///
#[inline]
pub fn get_mut(&mut self) -> Option<&mut T> {
Arc::get_mut(&mut self.inner).map(|inn| unsafe { &mut *inn.cell.get() })
}
/// Returns a reference to the inner value.
///
#[inline]
pub fn as_ptr(&self) -> *const T {
self.inner.cell.get()
}
/// Returns a mutable reference to the inner value.
///
#[inline]
pub fn as_mut_ptr(&self) -> *mut T {
self.inner.cell.get()
}
/// Pops the next lock request in the queue if this (the caller's) lock is
/// unlocked.
//
// TODO:
// * This is currently public due to 'derivers' (aka. sub-types). Evaluate.
// * Consider removing unsafe qualifier.
// * Return proper error type.
// * [performance] Determine whether or not `compare_exchange_weak` should be used instead.
// * [performance] Consider failure ordering.
//
pub unsafe fn process_queue(&self) {
match self.inner.state.compare_exchange(0, 1, SeqCst, SeqCst) {
// Unlocked:
Ok(0) => {
loop {
if let Some(req) = self.inner.queue.pop() {
// If there is a send error, a requester has dropped
// its receiver so just go to the next.
if req.tx.send(()).is_err() {
continue;
} else {
break;
}
} else {
self.inner.state.store(0, SeqCst);
break;
}
}
}
// Already locked, leave it alone:
Err(1) => (),
// Already locked, leave it alone:
//
// TODO: Remove this option. Should be unreachable.
//
Ok(1) => unreachable!(),
// Something else:
Ok(n) => panic!("Qutex::process_queue: inner.state: {}.", n),
Err(n) => panic!("Qutex::process_queue: error: {}.", n),
}
}
/// Unlocks this (the caller's) lock and wakes up the next task in the
/// queue.
//
// TODO:
// * Evaluate unsafe-ness.
// * Return proper error type
// pub unsafe fn direct_unlock(&self) -> Result<(), ()> {
pub unsafe fn direct_unlock(&self) {
// TODO: Consider using `Ordering::Release`.
self.inner.state.store(0, SeqCst);
self.process_queue()
}
}
impl<T> From<T> for Qutex<T> {
#[inline]
fn from(val: T) -> Qutex<T> {
Qutex::new(val)
}
}
// Avoids needing `T: Clone`.
impl<T> Clone for Qutex<T> {
#[inline]
fn clone(&self) -> Qutex<T> {
Qutex {
inner: self.inner.clone(),
}
}
}
#[cfg(test)]
// Woefully incomplete:
mod tests {
use super::*;
use futures::Future;
#[test]
fn simple() {
let val = Qutex::from(999i32);
println!("Reading val...");
{
let future_guard = val.clone().lock();
let guard = future_guard.wait().unwrap();
println!("val: {}", *guard);
}
println!("Storing new val...");
{
let future_guard = val.clone().lock();
let mut guard = future_guard.wait().unwrap();
*guard = 5;
}
println!("Reading val...");
{
let future_guard = val.clone().lock();
let guard = future_guard.wait().unwrap();
println!("val: {}", *guard);
}
}
#[test]
fn concurrent() {
use std::thread;
let thread_count = 20;
let mut threads = Vec::with_capacity(thread_count);
let start_val = 0i32;
let qutex = Qutex::new(start_val);
for i in 0..thread_count {
let future_guard = qutex.clone().lock();
let future_write = future_guard.and_then(|mut guard| {
*guard += 1;
Ok(())
});
threads.push(
thread::Builder::new()
| {
unsafe { self.qutex.as_ref().unwrap().process_queue() }
match self.rx.poll() {
Ok(status) => Ok(status.map(|_| Guard {
qutex: self.qutex.take().unwrap(),
})),
Err(e) => Err(e.into()),
}
} | conditional_block |
qutex.rs | derive(Debug)]
pub struct Guard<T> {
qutex: Qutex<T>,
}
impl<T> Guard<T> {
/// Releases the lock held by a `Guard` and returns the original `Qutex`.
pub fn unlock(guard: Guard<T>) -> Qutex<T> {
let qutex = unsafe { ::std::ptr::read(&guard.qutex) };
::std::mem::forget(guard);
unsafe { qutex.direct_unlock() }
qutex
}
}
impl<T> Deref for Guard<T> {
type Target = T;
fn deref(&self) -> &T {
unsafe { &*self.qutex.inner.cell.get() }
}
}
impl<T> DerefMut for Guard<T> {
fn deref_mut(&mut self) -> &mut T {
unsafe { &mut *self.qutex.inner.cell.get() }
}
}
impl<T> Drop for Guard<T> {
fn drop(&mut self) {
// unsafe { self.qutex.direct_unlock().expect("Error dropping Guard") };
unsafe { self.qutex.direct_unlock() }
}
}
/// A future which resolves to a `Guard`.
#[must_use = "futures do nothing unless polled"]
#[derive(Debug)]
pub struct FutureGuard<T> {
qutex: Option<Qutex<T>>,
rx: Receiver<()>,
}
impl<T> FutureGuard<T> {
/// Returns a new `FutureGuard`.
fn new(qutex: Qutex<T>, rx: Receiver<()>) -> FutureGuard<T> {
FutureGuard {
qutex: Some(qutex),
rx: rx,
}
}
/// Blocks the current thread until this future resolves.
#[inline]
pub fn wait(self) -> Result<Guard<T>, Canceled> {
<Self as Future>::wait(self)
}
}
impl<T> Future for FutureGuard<T> {
type Item = Guard<T>;
type Error = Canceled;
#[inline]
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
if self.qutex.is_some() {
unsafe { self.qutex.as_ref().unwrap().process_queue() }
match self.rx.poll() {
Ok(status) => Ok(status.map(|_| Guard {
qutex: self.qutex.take().unwrap(),
})),
Err(e) => Err(e.into()),
}
} else {
panic!("FutureGuard::poll: Task already completed.");
}
}
}
impl<T> Drop for FutureGuard<T> {
/// Gracefully unlock if this guard has a lock acquired but has not yet
/// been polled to completion.
fn drop(&mut self) {
if let Some(qutex) = self.qutex.take() {
self.rx.close();
match self.rx.try_recv() {
Ok(status) => {
if status.is_some() {
unsafe {
qutex.direct_unlock();
}
}
}
Err(_) => (),
}
}
}
}
/// A request to lock the qutex for exclusive access.
#[derive(Debug)]
pub struct Request {
tx: Sender<()>,
}
impl Request {
/// Returns a new `Request`.
pub fn new(tx: Sender<()>) -> Request {
Request { tx: tx }
}
}
#[derive(Debug)]
struct Inner<T> {
// TODO: Convert to `AtomicBool` if no additional states are needed:
state: AtomicUsize,
cell: UnsafeCell<T>,
queue: SegQueue<Request>,
}
impl<T> From<T> for Inner<T> {
#[inline]
fn from(val: T) -> Inner<T> {
Inner {
state: AtomicUsize::new(0),
cell: UnsafeCell::new(val),
queue: SegQueue::new(),
}
}
}
unsafe impl<T: Send> Send for Inner<T> {}
unsafe impl<T: Send> Sync for Inner<T> {}
/// A lock-free-queue-backed exclusive data lock.
#[derive(Debug)]
pub struct Qutex<T> {
inner: Arc<Inner<T>>,
}
impl<T> Qutex<T> {
/// Creates and returns a new `Qutex`.
#[inline]
pub fn new(val: T) -> Qutex<T> {
Qutex {
inner: Arc::new(Inner::from(val)),
}
} | let (tx, rx) = oneshot::channel();
unsafe {
self.push_request(Request::new(tx));
}
FutureGuard::new(self, rx)
}
/// Pushes a lock request onto the queue.
///
//
// TODO: Evaluate unsafe-ness.
//
#[inline]
pub unsafe fn push_request(&self, req: Request) {
self.inner.queue.push(req);
}
/// Returns a mutable reference to the inner `Vec` if there are currently
/// no other copies of this `Qutex`.
///
/// Since this call borrows the inner lock mutably, no actual locking needs to
/// take place---the mutable borrow statically guarantees no locks exist.
///
#[inline]
pub fn get_mut(&mut self) -> Option<&mut T> {
Arc::get_mut(&mut self.inner).map(|inn| unsafe { &mut *inn.cell.get() })
}
/// Returns a reference to the inner value.
///
#[inline]
pub fn as_ptr(&self) -> *const T {
self.inner.cell.get()
}
/// Returns a mutable reference to the inner value.
///
#[inline]
pub fn as_mut_ptr(&self) -> *mut T {
self.inner.cell.get()
}
/// Pops the next lock request in the queue if this (the caller's) lock is
/// unlocked.
//
// TODO:
// * This is currently public due to 'derivers' (aka. sub-types). Evaluate.
// * Consider removing unsafe qualifier.
// * Return proper error type.
// * [performance] Determine whether or not `compare_exchange_weak` should be used instead.
// * [performance] Consider failure ordering.
//
pub unsafe fn process_queue(&self) {
match self.inner.state.compare_exchange(0, 1, SeqCst, SeqCst) {
// Unlocked:
Ok(0) => {
loop {
if let Some(req) = self.inner.queue.pop() {
// If there is a send error, a requester has dropped
// its receiver so just go to the next.
if req.tx.send(()).is_err() {
continue;
} else {
break;
}
} else {
self.inner.state.store(0, SeqCst);
break;
}
}
}
// Already locked, leave it alone:
Err(1) => (),
// Already locked, leave it alone:
//
// TODO: Remove this option. Should be unreachable.
//
Ok(1) => unreachable!(),
// Something else:
Ok(n) => panic!("Qutex::process_queue: inner.state: {}.", n),
Err(n) => panic!("Qutex::process_queue: error: {}.", n),
}
}
/// Unlocks this (the caller's) lock and wakes up the next task in the
/// queue.
//
// TODO:
// * Evaluate unsafe-ness.
// * Return proper error type
// pub unsafe fn direct_unlock(&self) -> Result<(), ()> {
pub unsafe fn direct_unlock(&self) {
// TODO: Consider using `Ordering::Release`.
self.inner.state.store(0, SeqCst);
self.process_queue()
}
}
impl<T> From<T> for Qutex<T> {
#[inline]
fn from(val: T) -> Qutex<T> {
Qutex::new(val)
}
}
// Avoids needing `T: Clone`.
impl<T> Clone for Qutex<T> {
#[inline]
fn clone(&self) -> Qutex<T> {
Qutex {
inner: self.inner.clone(),
}
}
}
#[cfg(test)]
// Woefully incomplete:
mod tests {
use super::*;
use futures::Future;
#[test]
fn simple() {
let val = Qutex::from(999i32);
println!("Reading val...");
{
let future_guard = val.clone().lock();
let guard = future_guard.wait().unwrap();
println!("val: {}", *guard);
}
println!("Storing new val...");
{
let future_guard = val.clone().lock();
let mut guard = future_guard.wait().unwrap();
*guard = 5;
}
println!("Reading val...");
{
let future_guard = val.clone().lock();
let guard = future_guard.wait().unwrap();
println!("val: {}", *guard);
}
}
#[test]
fn concurrent() {
use std::thread;
let thread_count = 20;
let mut threads = Vec::with_capacity(thread_count);
let start_val = 0i32;
let qutex = Qutex::new(start_val);
for i in 0..thread_count {
let future_guard = qutex.clone().lock();
let future_write = future_guard.and_then(|mut guard| {
*guard += 1;
Ok(())
});
threads.push(
thread::Builder::new()
|
/// Returns a new `FutureGuard` which can be used as a future and will
/// resolve into a `Guard`.
pub fn lock(self) -> FutureGuard<T> { | random_line_split |
qutex.rs | (Debug)]
pub struct Guard<T> {
qutex: Qutex<T>,
}
impl<T> Guard<T> {
/// Releases the lock held by a `Guard` and returns the original `Qutex`.
pub fn unlock(guard: Guard<T>) -> Qutex<T> {
let qutex = unsafe { ::std::ptr::read(&guard.qutex) };
::std::mem::forget(guard);
unsafe { qutex.direct_unlock() }
qutex
}
}
impl<T> Deref for Guard<T> {
type Target = T;
fn deref(&self) -> &T {
unsafe { &*self.qutex.inner.cell.get() }
}
}
impl<T> DerefMut for Guard<T> {
fn deref_mut(&mut self) -> &mut T {
unsafe { &mut *self.qutex.inner.cell.get() }
}
}
impl<T> Drop for Guard<T> {
fn drop(&mut self) {
// unsafe { self.qutex.direct_unlock().expect("Error dropping Guard") };
unsafe { self.qutex.direct_unlock() }
}
}
/// A future which resolves to a `Guard`.
#[must_use = "futures do nothing unless polled"]
#[derive(Debug)]
pub struct FutureGuard<T> {
qutex: Option<Qutex<T>>,
rx: Receiver<()>,
}
impl<T> FutureGuard<T> {
/// Returns a new `FutureGuard`.
fn new(qutex: Qutex<T>, rx: Receiver<()>) -> FutureGuard<T> {
FutureGuard {
qutex: Some(qutex),
rx: rx,
}
}
/// Blocks the current thread until this future resolves.
#[inline]
pub fn wait(self) -> Result<Guard<T>, Canceled> {
<Self as Future>::wait(self)
}
}
impl<T> Future for FutureGuard<T> {
type Item = Guard<T>;
type Error = Canceled;
#[inline]
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
if self.qutex.is_some() {
unsafe { self.qutex.as_ref().unwrap().process_queue() }
match self.rx.poll() {
Ok(status) => Ok(status.map(|_| Guard {
qutex: self.qutex.take().unwrap(),
})),
Err(e) => Err(e.into()),
}
} else {
panic!("FutureGuard::poll: Task already completed.");
}
}
}
impl<T> Drop for FutureGuard<T> {
/// Gracefully unlock if this guard has a lock acquired but has not yet
/// been polled to completion.
fn drop(&mut self) {
if let Some(qutex) = self.qutex.take() {
self.rx.close();
match self.rx.try_recv() {
Ok(status) => {
if status.is_some() {
unsafe {
qutex.direct_unlock();
}
}
}
Err(_) => (),
}
}
}
}
/// A request to lock the qutex for exclusive access.
#[derive(Debug)]
pub struct Request {
tx: Sender<()>,
}
impl Request {
/// Returns a new `Request`.
pub fn new(tx: Sender<()>) -> Request {
Request { tx: tx }
}
}
#[derive(Debug)]
struct Inner<T> {
// TODO: Convert to `AtomicBool` if no additional states are needed:
state: AtomicUsize,
cell: UnsafeCell<T>,
queue: SegQueue<Request>,
}
impl<T> From<T> for Inner<T> {
#[inline]
fn from(val: T) -> Inner<T> {
Inner {
state: AtomicUsize::new(0),
cell: UnsafeCell::new(val),
queue: SegQueue::new(),
}
}
}
unsafe impl<T: Send> Send for Inner<T> {}
unsafe impl<T: Send> Sync for Inner<T> {}
/// A lock-free-queue-backed exclusive data lock.
#[derive(Debug)]
pub struct Qutex<T> {
inner: Arc<Inner<T>>,
}
impl<T> Qutex<T> {
/// Creates and returns a new `Qutex`.
#[inline]
pub fn new(val: T) -> Qutex<T> {
Qutex {
inner: Arc::new(Inner::from(val)),
}
}
/// Returns a new `FutureGuard` which can be used as a future and will
/// resolve into a `Guard`.
pub fn lock(self) -> FutureGuard<T> {
let (tx, rx) = oneshot::channel();
unsafe {
self.push_request(Request::new(tx));
}
FutureGuard::new(self, rx)
}
/// Pushes a lock request onto the queue.
///
//
// TODO: Evaluate unsafe-ness.
//
#[inline]
pub unsafe fn push_request(&self, req: Request) {
self.inner.queue.push(req);
}
/// Returns a mutable reference to the inner `Vec` if there are currently
/// no other copies of this `Qutex`.
///
/// Since this call borrows the inner lock mutably, no actual locking needs to
/// take place---the mutable borrow statically guarantees no locks exist.
///
#[inline]
pub fn get_mut(&mut self) -> Option<&mut T> {
Arc::get_mut(&mut self.inner).map(|inn| unsafe { &mut *inn.cell.get() })
}
/// Returns a reference to the inner value.
///
#[inline]
pub fn as_ptr(&self) -> *const T {
self.inner.cell.get()
}
/// Returns a mutable reference to the inner value.
///
#[inline]
pub fn as_mut_ptr(&self) -> *mut T {
self.inner.cell.get()
}
/// Pops the next lock request in the queue if this (the caller's) lock is
/// unlocked.
//
// TODO:
// * This is currently public due to 'derivers' (aka. sub-types). Evaluate.
// * Consider removing unsafe qualifier.
// * Return proper error type.
// * [performance] Determine whether or not `compare_exchange_weak` should be used instead.
// * [performance] Consider failure ordering.
//
pub unsafe fn process_queue(&self) {
match self.inner.state.compare_exchange(0, 1, SeqCst, SeqCst) {
// Unlocked:
Ok(0) => {
loop {
if let Some(req) = self.inner.queue.pop() {
// If there is a send error, a requester has dropped
// its receiver so just go to the next.
if req.tx.send(()).is_err() {
continue;
} else {
break;
}
} else {
self.inner.state.store(0, SeqCst);
break;
}
}
}
// Already locked, leave it alone:
Err(1) => (),
// Already locked, leave it alone:
//
// TODO: Remove this option. Should be unreachable.
//
Ok(1) => unreachable!(),
// Something else:
Ok(n) => panic!("Qutex::process_queue: inner.state: {}.", n),
Err(n) => panic!("Qutex::process_queue: error: {}.", n),
}
}
/// Unlocks this (the caller's) lock and wakes up the next task in the
/// queue.
//
// TODO:
// * Evaluate unsafe-ness.
// * Return proper error type
// pub unsafe fn direct_unlock(&self) -> Result<(), ()> {
pub unsafe fn direct_unlock(&self) {
// TODO: Consider using `Ordering::Release`.
self.inner.state.store(0, SeqCst);
self.process_queue()
}
}
impl<T> From<T> for Qutex<T> {
#[inline]
fn from(val: T) -> Qutex<T> {
Qutex::new(val)
}
}
// Avoids needing `T: Clone`.
impl<T> Clone for Qutex<T> {
#[inline]
fn clone(&self) -> Qutex<T> |
}
#[cfg(test)]
// Woefully incomplete:
mod tests {
use super::*;
use futures::Future;
#[test]
fn simple() {
let val = Qutex::from(999i32);
println!("Reading val...");
{
let future_guard = val.clone().lock();
let guard = future_guard.wait().unwrap();
println!("val: {}", *guard);
}
println!("Storing new val...");
{
let future_guard = val.clone().lock();
let mut guard = future_guard.wait().unwrap();
*guard = 5;
}
println!("Reading val...");
{
let future_guard = val.clone().lock();
let guard = future_guard.wait().unwrap();
println!("val: {}", *guard);
}
}
#[test]
fn concurrent() {
use std::thread;
let thread_count = 20;
let mut threads = Vec::with_capacity(thread_count);
let start_val = 0i32;
let qutex = Qutex::new(start_val);
for i in 0..thread_count {
let future_guard = qutex.clone().lock();
let future_write = future_guard.and_then(|mut guard| {
*guard += 1;
Ok(())
});
threads.push(
thread::Builder::new()
| {
Qutex {
inner: self.inner.clone(),
}
} | identifier_body |
projects.py | )
extra_top = models.TextField(
null=True,
blank=True,
help_text="Content to inject at the top of the <body> element of HTML served for this project.",
)
extra_bottom = models.TextField(
null=True,
blank=True,
help_text="Content to inject at the bottom of the <body> element of HTML served for this project.",
)
container_image = models.TextField(
null=True,
blank=True,
help_text="The container image to use as the execution environment for this project.",
)
session_timeout = models.PositiveIntegerField(
null=True,
blank=True,
help_text="The amount of time of inactivity after which a session will end (s).",
)
session_timelimit = models.PositiveIntegerField(
null=True, blank=True, help_text="The maximum duration of a session (s)."
)
session_memory = models.PositiveIntegerField(
null=True,
blank=True,
help_text="The amount of memory allocated (request and limit) for a session (MiB).",
)
main = models.TextField(
null=True, blank=True, help_text="Path of the main file of the project",
)
liveness = models.CharField(
max_length=16,
choices=ProjectLiveness.as_choices(),
default=ProjectLiveness.LATEST.value,
help_text="Where to serve the content for this project from.",
)
pinned = models.ForeignKey(
"Snapshot",
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name="project_pinned",
help_text="If pinned, the snapshot to pin to, when serving content.",
)
class Meta:
constraints = [
models.UniqueConstraint(
fields=["account", "name"], name="%(class)s_unique_account_name"
)
]
# Time between creation and scheduled deletion
TEMPORARY_PROJECT_LIFESPANS = {
# Time for the "temp" account
"temp": datetime.timedelta(days=1),
# Time for all other accounts
"default": datetime.timedelta(days=7),
}
# Time before schedule deletion for warning email
TEMPORARY_PROJECT_WARNING = datetime.timedelta(days=2)
STORAGE = working_storage()
def __str__(self):
return self.name
def get_meta(self) -> Meta:
"""
Get the metadata to include in the head of the project's pages.
"""
return Meta(
object_type="article",
title=self.title or self.name,
description=self.description,
image=self.image_file.url if self.image_file else None,
)
def set_image_from_file(self, file):
"""
Update the image file for the project from the path of a file within it.
"""
if isinstance(file, str):
try:
file = self.files.filter(current=True, path=file)[0]
except IndexError:
return
content = file.get_content()
format = file.get_format()
ext = format.default_extension if format else ""
# The file name needs to be unique to bust any caches.
file = ContentFile(content)
file.name = f"{self.id}-{shortuuid.uuid()}{ext}"
self.image_file = file
self.image_updated = timezone.now()
self.save()
def update_image(self):
"""
Update the image for the project.
"""
modified_since = (
dict(modified__gt=self.image_updated) if self.image_updated else {}
)
if self.image_path and self.image_path != "__uploaded__":
# Does the file need updating?
images = self.files.filter(
current=True, path=self.image_path, **modified_since
).order_by("-modified")
if len(images) > 0:
self.set_image_from_file(images[0])
else:
# Try to find an image for the project and use the most
# recently modified since the image was last updated
images = self.files.filter(
current=True, mimetype__startswith="image/", **modified_since,
).order_by("-modified")
if len(images) > 0:
self.set_image_from_file(images[0])
def update_image_all_projects(self):
"""
Update the image of all projects.
"""
projects = Project.objects.all(temporary=False)
for project in projects:
project.update_image()
@property
def scheduled_deletion_time(self) -> Optional[datetime.datetime]:
"""
Get the scheduled deletion time of a temporary project.
"""
if not self.temporary:
return None
delta = Project.TEMPORARY_PROJECT_LIFESPANS.get(
self.account.name, Project.TEMPORARY_PROJECT_LIFESPANS.get("default")
)
return self.created + delta
@property
def | (self) -> Optional[datetime.datetime]:
"""
Get the scheduled time for a warning of deletion email to be send to project owner.
"""
time = self.scheduled_deletion_time
return time - Project.TEMPORARY_PROJECT_WARNING if time else None
def get_main(self):
"""
Get the main file for the project.
The main file can be designated by the user
(using the `main` field as the path). If no file
matches that path (e.g. because it was removed),
or if `main` was never set, then this defaults to the
most recently modified file with path `main.*` or `README.*`
if those are present.
"""
if self.main:
try:
# Using `filter()` and indexing to get the first item is more robust that
# using `get()`. There should only be one item with path that is current
# but this avoids a `MultipleObjectsReturned` in cases when there is not.
return self.files.filter(path=self.main, current=True).order_by(
"-created"
)[0]
except IndexError:
pass
candidates = self.files.filter(
Q(path__startswith="main.") | Q(path__startswith="README."), current=True
).order_by("-modified")
if len(candidates):
return candidates[0]
return None
def get_theme(self) -> str:
"""Get the theme for the project."""
return self.theme or self.account.theme
def content_url(self, snapshot=None, path=None, live=False) -> str:
"""
Get the URL that the content for this project is served on.
This is the URL, on the account subdomain,
that content for the project is served from.
"""
params: Dict = {}
if settings.CONFIGURATION.endswith("Dev"):
# In development, it's very useful to be able to preview
# content, so we return a local URL
url = (
reverse("ui-accounts-content", kwargs=dict(project_name=self.name))
+ "/"
)
params.update(account=self.account.name)
else:
# In production, return an account subdomain URL
url = "https://{account}.{domain}/{project}/".format(
account=self.account.name,
domain=settings.ACCOUNTS_DOMAIN,
project=self.name,
)
# Defaults to generating a URL for the latest snapshot
# unless specific snapshot, or live is True
if live:
url += "live/"
elif snapshot:
url += "v{0}/".format(snapshot.number)
if not self.public:
url += "~{0}/".format(self.key)
if path:
url += path
if params:
url += "?" + urlencode(params)
return url
def file_location(self, file: str) -> str:
"""
Get the location of one of the project's files relative to the root of the storage volume.
"""
return os.path.join(str(self.id), file)
def event(self, data: dict, source=None):
"""
Handle an event notification.
Records the event and evaluates each project trigger.
"""
ProjectEvent.objects.create(project=self, data=data, source=source)
# TODO: Evaluate each project trigger
# #for trigger in self.triggers.all():
# trigger.evaluate(event=event, context=dict(event=event, source=source))
def cleanup(self, user: User) -> Job:
"""
Clean the project's working directory.
Removes all files from the working directory.
In the future, this may be smarter and only remove
those files that are orphaned (i.e. not registered as part of the pipeline).
This is not called `clean()` because that clashes with
`Model.clean()` which gets called, for example, after the submission
of a form in the admin interface.
"""
return Job.objects.create(
project=self,
creator=user,
method=JobMethod.clean.name,
description=f"Clean project '{self.name}'",
**Job.create_callback(self, "cleanup_callback"),
)
def cleanup_callback(self, job: Job):
"""
Set all project files as non-current.
This will remove derived files (e.g. converted from another format) and
files from a source.
"""
from projects.models.files import File
File.objects.filter(project=self, current=True).update(current=False)
def pull(self, user: User) -> Job:
"""
Pull all the project's sources into its working directory.
Groups sources by `order` (with `null` order first i.e. can be overridden).
If there are more than one source in each group creates a `parallel` job
having children jobs that `pull`s each source. Groups are then placed | scheduled_deletion_warning | identifier_name |
projects.py | =True
).order_by("-modified")
if len(candidates):
return candidates[0]
return None
def get_theme(self) -> str:
"""Get the theme for the project."""
return self.theme or self.account.theme
def content_url(self, snapshot=None, path=None, live=False) -> str:
"""
Get the URL that the content for this project is served on.
This is the URL, on the account subdomain,
that content for the project is served from.
"""
params: Dict = {}
if settings.CONFIGURATION.endswith("Dev"):
# In development, it's very useful to be able to preview
# content, so we return a local URL
url = (
reverse("ui-accounts-content", kwargs=dict(project_name=self.name))
+ "/"
)
params.update(account=self.account.name)
else:
# In production, return an account subdomain URL
url = "https://{account}.{domain}/{project}/".format(
account=self.account.name,
domain=settings.ACCOUNTS_DOMAIN,
project=self.name,
)
# Defaults to generating a URL for the latest snapshot
# unless specific snapshot, or live is True
if live:
url += "live/"
elif snapshot:
url += "v{0}/".format(snapshot.number)
if not self.public:
url += "~{0}/".format(self.key)
if path:
url += path
if params:
url += "?" + urlencode(params)
return url
def file_location(self, file: str) -> str:
"""
Get the location of one of the project's files relative to the root of the storage volume.
"""
return os.path.join(str(self.id), file)
def event(self, data: dict, source=None):
"""
Handle an event notification.
Records the event and evaluates each project trigger.
"""
ProjectEvent.objects.create(project=self, data=data, source=source)
# TODO: Evaluate each project trigger
# #for trigger in self.triggers.all():
# trigger.evaluate(event=event, context=dict(event=event, source=source))
def cleanup(self, user: User) -> Job:
"""
Clean the project's working directory.
Removes all files from the working directory.
In the future, this may be smarter and only remove
those files that are orphaned (i.e. not registered as part of the pipeline).
This is not called `clean()` because that clashes with
`Model.clean()` which gets called, for example, after the submission
of a form in the admin interface.
"""
return Job.objects.create(
project=self,
creator=user,
method=JobMethod.clean.name,
description=f"Clean project '{self.name}'",
**Job.create_callback(self, "cleanup_callback"),
)
def cleanup_callback(self, job: Job):
"""
Set all project files as non-current.
This will remove derived files (e.g. converted from another format) and
files from a source.
"""
from projects.models.files import File
File.objects.filter(project=self, current=True).update(current=False)
def pull(self, user: User) -> Job:
"""
Pull all the project's sources into its working directory.
Groups sources by `order` (with `null` order first i.e. can be overridden).
If there are more than one source in each group creates a `parallel` job
having children jobs that `pull`s each source. Groups are then placed in a
series job (if there is more than one).
"""
# Do not create individual pull jobs here because series job children
# are run in order of their ids; so we need to sort into groups first.
groups: Dict[int, List] = {}
for source in self.sources.all():
order = source.order or 0
if order in groups:
groups[order].append(source)
else:
groups[order] = [source]
steps: List[Job] = []
for order in sorted(groups.keys()):
sources = groups[order]
if len(sources) == 1:
steps.append(sources[0].pull(user))
else:
parallel = Job.objects.create(
project=self,
creator=user,
method=JobMethod.parallel.name,
description="Pull sources in parallel",
)
parallel.children.set([source.pull(user) for source in sources])
steps.append(parallel)
if len(steps) == 1:
return steps[0]
else:
series = Job.objects.create(
project=self,
creator=user,
method=JobMethod.series.name,
description="Pull sources in series",
)
series.children.set(steps)
return series
def reflow(self, user: User) -> Optional[Job]:
"""
Reflow the dependencies between the project's files by rerunning jobs.
For all `current` files that have `upstreams` creates a new job that
re-executes the original job. Because jobs can have `secrets` and callbacks
to the original project, rather than creating a copy of the original job
we go through the `File` method e.g. `File.convert`. This more safely enables
project forking etc.
In the future should do a topological sort so that the
jobs get executed in parallel if possible, and in series if necessary.
"""
subjobs = []
for file in self.files.filter(
current=True,
upstreams__isnull=False,
# Currently limited to convert jobs but in future there
# may be other jobs that create a derived file
# e.g. running a script that create files.
job__method=JobMethod.convert.name,
).exclude(
# Currently exclude index.html files because dealt with
# in an explicit step in snapshot
Q(path="index.html")
# Exclude .bib and image files which are created
# as children of a parent file's generation
# See https://github.com/stencila/hub/issues/1024#issuecomment-799128207
| Q(path__endswith=".bib")
| Q(path__endswith=".png")
| Q(path__endswith=".jpg"),
):
# Convert jobs only have one upstream
upstream = file.upstreams.first()
subjob = upstream.convert(user, file.path)
subjobs.append(subjob)
if len(subjobs) > 0:
parallel = Job.objects.create(
project=self,
creator=user,
method=JobMethod.parallel.name,
description="Update derived files",
)
parallel.children.set(subjobs)
return parallel
else:
return None
def pin(self, user: User, **callback) -> Job:
"""
Pin the project's container image.
Does not change the project's `container_image` field, but
rather, returns a pinned version of it. The callback should
use that value.
"""
return Job.objects.create(
project=self,
creator=user,
method=JobMethod.pin.name,
params=dict(container_image=self.container_image,),
description=f"Pin container image for project '{self.name}'",
**callback,
)
def archive(self, user: User, snapshot: str, path: str, **callback) -> Job:
"""
Archive the project's working directory.
Creates a copy of the project's working directory
on the `snapshots` storage.
"""
# Get the upload policy
policy = snapshots_storage().generate_post_policy(path)
url = policy.get("url") if policy else None
secrets = policy.get("fields") if policy else None
return Job.objects.create(
project=self,
creator=user,
method=JobMethod.archive.name,
params=dict(project=self.id, snapshot=snapshot, path=path, url=url,),
secrets=secrets,
description=f"Archive project '{self.name}'",
**callback,
)
def session(self, request: HttpRequest) -> Job:
"""
Create a session job for the project.
"""
job = Job.objects.create(
project=self,
creator=request.user if request.user.is_authenticated else None,
method=JobMethod.session.name,
params=dict(container_image=self.container_image),
description=f"Session for project '{self.name}'",
)
job.add_user(request)
return job
def make_project_creator_an_owner(
sender, instance: Project, created: bool, *args, **kwargs
):
"""
Make the project create an owner.
Makes sure each project has at least one owner.
"""
if sender is Project and created and instance.creator:
ProjectAgent.objects.create(
project=instance, user=instance.creator, role=ProjectRole.OWNER.name
)
post_save.connect(make_project_creator_an_owner, sender=Project)
class ProjectRole(EnumChoice):
| """
A user or team role within an account.
See `get_description` for what each role can do.
Some of roles can also be applied to the public.
For example, a project might be made public with
the `REVIEWER` role allowing anyone to comment.
"""
READER = "Reader"
REVIEWER = "Reviewer"
EDITOR = "Editor"
AUTHOR = "Author"
MANAGER = "Manager"
OWNER = "Owner"
@classmethod
def get_description(cls, role: "ProjectRole"):
"""Get the description of a project role."""
return { | identifier_body | |
projects.py | null=True,
blank=True,
help_text="The name of the theme to use as the default when generating content for this project."
# See note for the `Account.theme` field for why this is a TextField.
)
extra_head = models.TextField(
null=True,
blank=True,
help_text="Content to inject into the <head> element of HTML served for this project.",
)
extra_top = models.TextField(
null=True,
blank=True,
help_text="Content to inject at the top of the <body> element of HTML served for this project.",
)
extra_bottom = models.TextField(
null=True,
blank=True,
help_text="Content to inject at the bottom of the <body> element of HTML served for this project.",
)
container_image = models.TextField(
null=True,
blank=True,
help_text="The container image to use as the execution environment for this project.",
)
session_timeout = models.PositiveIntegerField(
null=True,
blank=True,
help_text="The amount of time of inactivity after which a session will end (s).",
)
session_timelimit = models.PositiveIntegerField(
null=True, blank=True, help_text="The maximum duration of a session (s)."
)
session_memory = models.PositiveIntegerField(
null=True,
blank=True,
help_text="The amount of memory allocated (request and limit) for a session (MiB).",
)
main = models.TextField(
null=True, blank=True, help_text="Path of the main file of the project",
)
liveness = models.CharField(
max_length=16,
choices=ProjectLiveness.as_choices(),
default=ProjectLiveness.LATEST.value,
help_text="Where to serve the content for this project from.",
)
pinned = models.ForeignKey(
"Snapshot",
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name="project_pinned",
help_text="If pinned, the snapshot to pin to, when serving content.",
)
class Meta:
constraints = [
models.UniqueConstraint(
fields=["account", "name"], name="%(class)s_unique_account_name"
)
]
# Time between creation and scheduled deletion
TEMPORARY_PROJECT_LIFESPANS = {
# Time for the "temp" account
"temp": datetime.timedelta(days=1),
# Time for all other accounts
"default": datetime.timedelta(days=7),
}
# Time before schedule deletion for warning email
TEMPORARY_PROJECT_WARNING = datetime.timedelta(days=2)
STORAGE = working_storage()
def __str__(self):
return self.name
def get_meta(self) -> Meta:
"""
Get the metadata to include in the head of the project's pages.
"""
return Meta(
object_type="article",
title=self.title or self.name,
description=self.description,
image=self.image_file.url if self.image_file else None,
)
def set_image_from_file(self, file):
"""
Update the image file for the project from the path of a file within it.
"""
if isinstance(file, str):
try:
file = self.files.filter(current=True, path=file)[0]
except IndexError:
return
content = file.get_content()
format = file.get_format()
ext = format.default_extension if format else ""
# The file name needs to be unique to bust any caches.
file = ContentFile(content)
file.name = f"{self.id}-{shortuuid.uuid()}{ext}"
self.image_file = file
self.image_updated = timezone.now()
self.save()
def update_image(self):
"""
Update the image for the project.
"""
modified_since = (
dict(modified__gt=self.image_updated) if self.image_updated else {}
)
if self.image_path and self.image_path != "__uploaded__":
# Does the file need updating?
images = self.files.filter(
current=True, path=self.image_path, **modified_since
).order_by("-modified")
if len(images) > 0:
self.set_image_from_file(images[0])
else:
# Try to find an image for the project and use the most
# recently modified since the image was last updated
images = self.files.filter(
current=True, mimetype__startswith="image/", **modified_since,
).order_by("-modified")
if len(images) > 0:
self.set_image_from_file(images[0])
def update_image_all_projects(self):
"""
Update the image of all projects.
"""
projects = Project.objects.all(temporary=False)
for project in projects:
project.update_image()
@property
def scheduled_deletion_time(self) -> Optional[datetime.datetime]:
"""
Get the scheduled deletion time of a temporary project.
"""
if not self.temporary:
return None
delta = Project.TEMPORARY_PROJECT_LIFESPANS.get(
self.account.name, Project.TEMPORARY_PROJECT_LIFESPANS.get("default")
)
return self.created + delta
@property
def scheduled_deletion_warning(self) -> Optional[datetime.datetime]:
"""
Get the scheduled time for a warning of deletion email to be send to project owner.
"""
time = self.scheduled_deletion_time
return time - Project.TEMPORARY_PROJECT_WARNING if time else None
def get_main(self):
"""
Get the main file for the project.
The main file can be designated by the user
(using the `main` field as the path). If no file
matches that path (e.g. because it was removed),
or if `main` was never set, then this defaults to the
most recently modified file with path `main.*` or `README.*`
if those are present.
"""
if self.main:
try:
# Using `filter()` and indexing to get the first item is more robust that
# using `get()`. There should only be one item with path that is current
# but this avoids a `MultipleObjectsReturned` in cases when there is not.
return self.files.filter(path=self.main, current=True).order_by(
"-created"
)[0]
except IndexError:
pass
candidates = self.files.filter(
Q(path__startswith="main.") | Q(path__startswith="README."), current=True
).order_by("-modified")
if len(candidates):
return candidates[0]
return None
def get_theme(self) -> str:
"""Get the theme for the project."""
return self.theme or self.account.theme
def content_url(self, snapshot=None, path=None, live=False) -> str:
"""
Get the URL that the content for this project is served on.
This is the URL, on the account subdomain,
that content for the project is served from.
"""
params: Dict = {}
if settings.CONFIGURATION.endswith("Dev"):
# In development, it's very useful to be able to preview
# content, so we return a local URL
url = (
reverse("ui-accounts-content", kwargs=dict(project_name=self.name))
+ "/"
)
params.update(account=self.account.name)
else:
# In production, return an account subdomain URL
url = "https://{account}.{domain}/{project}/".format(
account=self.account.name,
domain=settings.ACCOUNTS_DOMAIN,
project=self.name,
)
# Defaults to generating a URL for the latest snapshot
# unless specific snapshot, or live is True
if live:
url += "live/"
elif snapshot:
url += "v{0}/".format(snapshot.number)
if not self.public:
url += "~{0}/".format(self.key)
if path:
url += path
if params:
url += "?" + urlencode(params)
return url
def file_location(self, file: str) -> str:
"""
Get the location of one of the project's files relative to the root of the storage volume.
"""
return os.path.join(str(self.id), file)
def event(self, data: dict, source=None):
"""
Handle an event notification.
Records the event and evaluates each project trigger.
"""
ProjectEvent.objects.create(project=self, data=data, source=source)
# TODO: Evaluate each project trigger
# #for trigger in self.triggers.all():
# trigger.evaluate(event=event, context=dict(event=event, source=source))
def cleanup(self, user: User) -> Job:
"""
Clean the project's working directory.
Removes all files from the working directory.
In the future, this may be smarter and only remove
those files that are orphaned (i.e. not registered as part of the pipeline).
This is not called `clean()` because that clashes with
`Model.clean()` which gets called, for example, after the submission
of a form in the admin interface.
"""
return Job.objects.create(
project=self,
creator=user,
method=JobMethod.clean.name,
description=f"Clean project '{self.name}'",
**Job.create_callback(self, "cleanup_callback"),
)
def cleanup_callback(self, job: Job):
"""
Set all project files as non-current.
This will remove derived files (e.g. converted from | blank=True,
help_text="When the image file was last updated (e.g. from image_path).",
)
theme = models.TextField( | random_line_split | |
projects.py | )
extra_top = models.TextField(
null=True,
blank=True,
help_text="Content to inject at the top of the <body> element of HTML served for this project.",
)
extra_bottom = models.TextField(
null=True,
blank=True,
help_text="Content to inject at the bottom of the <body> element of HTML served for this project.",
)
container_image = models.TextField(
null=True,
blank=True,
help_text="The container image to use as the execution environment for this project.",
)
session_timeout = models.PositiveIntegerField(
null=True,
blank=True,
help_text="The amount of time of inactivity after which a session will end (s).",
)
session_timelimit = models.PositiveIntegerField(
null=True, blank=True, help_text="The maximum duration of a session (s)."
)
session_memory = models.PositiveIntegerField(
null=True,
blank=True,
help_text="The amount of memory allocated (request and limit) for a session (MiB).",
)
main = models.TextField(
null=True, blank=True, help_text="Path of the main file of the project",
)
liveness = models.CharField(
max_length=16,
choices=ProjectLiveness.as_choices(),
default=ProjectLiveness.LATEST.value,
help_text="Where to serve the content for this project from.",
)
pinned = models.ForeignKey(
"Snapshot",
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name="project_pinned",
help_text="If pinned, the snapshot to pin to, when serving content.",
)
class Meta:
constraints = [
models.UniqueConstraint(
fields=["account", "name"], name="%(class)s_unique_account_name"
)
]
# Time between creation and scheduled deletion
TEMPORARY_PROJECT_LIFESPANS = {
# Time for the "temp" account
"temp": datetime.timedelta(days=1),
# Time for all other accounts
"default": datetime.timedelta(days=7),
}
# Time before schedule deletion for warning email
TEMPORARY_PROJECT_WARNING = datetime.timedelta(days=2)
STORAGE = working_storage()
def __str__(self):
return self.name
def get_meta(self) -> Meta:
"""
Get the metadata to include in the head of the project's pages.
"""
return Meta(
object_type="article",
title=self.title or self.name,
description=self.description,
image=self.image_file.url if self.image_file else None,
)
def set_image_from_file(self, file):
"""
Update the image file for the project from the path of a file within it.
"""
if isinstance(file, str):
try:
file = self.files.filter(current=True, path=file)[0]
except IndexError:
return
content = file.get_content()
format = file.get_format()
ext = format.default_extension if format else ""
# The file name needs to be unique to bust any caches.
file = ContentFile(content)
file.name = f"{self.id}-{shortuuid.uuid()}{ext}"
self.image_file = file
self.image_updated = timezone.now()
self.save()
def update_image(self):
"""
Update the image for the project.
"""
modified_since = (
dict(modified__gt=self.image_updated) if self.image_updated else {}
)
if self.image_path and self.image_path != "__uploaded__":
# Does the file need updating?
|
else:
# Try to find an image for the project and use the most
# recently modified since the image was last updated
images = self.files.filter(
current=True, mimetype__startswith="image/", **modified_since,
).order_by("-modified")
if len(images) > 0:
self.set_image_from_file(images[0])
def update_image_all_projects(self):
"""
Update the image of all projects.
"""
projects = Project.objects.all(temporary=False)
for project in projects:
project.update_image()
@property
def scheduled_deletion_time(self) -> Optional[datetime.datetime]:
"""
Get the scheduled deletion time of a temporary project.
"""
if not self.temporary:
return None
delta = Project.TEMPORARY_PROJECT_LIFESPANS.get(
self.account.name, Project.TEMPORARY_PROJECT_LIFESPANS.get("default")
)
return self.created + delta
@property
def scheduled_deletion_warning(self) -> Optional[datetime.datetime]:
"""
Get the scheduled time for a warning of deletion email to be send to project owner.
"""
time = self.scheduled_deletion_time
return time - Project.TEMPORARY_PROJECT_WARNING if time else None
def get_main(self):
"""
Get the main file for the project.
The main file can be designated by the user
(using the `main` field as the path). If no file
matches that path (e.g. because it was removed),
or if `main` was never set, then this defaults to the
most recently modified file with path `main.*` or `README.*`
if those are present.
"""
if self.main:
try:
# Using `filter()` and indexing to get the first item is more robust that
# using `get()`. There should only be one item with path that is current
# but this avoids a `MultipleObjectsReturned` in cases when there is not.
return self.files.filter(path=self.main, current=True).order_by(
"-created"
)[0]
except IndexError:
pass
candidates = self.files.filter(
Q(path__startswith="main.") | Q(path__startswith="README."), current=True
).order_by("-modified")
if len(candidates):
return candidates[0]
return None
def get_theme(self) -> str:
"""Get the theme for the project."""
return self.theme or self.account.theme
def content_url(self, snapshot=None, path=None, live=False) -> str:
"""
Get the URL that the content for this project is served on.
This is the URL, on the account subdomain,
that content for the project is served from.
"""
params: Dict = {}
if settings.CONFIGURATION.endswith("Dev"):
# In development, it's very useful to be able to preview
# content, so we return a local URL
url = (
reverse("ui-accounts-content", kwargs=dict(project_name=self.name))
+ "/"
)
params.update(account=self.account.name)
else:
# In production, return an account subdomain URL
url = "https://{account}.{domain}/{project}/".format(
account=self.account.name,
domain=settings.ACCOUNTS_DOMAIN,
project=self.name,
)
# Defaults to generating a URL for the latest snapshot
# unless specific snapshot, or live is True
if live:
url += "live/"
elif snapshot:
url += "v{0}/".format(snapshot.number)
if not self.public:
url += "~{0}/".format(self.key)
if path:
url += path
if params:
url += "?" + urlencode(params)
return url
def file_location(self, file: str) -> str:
"""
Get the location of one of the project's files relative to the root of the storage volume.
"""
return os.path.join(str(self.id), file)
def event(self, data: dict, source=None):
"""
Handle an event notification.
Records the event and evaluates each project trigger.
"""
ProjectEvent.objects.create(project=self, data=data, source=source)
# TODO: Evaluate each project trigger
# #for trigger in self.triggers.all():
# trigger.evaluate(event=event, context=dict(event=event, source=source))
def cleanup(self, user: User) -> Job:
"""
Clean the project's working directory.
Removes all files from the working directory.
In the future, this may be smarter and only remove
those files that are orphaned (i.e. not registered as part of the pipeline).
This is not called `clean()` because that clashes with
`Model.clean()` which gets called, for example, after the submission
of a form in the admin interface.
"""
return Job.objects.create(
project=self,
creator=user,
method=JobMethod.clean.name,
description=f"Clean project '{self.name}'",
**Job.create_callback(self, "cleanup_callback"),
)
def cleanup_callback(self, job: Job):
"""
Set all project files as non-current.
This will remove derived files (e.g. converted from another format) and
files from a source.
"""
from projects.models.files import File
File.objects.filter(project=self, current=True).update(current=False)
def pull(self, user: User) -> Job:
"""
Pull all the project's sources into its working directory.
Groups sources by `order` (with `null` order first i.e. can be overridden).
If there are more than one source in each group creates a `parallel` job
having children jobs that `pull`s each source. Groups are then placed | images = self.files.filter(
current=True, path=self.image_path, **modified_since
).order_by("-modified")
if len(images) > 0:
self.set_image_from_file(images[0]) | conditional_block |
sbfdinitiator_ef4ed37c4520e95225e35be31ea6dde4.py | COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import sys
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
if sys.version_info >= (3, 5):
from typing import List, Any, Union
class SbfdInitiator(Base):
"""
The SbfdInitiator class encapsulates a required sbfdInitiator resource which will be retrieved from the server every time the property is accessed.
"""
__slots__ = ()
_SDM_NAME = "sbfdInitiator"
_SDM_ATT_MAP = {
"Active": "active",
"Count": "count",
"DescriptiveName": "descriptiveName",
"DestIPAddr": "destIPAddr",
"MplsLabelCount": "mplsLabelCount",
"MyDiscriminator": "myDiscriminator",
"Name": "name",
"PeerDiscriminator": "peerDiscriminator",
"SessionInfo": "sessionInfo",
"TimeoutMultiplier": "timeoutMultiplier",
"TxInterval": "txInterval",
}
_SDM_ENUM_MAP = {}
def __init__(self, parent, list_op=False):
super(SbfdInitiator, self).__init__(parent, list_op)
@property
def MplsLabelList(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.mplslabellist_37213b54082ea2315b262cbc86661827.MplsLabelList): An instance of the MplsLabelList class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.mplslabellist_37213b54082ea2315b262cbc86661827 import (
MplsLabelList,
)
if len(self._object_properties) > 0:
if self._properties.get("MplsLabelList", None) is not None:
return self._properties.get("MplsLabelList")
return MplsLabelList(self)
@property
def Active(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Activate/Deactivate Configuration.
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP["Active"]))
@property
def Count(self):
# type: () -> int
"""
Returns
-------
- number: Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group.
"""
return self._get_attribute(self._SDM_ATT_MAP["Count"])
@property
def DescriptiveName(self):
# type: () -> str
"""
Returns
-------
- str: Longer, more descriptive name for element. It's not guaranteed to be unique like -name-, but may offer more context.
"""
return self._get_attribute(self._SDM_ATT_MAP["DescriptiveName"])
@property
def DestIPAddr(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Destination IP address in SBFD Packet,which is sent to Responder. Should be in 127 subnet as defined in specification.
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP["DestIPAddr"]))
@property
def MplsLabelCount(self):
# type: () -> int
"""
Returns
-------
- number: Number of MPLS Labels.
"""
return self._get_attribute(self._SDM_ATT_MAP["MplsLabelCount"])
@MplsLabelCount.setter
def MplsLabelCount(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP["MplsLabelCount"], value)
@property
def MyDiscriminator(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): The value to be used for My Discriminator in S-BFD packets sent to the Responder by this Initiator. Should be unique in sessions from a single Initiator.
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(
self, self._get_attribute(self._SDM_ATT_MAP["MyDiscriminator"])
)
@property
def Name(self):
# type: () -> str
"""
Returns
-------
- str: Name of NGPF element, guaranteed to be unique in Scenario
"""
return self._get_attribute(self._SDM_ATT_MAP["Name"])
@Name.setter
def Name(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP["Name"], value)
@property
def PeerDiscriminator(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Configured Peer Discriminator which should match the configured Local or My Discriminator on the target Responder.
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(
self, self._get_attribute(self._SDM_ATT_MAP["PeerDiscriminator"])
)
@property
def SessionInfo(self):
# type: () -> List[str]
"""
Returns
-------
- list(str[adminDown | down | up]): Current state of the S-BFD Initiator Session. It is normally Up or Down depending on whether Responder is responding correctly or not.
"""
return self._get_attribute(self._SDM_ATT_MAP["SessionInfo"])
@property
def TimeoutMultiplier(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): If packets are not recieved within the negotiated transmit Interval * this value , session is brought down and Flap Count is increased in statistics.
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(
self, self._get_attribute(self._SDM_ATT_MAP["TimeoutMultiplier"])
)
@property
def TxInterval(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Tx Interval in Milli Seconds. Note: Initial transmission interval is set to maximum of 1s and configured Tx Interval. Once session comes up, the timer will auto-transition to the negotiated value i.e. maximum of local Tx Interval and recieved Rx Interval from Responder.
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP["TxInterval"]))
def update(self, MplsLabelCount=None, Name=None):
# type: (int, str) -> SbfdInitiator
"""Updates sbfdInitiator resource on the server.
This method has some named parameters with a type: obj (Multivalue).
The Multivalue class has documentation that details the possible values for those named parameters.
Args
----
- MplsLabelCount (number): Number of MPLS Labels.
- Name (str): Name of NGPF element, guaranteed to be unique in Scenario
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))
def | (
self,
Count=None,
DescriptiveName=None,
MplsLabelCount=None,
Name=None,
SessionInfo=None,
):
# type: (int, str, int, str, List[str]) -> SbfdInitiator
"""Finds and retrieves sbfdInitiator resources from the server.
All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve sbfdInitiator resources from the server.
To retrieve an exact match ensure the parameter value starts with ^ and ends with $
By default the find method takes no parameters and will retrieve all sbfdInitiator resources from the server.
Args
----
- Count (number): Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group.
- DescriptiveName (str): Longer, more descriptive name for element. It's not guaranteed to be unique like -name-, but may offer more context.
- MplsLabelCount (number): Number of MPLS Labels.
- Name (str): Name of NGPF element, guaranteed to be unique in Scenario
- SessionInfo (list(str[adminDown | down | up])): Current state of the S-BFD Initiator Session. It is normally Up or Down depending on whether Responder is | find | identifier_name |
sbfdinitiator_ef4ed37c4520e95225e35be31ea6dde4.py | OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import sys
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
if sys.version_info >= (3, 5):
from typing import List, Any, Union
class SbfdInitiator(Base):
"""
The SbfdInitiator class encapsulates a required sbfdInitiator resource which will be retrieved from the server every time the property is accessed.
"""
__slots__ = ()
_SDM_NAME = "sbfdInitiator"
_SDM_ATT_MAP = {
"Active": "active",
"Count": "count",
"DescriptiveName": "descriptiveName",
"DestIPAddr": "destIPAddr",
"MplsLabelCount": "mplsLabelCount",
"MyDiscriminator": "myDiscriminator",
"Name": "name",
"PeerDiscriminator": "peerDiscriminator",
"SessionInfo": "sessionInfo",
"TimeoutMultiplier": "timeoutMultiplier",
"TxInterval": "txInterval",
}
_SDM_ENUM_MAP = {}
def __init__(self, parent, list_op=False):
super(SbfdInitiator, self).__init__(parent, list_op)
@property
def MplsLabelList(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.mplslabellist_37213b54082ea2315b262cbc86661827.MplsLabelList): An instance of the MplsLabelList class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.mplslabellist_37213b54082ea2315b262cbc86661827 import (
MplsLabelList,
)
if len(self._object_properties) > 0:
if self._properties.get("MplsLabelList", None) is not None:
return self._properties.get("MplsLabelList")
return MplsLabelList(self)
@property
def Active(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Activate/Deactivate Configuration.
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP["Active"]))
@property
def Count(self):
# type: () -> int
"""
Returns
-------
- number: Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group.
"""
return self._get_attribute(self._SDM_ATT_MAP["Count"])
@property
def DescriptiveName(self):
# type: () -> str
"""
Returns
-------
- str: Longer, more descriptive name for element. It's not guaranteed to be unique like -name-, but may offer more context.
"""
return self._get_attribute(self._SDM_ATT_MAP["DescriptiveName"])
@property
def DestIPAddr(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Destination IP address in SBFD Packet,which is sent to Responder. Should be in 127 subnet as defined in specification.
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP["DestIPAddr"]))
@property
def MplsLabelCount(self):
# type: () -> int
"""
Returns
-------
- number: Number of MPLS Labels.
"""
return self._get_attribute(self._SDM_ATT_MAP["MplsLabelCount"])
@MplsLabelCount.setter
def MplsLabelCount(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP["MplsLabelCount"], value)
@property
def MyDiscriminator(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): The value to be used for My Discriminator in S-BFD packets sent to the Responder by this Initiator. Should be unique in sessions from a single Initiator.
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(
self, self._get_attribute(self._SDM_ATT_MAP["MyDiscriminator"])
)
@property
def Name(self):
# type: () -> str
"""
Returns
-------
- str: Name of NGPF element, guaranteed to be unique in Scenario
"""
return self._get_attribute(self._SDM_ATT_MAP["Name"])
@Name.setter
def Name(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP["Name"], value)
@property
def PeerDiscriminator(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Configured Peer Discriminator which should match the configured Local or My Discriminator on the target Responder.
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(
self, self._get_attribute(self._SDM_ATT_MAP["PeerDiscriminator"])
)
@property
def SessionInfo(self):
# type: () -> List[str]
"""
Returns
-------
- list(str[adminDown | down | up]): Current state of the S-BFD Initiator Session. It is normally Up or Down depending on whether Responder is responding correctly or not.
"""
return self._get_attribute(self._SDM_ATT_MAP["SessionInfo"])
@property
def TimeoutMultiplier(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): If packets are not recieved within the negotiated transmit Interval * this value , session is brought down and Flap Count is increased in statistics.
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(
self, self._get_attribute(self._SDM_ATT_MAP["TimeoutMultiplier"])
)
@property
def TxInterval(self): | Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Tx Interval in Milli Seconds. Note: Initial transmission interval is set to maximum of 1s and configured Tx Interval. Once session comes up, the timer will auto-transition to the negotiated value i.e. maximum of local Tx Interval and recieved Rx Interval from Responder.
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP["TxInterval"]))
def update(self, MplsLabelCount=None, Name=None):
# type: (int, str) -> SbfdInitiator
"""Updates sbfdInitiator resource on the server.
This method has some named parameters with a type: obj (Multivalue).
The Multivalue class has documentation that details the possible values for those named parameters.
Args
----
- MplsLabelCount (number): Number of MPLS Labels.
- Name (str): Name of NGPF element, guaranteed to be unique in Scenario
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))
def find(
self,
Count=None,
DescriptiveName=None,
MplsLabelCount=None,
Name=None,
SessionInfo=None,
):
# type: (int, str, int, str, List[str]) -> SbfdInitiator
"""Finds and retrieves sbfdInitiator resources from the server.
All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve sbfdInitiator resources from the server.
To retrieve an exact match ensure the parameter value starts with ^ and ends with $
By default the find method takes no parameters and will retrieve all sbfdInitiator resources from the server.
Args
----
- Count (number): Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group.
- DescriptiveName (str): Longer, more descriptive name for element. It's not guaranteed to be unique like -name-, but may offer more context.
- MplsLabelCount (number): Number of MPLS Labels.
- Name (str): Name of NGPF element, guaranteed to be unique in Scenario
- SessionInfo (list(str[adminDown | down | up])): Current state of the S-BFD Initiator Session. It is normally Up or Down depending on whether Responder is | # type: () -> 'Multivalue'
""" | random_line_split |
sbfdinitiator_ef4ed37c4520e95225e35be31ea6dde4.py | COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import sys
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
if sys.version_info >= (3, 5):
from typing import List, Any, Union
class SbfdInitiator(Base):
"""
The SbfdInitiator class encapsulates a required sbfdInitiator resource which will be retrieved from the server every time the property is accessed.
"""
__slots__ = ()
_SDM_NAME = "sbfdInitiator"
_SDM_ATT_MAP = {
"Active": "active",
"Count": "count",
"DescriptiveName": "descriptiveName",
"DestIPAddr": "destIPAddr",
"MplsLabelCount": "mplsLabelCount",
"MyDiscriminator": "myDiscriminator",
"Name": "name",
"PeerDiscriminator": "peerDiscriminator",
"SessionInfo": "sessionInfo",
"TimeoutMultiplier": "timeoutMultiplier",
"TxInterval": "txInterval",
}
_SDM_ENUM_MAP = {}
def __init__(self, parent, list_op=False):
super(SbfdInitiator, self).__init__(parent, list_op)
@property
def MplsLabelList(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.mplslabellist_37213b54082ea2315b262cbc86661827.MplsLabelList): An instance of the MplsLabelList class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.mplslabellist_37213b54082ea2315b262cbc86661827 import (
MplsLabelList,
)
if len(self._object_properties) > 0:
|
return MplsLabelList(self)
@property
def Active(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Activate/Deactivate Configuration.
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP["Active"]))
@property
def Count(self):
# type: () -> int
"""
Returns
-------
- number: Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group.
"""
return self._get_attribute(self._SDM_ATT_MAP["Count"])
@property
def DescriptiveName(self):
# type: () -> str
"""
Returns
-------
- str: Longer, more descriptive name for element. It's not guaranteed to be unique like -name-, but may offer more context.
"""
return self._get_attribute(self._SDM_ATT_MAP["DescriptiveName"])
@property
def DestIPAddr(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Destination IP address in SBFD Packet,which is sent to Responder. Should be in 127 subnet as defined in specification.
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP["DestIPAddr"]))
@property
def MplsLabelCount(self):
# type: () -> int
"""
Returns
-------
- number: Number of MPLS Labels.
"""
return self._get_attribute(self._SDM_ATT_MAP["MplsLabelCount"])
@MplsLabelCount.setter
def MplsLabelCount(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP["MplsLabelCount"], value)
@property
def MyDiscriminator(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): The value to be used for My Discriminator in S-BFD packets sent to the Responder by this Initiator. Should be unique in sessions from a single Initiator.
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(
self, self._get_attribute(self._SDM_ATT_MAP["MyDiscriminator"])
)
@property
def Name(self):
# type: () -> str
"""
Returns
-------
- str: Name of NGPF element, guaranteed to be unique in Scenario
"""
return self._get_attribute(self._SDM_ATT_MAP["Name"])
@Name.setter
def Name(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP["Name"], value)
@property
def PeerDiscriminator(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Configured Peer Discriminator which should match the configured Local or My Discriminator on the target Responder.
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(
self, self._get_attribute(self._SDM_ATT_MAP["PeerDiscriminator"])
)
@property
def SessionInfo(self):
# type: () -> List[str]
"""
Returns
-------
- list(str[adminDown | down | up]): Current state of the S-BFD Initiator Session. It is normally Up or Down depending on whether Responder is responding correctly or not.
"""
return self._get_attribute(self._SDM_ATT_MAP["SessionInfo"])
@property
def TimeoutMultiplier(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): If packets are not recieved within the negotiated transmit Interval * this value , session is brought down and Flap Count is increased in statistics.
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(
self, self._get_attribute(self._SDM_ATT_MAP["TimeoutMultiplier"])
)
@property
def TxInterval(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Tx Interval in Milli Seconds. Note: Initial transmission interval is set to maximum of 1s and configured Tx Interval. Once session comes up, the timer will auto-transition to the negotiated value i.e. maximum of local Tx Interval and recieved Rx Interval from Responder.
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP["TxInterval"]))
def update(self, MplsLabelCount=None, Name=None):
# type: (int, str) -> SbfdInitiator
"""Updates sbfdInitiator resource on the server.
This method has some named parameters with a type: obj (Multivalue).
The Multivalue class has documentation that details the possible values for those named parameters.
Args
----
- MplsLabelCount (number): Number of MPLS Labels.
- Name (str): Name of NGPF element, guaranteed to be unique in Scenario
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))
def find(
self,
Count=None,
DescriptiveName=None,
MplsLabelCount=None,
Name=None,
SessionInfo=None,
):
# type: (int, str, int, str, List[str]) -> SbfdInitiator
"""Finds and retrieves sbfdInitiator resources from the server.
All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve sbfdInitiator resources from the server.
To retrieve an exact match ensure the parameter value starts with ^ and ends with $
By default the find method takes no parameters and will retrieve all sbfdInitiator resources from the server.
Args
----
- Count (number): Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group.
- DescriptiveName (str): Longer, more descriptive name for element. It's not guaranteed to be unique like -name-, but may offer more context.
- MplsLabelCount (number): Number of MPLS Labels.
- Name (str): Name of NGPF element, guaranteed to be unique in Scenario
- SessionInfo (list(str[adminDown | down | up])): Current state of the S-BFD Initiator Session. It is normally Up or Down depending on whether Responder is | if self._properties.get("MplsLabelList", None) is not None:
return self._properties.get("MplsLabelList") | conditional_block |
sbfdinitiator_ef4ed37c4520e95225e35be31ea6dde4.py | COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import sys
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
if sys.version_info >= (3, 5):
from typing import List, Any, Union
class SbfdInitiator(Base):
"""
The SbfdInitiator class encapsulates a required sbfdInitiator resource which will be retrieved from the server every time the property is accessed.
"""
__slots__ = ()
_SDM_NAME = "sbfdInitiator"
_SDM_ATT_MAP = {
"Active": "active",
"Count": "count",
"DescriptiveName": "descriptiveName",
"DestIPAddr": "destIPAddr",
"MplsLabelCount": "mplsLabelCount",
"MyDiscriminator": "myDiscriminator",
"Name": "name",
"PeerDiscriminator": "peerDiscriminator",
"SessionInfo": "sessionInfo",
"TimeoutMultiplier": "timeoutMultiplier",
"TxInterval": "txInterval",
}
_SDM_ENUM_MAP = {}
def __init__(self, parent, list_op=False):
super(SbfdInitiator, self).__init__(parent, list_op)
@property
def MplsLabelList(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.mplslabellist_37213b54082ea2315b262cbc86661827.MplsLabelList): An instance of the MplsLabelList class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.mplslabellist_37213b54082ea2315b262cbc86661827 import (
MplsLabelList,
)
if len(self._object_properties) > 0:
if self._properties.get("MplsLabelList", None) is not None:
return self._properties.get("MplsLabelList")
return MplsLabelList(self)
@property
def Active(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Activate/Deactivate Configuration.
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP["Active"]))
@property
def Count(self):
# type: () -> int
"""
Returns
-------
- number: Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group.
"""
return self._get_attribute(self._SDM_ATT_MAP["Count"])
@property
def DescriptiveName(self):
# type: () -> str
"""
Returns
-------
- str: Longer, more descriptive name for element. It's not guaranteed to be unique like -name-, but may offer more context.
"""
return self._get_attribute(self._SDM_ATT_MAP["DescriptiveName"])
@property
def DestIPAddr(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Destination IP address in SBFD Packet,which is sent to Responder. Should be in 127 subnet as defined in specification.
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP["DestIPAddr"]))
@property
def MplsLabelCount(self):
# type: () -> int
"""
Returns
-------
- number: Number of MPLS Labels.
"""
return self._get_attribute(self._SDM_ATT_MAP["MplsLabelCount"])
@MplsLabelCount.setter
def MplsLabelCount(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP["MplsLabelCount"], value)
@property
def MyDiscriminator(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): The value to be used for My Discriminator in S-BFD packets sent to the Responder by this Initiator. Should be unique in sessions from a single Initiator.
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(
self, self._get_attribute(self._SDM_ATT_MAP["MyDiscriminator"])
)
@property
def Name(self):
# type: () -> str
|
@Name.setter
def Name(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP["Name"], value)
@property
def PeerDiscriminator(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Configured Peer Discriminator which should match the configured Local or My Discriminator on the target Responder.
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(
self, self._get_attribute(self._SDM_ATT_MAP["PeerDiscriminator"])
)
@property
def SessionInfo(self):
# type: () -> List[str]
"""
Returns
-------
- list(str[adminDown | down | up]): Current state of the S-BFD Initiator Session. It is normally Up or Down depending on whether Responder is responding correctly or not.
"""
return self._get_attribute(self._SDM_ATT_MAP["SessionInfo"])
@property
def TimeoutMultiplier(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): If packets are not recieved within the negotiated transmit Interval * this value , session is brought down and Flap Count is increased in statistics.
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(
self, self._get_attribute(self._SDM_ATT_MAP["TimeoutMultiplier"])
)
@property
def TxInterval(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Tx Interval in Milli Seconds. Note: Initial transmission interval is set to maximum of 1s and configured Tx Interval. Once session comes up, the timer will auto-transition to the negotiated value i.e. maximum of local Tx Interval and recieved Rx Interval from Responder.
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP["TxInterval"]))
def update(self, MplsLabelCount=None, Name=None):
# type: (int, str) -> SbfdInitiator
"""Updates sbfdInitiator resource on the server.
This method has some named parameters with a type: obj (Multivalue).
The Multivalue class has documentation that details the possible values for those named parameters.
Args
----
- MplsLabelCount (number): Number of MPLS Labels.
- Name (str): Name of NGPF element, guaranteed to be unique in Scenario
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))
def find(
self,
Count=None,
DescriptiveName=None,
MplsLabelCount=None,
Name=None,
SessionInfo=None,
):
# type: (int, str, int, str, List[str]) -> SbfdInitiator
"""Finds and retrieves sbfdInitiator resources from the server.
All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve sbfdInitiator resources from the server.
To retrieve an exact match ensure the parameter value starts with ^ and ends with $
By default the find method takes no parameters and will retrieve all sbfdInitiator resources from the server.
Args
----
- Count (number): Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group.
- DescriptiveName (str): Longer, more descriptive name for element. It's not guaranteed to be unique like -name-, but may offer more context.
- MplsLabelCount (number): Number of MPLS Labels.
- Name (str): Name of NGPF element, guaranteed to be unique in Scenario
- SessionInfo (list(str[adminDown | down | up])): Current state of the S-BFD Initiator Session. It is normally Up or Down depending on whether Responder is | """
Returns
-------
- str: Name of NGPF element, guaranteed to be unique in Scenario
"""
return self._get_attribute(self._SDM_ATT_MAP["Name"]) | identifier_body |
main.go | (&name, "name", "", "name of the server")
p.Cmd.Flags().StringVar(&apiToken, "apiToken", "", "API token for global login")
p.Cmd.Flags().StringVar(&server, "server", "", "login to the given server")
p.Cmd.Flags().StringVar(&kubeConfig, "kubeconfig", "", "path to kubeconfig management cluster. Valid only if user doesn't choose 'endpoint' option.(See [*])")
p.Cmd.Flags().StringVar(&kubecontext, "context", "", "the context in the kubeconfig to use for management cluster. Valid only if user doesn't choose 'endpoint' option.(See [*]) ")
p.Cmd.Flags().BoolVar(&stderrOnly, "stderr-only", false, "send all output to stderr rather than stdout")
p.Cmd.Flags().BoolVar(&forceCSP, "force-csp", false, "force the endpoint to be logged in as a csp server")
p.Cmd.Flags().BoolVar(&staging, "staging", false, "use CSP staging issuer")
p.Cmd.Flags().MarkHidden("stderr-only") //nolint
p.Cmd.Flags().MarkHidden("force-csp") //nolint
p.Cmd.Flags().MarkHidden("staging") //nolint
p.Cmd.RunE = login
p.Cmd.Example = `
# Login to TKG management cluster using endpoint
tanzu login --endpoint "https://login.example.com" --name mgmt-cluster
# Login to TKG management cluster by using kubeconfig path and context for the management cluster
tanzu login --kubeconfig path/to/kubeconfig --context path/to/context --name mgmt-cluster
# Login to TKG management cluster by using default kubeconfig path and context for the management cluster
tanzu login --context path/to/context --name mgmt-cluster
# Login to an existing server
tanzu login --server mgmt-cluster
[*] : User has two options to login to TKG. User can choose the login endpoint option
by providing 'endpoint', or user can choose to use the kubeconfig for the management cluster by
providing 'kubeconfig' and 'context'. If only '--context' is set and '--kubeconfig' is unset
$KUBECONFIG env variable would be used and, if $KUBECONFIG env is also unset default
kubeconfig($HOME/.kube/config) would be used
`
if err := p.Execute(); err != nil {
os.Exit(1)
}
}
func login(cmd *cobra.Command, args []string) (err error) {
cfg, err := config.GetClientConfig()
if _, ok := err.(*config.ClientConfigNotExistError); ok {
cfg, err = config.NewClientConfig()
if err != nil {
return err
}
} else if err != nil {
return err
}
newServerSelector := "+ new server"
var serverTarget *configv1alpha1.Server
if name != "" {
serverTarget, err = createNewServer()
if err != nil {
return err
}
} else if server == "" {
serverTarget, err = getServerTarget(cfg, newServerSelector)
if err != nil {
return err
}
} else {
serverTarget, err = config.GetServer(server)
if err != nil {
return err
}
}
if server == newServerSelector {
serverTarget, err = createNewServer()
if err != nil {
return err
}
}
if serverTarget.Type == configv1alpha1.GlobalServerType {
return globalLogin(serverTarget)
}
return managementClusterLogin(serverTarget)
}
func getServerTarget(cfg *configv1alpha1.ClientConfig, newServerSelector string) (*configv1alpha1.Server, error) {
promptOpts := getPromptOpts()
servers := map[string]*configv1alpha1.Server{}
for _, server := range cfg.KnownServers {
ep, err := config.EndpointFromServer(server)
if err != nil {
return nil, err
}
s := rpad(server.Name, 20)
s = fmt.Sprintf("%s(%s)", s, ep)
servers[s] = server
}
if endpoint == "" {
endpoint, _ = os.LookupEnv(config.EnvEndpointKey)
}
// If there are no existing servers
if len(servers) == 0 {
return createNewServer()
}
serverKeys := getKeys(servers)
serverKeys = append(serverKeys, newServerSelector)
servers[newServerSelector] = &configv1alpha1.Server{}
err := component.Prompt(
&component.PromptConfig{
Message: "Select a server",
Options: serverKeys,
Default: serverKeys[0],
},
&server,
promptOpts...,
)
if err != nil {
return nil, err
}
return servers[server], nil
}
func getKeys(m map[string]*configv1alpha1.Server) []string {
keys := make([]string, 0, len(m))
for key := range m {
keys = append(keys, key)
}
sort.Strings(keys)
return keys
}
func isGlobalServer(endpoint string) bool {
if strings.Contains(endpoint, knownGlobalHost) {
return true
}
if forceCSP {
return true
}
return false
}
func rpad(s string, padding int) string {
template := fmt.Sprintf("%%-%ds", padding)
return fmt.Sprintf(template, s)
}
func getPromptOpts() []component.PromptOpt {
var promptOpts []component.PromptOpt
if stderrOnly {
// This uses stderr because it needs to work inside the kubectl exec plugin flow where stdout is reserved.
promptOpts = append(promptOpts, component.WithStdio(os.Stdin, os.Stderr, os.Stderr))
}
return promptOpts
}
func createNewServer() (server *configv1alpha1.Server, err error) {
// user provided command line options to create a server using kubeconfig[optional] and context
if kubecontext != "" {
return createServerWithKubeconfig()
}
// user provided command line options to create a server using endpoint
if endpoint != "" {
return createServerWithEndpoint()
}
promptOpts := getPromptOpts()
var loginType string
err = component.Prompt(
&component.PromptConfig{
Message: "Select login type",
Options: []string{"Server endpoint", "Local kubeconfig"},
Default: "Server endpoint",
},
&loginType,
promptOpts...,
)
if err != nil {
return server, err
}
if loginType == "Server endpoint" {
return createServerWithEndpoint()
}
return createServerWithKubeconfig()
}
func createServerWithKubeconfig() (server *configv1alpha1.Server, err error) {
promptOpts := getPromptOpts()
if kubeConfig == "" && kubecontext == "" {
err = component.Prompt(
&component.PromptConfig{
Message: "Enter path to kubeconfig (if any)",
},
&kubeConfig,
promptOpts...,
)
if err != nil {
return
}
}
if kubeConfig == "" {
kubeConfig = getDefaultKubeconfigPath()
}
if kubeConfig != "" && kubecontext == "" {
err = component.Prompt(
&component.PromptConfig{
Message: "Enter kube context to use",
},
&kubecontext,
promptOpts...,
)
if err != nil {
return
}
}
if name == "" {
err = component.Prompt(
&component.PromptConfig{
Message: "Give the server a name",
},
&name,
promptOpts...,
)
if err != nil {
return
}
}
nameExists, err := config.ServerExists(name)
if err != nil {
return server, err
}
if nameExists {
err = fmt.Errorf("server %q already exists", name)
return
}
server = &configv1alpha1.Server{
Name: name,
Type: configv1alpha1.ManagementClusterServerType,
ManagementClusterOpts: &configv1alpha1.ManagementClusterServer{
Path: kubeConfig,
Context: kubecontext,
Endpoint: endpoint},
}
return server, err
}
func createServerWithEndpoint() (server *configv1alpha1.Server, err error) {
promptOpts := getPromptOpts()
if endpoint == "" {
err = component.Prompt(
&component.PromptConfig{
Message: "Enter server endpoint",
},
&endpoint,
promptOpts...,
)
if err != nil {
return
}
}
if name == "" {
err = component.Prompt(
&component.PromptConfig{
Message: "Give the server a name",
},
&name,
promptOpts...,
)
if err != nil {
| }
nameExists, err := config.ServerExists(name)
if err != nil {
return server, err
}
if nameExists {
err = fmt.Errorf("server %q already exists", name)
return
}
if isGlobalServer(endpoint) {
server = &configv1alpha1.Server{
Name: name | return
}
| conditional_block |
main.go | (&name, "name", "", "name of the server")
p.Cmd.Flags().StringVar(&apiToken, "apiToken", "", "API token for global login")
p.Cmd.Flags().StringVar(&server, "server", "", "login to the given server")
p.Cmd.Flags().StringVar(&kubeConfig, "kubeconfig", "", "path to kubeconfig management cluster. Valid only if user doesn't choose 'endpoint' option.(See [*])")
p.Cmd.Flags().StringVar(&kubecontext, "context", "", "the context in the kubeconfig to use for management cluster. Valid only if user doesn't choose 'endpoint' option.(See [*]) ")
p.Cmd.Flags().BoolVar(&stderrOnly, "stderr-only", false, "send all output to stderr rather than stdout")
p.Cmd.Flags().BoolVar(&forceCSP, "force-csp", false, "force the endpoint to be logged in as a csp server")
p.Cmd.Flags().BoolVar(&staging, "staging", false, "use CSP staging issuer")
p.Cmd.Flags().MarkHidden("stderr-only") //nolint
p.Cmd.Flags().MarkHidden("force-csp") //nolint
p.Cmd.Flags().MarkHidden("staging") //nolint
p.Cmd.RunE = login
p.Cmd.Example = `
# Login to TKG management cluster using endpoint
tanzu login --endpoint "https://login.example.com" --name mgmt-cluster
# Login to TKG management cluster by using kubeconfig path and context for the management cluster
tanzu login --kubeconfig path/to/kubeconfig --context path/to/context --name mgmt-cluster
# Login to TKG management cluster by using default kubeconfig path and context for the management cluster
tanzu login --context path/to/context --name mgmt-cluster
# Login to an existing server
tanzu login --server mgmt-cluster
[*] : User has two options to login to TKG. User can choose the login endpoint option
by providing 'endpoint', or user can choose to use the kubeconfig for the management cluster by
providing 'kubeconfig' and 'context'. If only '--context' is set and '--kubeconfig' is unset
$KUBECONFIG env variable would be used and, if $KUBECONFIG env is also unset default
kubeconfig($HOME/.kube/config) would be used
`
if err := p.Execute(); err != nil {
os.Exit(1)
}
}
func login(cmd *cobra.Command, args []string) (err error) {
cfg, err := config.GetClientConfig()
if _, ok := err.(*config.ClientConfigNotExistError); ok {
cfg, err = config.NewClientConfig()
if err != nil {
return err
}
} else if err != nil {
return err
}
newServerSelector := "+ new server"
var serverTarget *configv1alpha1.Server
if name != "" {
serverTarget, err = createNewServer()
if err != nil {
return err
}
} else if server == "" {
serverTarget, err = getServerTarget(cfg, newServerSelector)
if err != nil {
return err
}
} else {
serverTarget, err = config.GetServer(server)
if err != nil {
return err
}
}
if server == newServerSelector {
serverTarget, err = createNewServer()
if err != nil {
return err
}
}
if serverTarget.Type == configv1alpha1.GlobalServerType {
return globalLogin(serverTarget)
}
return managementClusterLogin(serverTarget)
}
func getServerTarget(cfg *configv1alpha1.ClientConfig, newServerSelector string) (*configv1alpha1.Server, error) {
| serverKeys = append(serverKeys, newServerSelector)
servers[newServerSelector] = &configv1alpha1.Server{}
err := component.Prompt(
&component.PromptConfig{
Message: "Select a server",
Options: serverKeys,
Default: serverKeys[0],
},
&server,
promptOpts...,
)
if err != nil {
return nil, err
}
return servers[server], nil
}
func getKeys(m map[string]*configv1alpha1.Server) []string {
keys := make([]string, 0, len(m))
for key := range m {
keys = append(keys, key)
}
sort.Strings(keys)
return keys
}
func isGlobalServer(endpoint string) bool {
if strings.Contains(endpoint, knownGlobalHost) {
return true
}
if forceCSP {
return true
}
return false
}
func rpad(s string, padding int) string {
template := fmt.Sprintf("%%-%ds", padding)
return fmt.Sprintf(template, s)
}
func getPromptOpts() []component.PromptOpt {
var promptOpts []component.PromptOpt
if stderrOnly {
// This uses stderr because it needs to work inside the kubectl exec plugin flow where stdout is reserved.
promptOpts = append(promptOpts, component.WithStdio(os.Stdin, os.Stderr, os.Stderr))
}
return promptOpts
}
func createNewServer() (server *configv1alpha1.Server, err error) {
// user provided command line options to create a server using kubeconfig[optional] and context
if kubecontext != "" {
return createServerWithKubeconfig()
}
// user provided command line options to create a server using endpoint
if endpoint != "" {
return createServerWithEndpoint()
}
promptOpts := getPromptOpts()
var loginType string
err = component.Prompt(
&component.PromptConfig{
Message: "Select login type",
Options: []string{"Server endpoint", "Local kubeconfig"},
Default: "Server endpoint",
},
&loginType,
promptOpts...,
)
if err != nil {
return server, err
}
if loginType == "Server endpoint" {
return createServerWithEndpoint()
}
return createServerWithKubeconfig()
}
func createServerWithKubeconfig() (server *configv1alpha1.Server, err error) {
promptOpts := getPromptOpts()
if kubeConfig == "" && kubecontext == "" {
err = component.Prompt(
&component.PromptConfig{
Message: "Enter path to kubeconfig (if any)",
},
&kubeConfig,
promptOpts...,
)
if err != nil {
return
}
}
if kubeConfig == "" {
kubeConfig = getDefaultKubeconfigPath()
}
if kubeConfig != "" && kubecontext == "" {
err = component.Prompt(
&component.PromptConfig{
Message: "Enter kube context to use",
},
&kubecontext,
promptOpts...,
)
if err != nil {
return
}
}
if name == "" {
err = component.Prompt(
&component.PromptConfig{
Message: "Give the server a name",
},
&name,
promptOpts...,
)
if err != nil {
return
}
}
nameExists, err := config.ServerExists(name)
if err != nil {
return server, err
}
if nameExists {
err = fmt.Errorf("server %q already exists", name)
return
}
server = &configv1alpha1.Server{
Name: name,
Type: configv1alpha1.ManagementClusterServerType,
ManagementClusterOpts: &configv1alpha1.ManagementClusterServer{
Path: kubeConfig,
Context: kubecontext,
Endpoint: endpoint},
}
return server, err
}
func createServerWithEndpoint() (server *configv1alpha1.Server, err error) {
promptOpts := getPromptOpts()
if endpoint == "" {
err = component.Prompt(
&component.PromptConfig{
Message: "Enter server endpoint",
},
&endpoint,
promptOpts...,
)
if err != nil {
return
}
}
if name == "" {
err = component.Prompt(
&component.PromptConfig{
Message: "Give the server a name",
},
&name,
promptOpts...,
)
if err != nil {
return
}
}
nameExists, err := config.ServerExists(name)
if err != nil {
return server, err
}
if nameExists {
err = fmt.Errorf("server %q already exists", name)
return
}
if isGlobalServer(endpoint) {
server = &configv1alpha1.Server{
Name: name | promptOpts := getPromptOpts()
servers := map[string]*configv1alpha1.Server{}
for _, server := range cfg.KnownServers {
ep, err := config.EndpointFromServer(server)
if err != nil {
return nil, err
}
s := rpad(server.Name, 20)
s = fmt.Sprintf("%s(%s)", s, ep)
servers[s] = server
}
if endpoint == "" {
endpoint, _ = os.LookupEnv(config.EnvEndpointKey)
}
// If there are no existing servers
if len(servers) == 0 {
return createNewServer()
}
serverKeys := getKeys(servers) | identifier_body |
main.go | var descriptor = cliv1alpha1.PluginDescriptor{
Name: "login",
Description: "Login to the platform",
Group: cliv1alpha1.SystemCmdGroup,
Aliases: []string{"lo", "logins"},
}
var (
stderrOnly, forceCSP, staging bool
endpoint, name, apiToken, server, kubeConfig, kubecontext string
)
const (
knownGlobalHost = "cloud.vmware.com"
)
func main() {
p, err := plugin.NewPlugin(&descriptor)
if err != nil {
log.Fatal(err)
}
p.Cmd.Flags().StringVar(&endpoint, "endpoint", "", "endpoint to login to")
p.Cmd.Flags().StringVar(&name, "name", "", "name of the server")
p.Cmd.Flags().StringVar(&apiToken, "apiToken", "", "API token for global login")
p.Cmd.Flags().StringVar(&server, "server", "", "login to the given server")
p.Cmd.Flags().StringVar(&kubeConfig, "kubeconfig", "", "path to kubeconfig management cluster. Valid only if user doesn't choose 'endpoint' option.(See [*])")
p.Cmd.Flags().StringVar(&kubecontext, "context", "", "the context in the kubeconfig to use for management cluster. Valid only if user doesn't choose 'endpoint' option.(See [*]) ")
p.Cmd.Flags().BoolVar(&stderrOnly, "stderr-only", false, "send all output to stderr rather than stdout")
p.Cmd.Flags().BoolVar(&forceCSP, "force-csp", false, "force the endpoint to be logged in as a csp server")
p.Cmd.Flags().BoolVar(&staging, "staging", false, "use CSP staging issuer")
p.Cmd.Flags().MarkHidden("stderr-only") //nolint
p.Cmd.Flags().MarkHidden("force-csp") //nolint
p.Cmd.Flags().MarkHidden("staging") //nolint
p.Cmd.RunE = login
p.Cmd.Example = `
# Login to TKG management cluster using endpoint
tanzu login --endpoint "https://login.example.com" --name mgmt-cluster
# Login to TKG management cluster by using kubeconfig path and context for the management cluster
tanzu login --kubeconfig path/to/kubeconfig --context path/to/context --name mgmt-cluster
# Login to TKG management cluster by using default kubeconfig path and context for the management cluster
tanzu login --context path/to/context --name mgmt-cluster
# Login to an existing server
tanzu login --server mgmt-cluster
[*] : User has two options to login to TKG. User can choose the login endpoint option
by providing 'endpoint', or user can choose to use the kubeconfig for the management cluster by
providing 'kubeconfig' and 'context'. If only '--context' is set and '--kubeconfig' is unset
$KUBECONFIG env variable would be used and, if $KUBECONFIG env is also unset default
kubeconfig($HOME/.kube/config) would be used
`
if err := p.Execute(); err != nil {
os.Exit(1)
}
}
func login(cmd *cobra.Command, args []string) (err error) {
cfg, err := config.GetClientConfig()
if _, ok := err.(*config.ClientConfigNotExistError); ok {
cfg, err = config.NewClientConfig()
if err != nil {
return err
}
} else if err != nil {
return err
}
newServerSelector := "+ new server"
var serverTarget *configv1alpha1.Server
if name != "" {
serverTarget, err = createNewServer()
if err != nil {
return err
}
} else if server == "" {
serverTarget, err = getServerTarget(cfg, newServerSelector)
if err != nil {
return err
}
} else {
serverTarget, err = config.GetServer(server)
if err != nil {
return err
}
}
if server == newServerSelector {
serverTarget, err = createNewServer()
if err != nil {
return err
}
}
if serverTarget.Type == configv1alpha1.GlobalServerType {
return globalLogin(serverTarget)
}
return managementClusterLogin(serverTarget)
}
func getServerTarget(cfg *configv1alpha1.ClientConfig, newServerSelector string) (*configv1alpha1.Server, error) {
promptOpts := getPromptOpts()
servers := map[string]*configv1alpha1.Server{}
for _, server := range cfg.KnownServers {
ep, err := config.EndpointFromServer(server)
if err != nil {
return nil, err
}
s := rpad(server.Name, 20)
s = fmt.Sprintf("%s(%s)", s, ep)
servers[s] = server
}
if endpoint == "" {
endpoint, _ = os.LookupEnv(config.EnvEndpointKey)
}
// If there are no existing servers
if len(servers) == 0 {
return createNewServer()
}
serverKeys := getKeys(servers)
serverKeys = append(serverKeys, newServerSelector)
servers[newServerSelector] = &configv1alpha1.Server{}
err := component.Prompt(
&component.PromptConfig{
Message: "Select a server",
Options: serverKeys,
Default: serverKeys[0],
},
&server,
promptOpts...,
)
if err != nil {
return nil, err
}
return servers[server], nil
}
func getKeys(m map[string]*configv1alpha1.Server) []string {
keys := make([]string, 0, len(m))
for key := range m {
keys = append(keys, key)
}
sort.Strings(keys)
return keys
}
func isGlobalServer(endpoint string) bool {
if strings.Contains(endpoint, knownGlobalHost) {
return true
}
if forceCSP {
return true
}
return false
}
func rpad(s string, padding int) string {
template := fmt.Sprintf("%%-%ds", padding)
return fmt.Sprintf(template, s)
}
func getPromptOpts() []component.PromptOpt {
var promptOpts []component.PromptOpt
if stderrOnly {
// This uses stderr because it needs to work inside the kubectl exec plugin flow where stdout is reserved.
promptOpts = append(promptOpts, component.WithStdio(os.Stdin, os.Stderr, os.Stderr))
}
return promptOpts
}
func createNewServer() (server *configv1alpha1.Server, err error) {
// user provided command line options to create a server using kubeconfig[optional] and context
if kubecontext != "" {
return createServerWithKubeconfig()
}
// user provided command line options to create a server using endpoint
if endpoint != "" {
return createServerWithEndpoint()
}
promptOpts := getPromptOpts()
var loginType string
err = component.Prompt(
&component.PromptConfig{
Message: "Select login type",
Options: []string{"Server endpoint", "Local kubeconfig"},
Default: "Server endpoint",
},
&loginType,
promptOpts...,
)
if err != nil {
return server, err
}
if loginType == "Server endpoint" {
return createServerWithEndpoint()
}
return createServerWithKubeconfig()
}
func createServerWithKubeconfig() (server *configv1alpha1.Server, err error) {
promptOpts := getPromptOpts()
if kubeConfig == "" && kubecontext == "" {
err = component.Prompt(
&component.PromptConfig{
Message: "Enter path to kubeconfig (if any)",
},
&kubeConfig,
promptOpts...,
)
if err != nil {
return
}
}
if kubeConfig == "" {
kubeConfig = getDefaultKubeconfigPath()
}
if kubeConfig != "" && kubecontext == "" {
err = component.Prompt(
&component.PromptConfig{
Message: "Enter kube context to use",
},
&kubecontext,
promptOpts...,
)
if err != nil {
return
}
}
if name == "" {
err = component.Prompt(
&component.PromptConfig{
Message: "Give the server a name",
},
&name,
promptOpts...,
)
if err != nil {
return
}
}
nameExists, err := config.ServerExists(name)
if err != nil {
return server, err
}
if nameExists {
err = fmt.Errorf("server %q already exists", name)
return
}
server = &configv1alpha1.Server{
Name: name,
Type: configv1alpha1.ManagementClusterServerType,
ManagementClusterOpts: &configv1alpha1.ManagementClusterServer{
Path: kubeConfig,
Context: kubecontext,
Endpoint: endpoint},
}
return server, err
}
func createServerWithEndpoint() (server *configv1alpha1.Server, err error) {
promptOpts := getPromptOpts()
if endpoint == "" {
err = component.Prompt(
&component.PromptConfig{
| "github.com/vmware-tanzu/tanzu-framework/pkg/v1/config"
)
| random_line_split | |
main.go | () {
p, err := plugin.NewPlugin(&descriptor)
if err != nil {
log.Fatal(err)
}
p.Cmd.Flags().StringVar(&endpoint, "endpoint", "", "endpoint to login to")
p.Cmd.Flags().StringVar(&name, "name", "", "name of the server")
p.Cmd.Flags().StringVar(&apiToken, "apiToken", "", "API token for global login")
p.Cmd.Flags().StringVar(&server, "server", "", "login to the given server")
p.Cmd.Flags().StringVar(&kubeConfig, "kubeconfig", "", "path to kubeconfig management cluster. Valid only if user doesn't choose 'endpoint' option.(See [*])")
p.Cmd.Flags().StringVar(&kubecontext, "context", "", "the context in the kubeconfig to use for management cluster. Valid only if user doesn't choose 'endpoint' option.(See [*]) ")
p.Cmd.Flags().BoolVar(&stderrOnly, "stderr-only", false, "send all output to stderr rather than stdout")
p.Cmd.Flags().BoolVar(&forceCSP, "force-csp", false, "force the endpoint to be logged in as a csp server")
p.Cmd.Flags().BoolVar(&staging, "staging", false, "use CSP staging issuer")
p.Cmd.Flags().MarkHidden("stderr-only") //nolint
p.Cmd.Flags().MarkHidden("force-csp") //nolint
p.Cmd.Flags().MarkHidden("staging") //nolint
p.Cmd.RunE = login
p.Cmd.Example = `
# Login to TKG management cluster using endpoint
tanzu login --endpoint "https://login.example.com" --name mgmt-cluster
# Login to TKG management cluster by using kubeconfig path and context for the management cluster
tanzu login --kubeconfig path/to/kubeconfig --context path/to/context --name mgmt-cluster
# Login to TKG management cluster by using default kubeconfig path and context for the management cluster
tanzu login --context path/to/context --name mgmt-cluster
# Login to an existing server
tanzu login --server mgmt-cluster
[*] : User has two options to login to TKG. User can choose the login endpoint option
by providing 'endpoint', or user can choose to use the kubeconfig for the management cluster by
providing 'kubeconfig' and 'context'. If only '--context' is set and '--kubeconfig' is unset
$KUBECONFIG env variable would be used and, if $KUBECONFIG env is also unset default
kubeconfig($HOME/.kube/config) would be used
`
if err := p.Execute(); err != nil {
os.Exit(1)
}
}
func login(cmd *cobra.Command, args []string) (err error) {
cfg, err := config.GetClientConfig()
if _, ok := err.(*config.ClientConfigNotExistError); ok {
cfg, err = config.NewClientConfig()
if err != nil {
return err
}
} else if err != nil {
return err
}
newServerSelector := "+ new server"
var serverTarget *configv1alpha1.Server
if name != "" {
serverTarget, err = createNewServer()
if err != nil {
return err
}
} else if server == "" {
serverTarget, err = getServerTarget(cfg, newServerSelector)
if err != nil {
return err
}
} else {
serverTarget, err = config.GetServer(server)
if err != nil {
return err
}
}
if server == newServerSelector {
serverTarget, err = createNewServer()
if err != nil {
return err
}
}
if serverTarget.Type == configv1alpha1.GlobalServerType {
return globalLogin(serverTarget)
}
return managementClusterLogin(serverTarget)
}
func getServerTarget(cfg *configv1alpha1.ClientConfig, newServerSelector string) (*configv1alpha1.Server, error) {
promptOpts := getPromptOpts()
servers := map[string]*configv1alpha1.Server{}
for _, server := range cfg.KnownServers {
ep, err := config.EndpointFromServer(server)
if err != nil {
return nil, err
}
s := rpad(server.Name, 20)
s = fmt.Sprintf("%s(%s)", s, ep)
servers[s] = server
}
if endpoint == "" {
endpoint, _ = os.LookupEnv(config.EnvEndpointKey)
}
// If there are no existing servers
if len(servers) == 0 {
return createNewServer()
}
serverKeys := getKeys(servers)
serverKeys = append(serverKeys, newServerSelector)
servers[newServerSelector] = &configv1alpha1.Server{}
err := component.Prompt(
&component.PromptConfig{
Message: "Select a server",
Options: serverKeys,
Default: serverKeys[0],
},
&server,
promptOpts...,
)
if err != nil {
return nil, err
}
return servers[server], nil
}
func getKeys(m map[string]*configv1alpha1.Server) []string {
keys := make([]string, 0, len(m))
for key := range m {
keys = append(keys, key)
}
sort.Strings(keys)
return keys
}
func isGlobalServer(endpoint string) bool {
if strings.Contains(endpoint, knownGlobalHost) {
return true
}
if forceCSP {
return true
}
return false
}
func rpad(s string, padding int) string {
template := fmt.Sprintf("%%-%ds", padding)
return fmt.Sprintf(template, s)
}
func getPromptOpts() []component.PromptOpt {
var promptOpts []component.PromptOpt
if stderrOnly {
// This uses stderr because it needs to work inside the kubectl exec plugin flow where stdout is reserved.
promptOpts = append(promptOpts, component.WithStdio(os.Stdin, os.Stderr, os.Stderr))
}
return promptOpts
}
func createNewServer() (server *configv1alpha1.Server, err error) {
// user provided command line options to create a server using kubeconfig[optional] and context
if kubecontext != "" {
return createServerWithKubeconfig()
}
// user provided command line options to create a server using endpoint
if endpoint != "" {
return createServerWithEndpoint()
}
promptOpts := getPromptOpts()
var loginType string
err = component.Prompt(
&component.PromptConfig{
Message: "Select login type",
Options: []string{"Server endpoint", "Local kubeconfig"},
Default: "Server endpoint",
},
&loginType,
promptOpts...,
)
if err != nil {
return server, err
}
if loginType == "Server endpoint" {
return createServerWithEndpoint()
}
return createServerWithKubeconfig()
}
func createServerWithKubeconfig() (server *configv1alpha1.Server, err error) {
promptOpts := getPromptOpts()
if kubeConfig == "" && kubecontext == "" {
err = component.Prompt(
&component.PromptConfig{
Message: "Enter path to kubeconfig (if any)",
},
&kubeConfig,
promptOpts...,
)
if err != nil {
return
}
}
if kubeConfig == "" {
kubeConfig = getDefaultKubeconfigPath()
}
if kubeConfig != "" && kubecontext == "" {
err = component.Prompt(
&component.PromptConfig{
Message: "Enter kube context to use",
},
&kubecontext,
promptOpts...,
)
if err != nil {
return
}
}
if name == "" {
err = component.Prompt(
&component.PromptConfig{
Message: "Give the server a name",
},
&name,
promptOpts...,
)
if err != nil {
return
}
}
nameExists, err := config.ServerExists(name)
if err != nil {
return server, err
}
if nameExists {
err = fmt.Errorf("server %q already exists", name)
return
}
server = &configv1alpha1.Server{
Name: name,
Type: configv1alpha1.ManagementClusterServerType,
ManagementClusterOpts: &configv1alpha1.ManagementClusterServer{
Path: kubeConfig,
Context: kubecontext,
Endpoint: endpoint},
}
return server, err
}
func createServerWithEndpoint() (server *configv1alpha1.Server, err error) {
promptOpts := getPromptOpts()
if endpoint == "" {
err = component.Prompt(
&component.PromptConfig{
Message: "Enter server endpoint",
},
&endpoint,
promptOpts...,
)
if err != nil {
return
}
}
if name == "" {
err = component.Prompt(
&component.PromptConfig{
Message: "Give the server a name",
},
&name,
promptOpts...,
)
if err != nil {
return
}
}
nameExists, err := config.ServerExists(name)
if err != nil {
return server, | main | identifier_name | |
entity.rs | ,
}
pub type AccountsMap = HashMap<String, Account>;
/// Represents the whole config file.
#[derive(Debug, Default, Clone, Deserialize)]
#[serde(rename_all = "kebab-case")]
pub struct Config {
// TODO: rename with `from`
pub name: String,
pub downloads_dir: Option<PathBuf>,
pub notify_cmd: Option<String>,
/// Option to override the default signature delimiter "`--\n `".
pub signature_delimiter: Option<String>,
pub signature: Option<String>,
pub default_page_size: Option<usize>,
pub watch_cmds: Option<Vec<String>>,
#[serde(flatten)]
pub accounts: HashMap<String, Account>,
}
impl Config {
fn path_from_xdg() -> Result<PathBuf> {
let path = env::var("XDG_CONFIG_HOME").context("cannot find `XDG_CONFIG_HOME` env var")?;
let mut path = PathBuf::from(path);
path.push("himalaya");
path.push("config.toml");
Ok(path)
}
fn path_from_xdg_alt() -> Result<PathBuf> {
let home_var = if cfg!(target_family = "windows") {
"USERPROFILE"
} else {
"HOME"
};
let mut path: PathBuf = env::var(home_var)
.context(format!("cannot find `{}` env var", home_var))?
.into();
path.push(".config");
path.push("himalaya");
path.push("config.toml");
Ok(path)
}
fn path_from_home() -> Result<PathBuf> {
let home_var = if cfg!(target_family = "windows") {
"USERPROFILE"
} else {
"HOME"
};
let mut path: PathBuf = env::var(home_var)
.context(format!("cannot find `{}` env var", home_var))?
.into();
path.push(".himalayarc");
Ok(path)
}
pub fn path() -> Result<PathBuf> {
let path = Self::path_from_xdg()
.or_else(|_| Self::path_from_xdg_alt())
.or_else(|_| Self::path_from_home())
.context("cannot find config path")?;
Ok(path)
}
/// Returns the account by the given name.
/// If `name` is `None`, then the default account is returned.
pub fn find_account_by_name(&self, name: Option<&str>) -> Result<&Account> {
match name {
Some("") | None => self
.accounts
.iter()
.find(|(_, account)| account.default.unwrap_or(false))
.map(|(_, account)| account)
.ok_or_else(|| anyhow!("cannot find default account")),
Some(name) => self
.accounts
.get(name)
.ok_or_else(|| anyhow!(format!("cannot find account `{}`", name))),
}
}
/// Returns the path to the given filename in the download directory.
/// You can imagine this as:
/// ```skip
/// Account-specifique-download-dir-path + Attachment-Filename
/// ```
pub fn downloads_filepath(&self, account: &Account, filename: &str) -> PathBuf {
account
.downloads_dir
.as_ref()
.and_then(|dir| dir.to_str())
.and_then(|dir| shellexpand::full(dir).ok())
.map(|dir| PathBuf::from(dir.to_string()))
.unwrap_or(
self.downloads_dir
.as_ref()
.and_then(|dir| dir.to_str())
.and_then(|dir| shellexpand::full(dir).ok())
.map(|dir| PathBuf::from(dir.to_string()))
.unwrap_or(env::temp_dir()),
)
.join(filename)
}
/// This is a little helper-function like which uses the the name and email
/// of the account to create a valid address for the header of the headers
/// of a msg.
///
/// # Hint
/// If the name includes some special characters like a whitespace, comma or semicolon, then
/// the name will be automatically wrapped between two `"`.
///
/// # Exapmle
/// ```
/// use himalaya::config::model::{Account, Config};
///
/// fn main() {
/// let config = Config::default();
///
/// let normal_account = Account::new(Some("Acc1"), "acc1@mail.com");
/// // notice the semicolon in the name!
/// let special_account = Account::new(Some("TL;DR"), "acc2@mail.com");
///
/// // -- Expeced outputs --
/// let expected_normal = Account {
/// name: Some("Acc1".to_string()),
/// email: "acc1@mail.com".to_string(),
/// .. Account::default()
/// };
///
/// let expected_special = Account {
/// name: Some("\"TL;DR\"".to_string()),
/// email: "acc2@mail.com".to_string(),
/// .. Account::default()
/// };
///
/// assert_eq!(config.address(&normal_account), "Acc1 <acc1@mail.com>");
/// assert_eq!(config.address(&special_account), "\"TL;DR\" <acc2@mail.com>");
/// }
/// ```
pub fn address(&self, account: &Account) -> String |
pub fn run_notify_cmd<S: AsRef<str>>(&self, subject: S, sender: S) -> Result<()> {
let subject = subject.as_ref();
let sender = sender.as_ref();
let default_cmd = format!(r#"notify-send "📫 {}" "{}""#, sender, subject);
let cmd = self
.notify_cmd
.as_ref()
.map(|cmd| format!(r#"{} {:?} {:?}"#, cmd, subject, sender))
.unwrap_or(default_cmd);
run_cmd(&cmd).context("cannot run notify cmd")?;
Ok(())
}
/// Returns the signature of the given acccount in combination witht the sigantion delimiter.
/// If the account doesn't have a signature, then the global signature is used.
///
/// # Example
/// ```
/// use himalaya::config::model::{Config, Account};
///
/// fn main() {
/// let config = Config {
/// signature: Some("Global signature".to_string()),
/// .. Config::default()
/// };
///
/// // a config without a global signature
/// let config_no_global = Config::default();
///
/// let account1 = Account::new_with_signature(Some("Account Name"), "mail@address.com", Some("Cya"));
/// let account2 = Account::new(Some("Bruh"), "mail@address.com");
///
/// // Hint: Don't forget the default signature delimiter: '\n-- \n'
/// assert_eq!(config.signature(&account1), Some("\n-- \nCya".to_string()));
/// assert_eq!(config.signature(&account2), Some("\n-- \nGlobal signature".to_string()));
///
/// assert_eq!(config_no_global.signature(&account2), None);
/// }
/// ```
pub fn signature(&self, account: &Account) -> Option<String> {
let default_sig_delim = String::from("-- \n");
let sig_delim = account
.signature_delimiter
.as_ref()
.or_else(|| self.signature_delimiter.as_ref())
.unwrap_or(&default_sig_delim);
let sig = account
.signature
.as_ref()
.or_else(|| self.signature.as_ref());
sig.and_then(|sig| shellexpand::full(sig).ok())
.map(|sig| sig.to_string())
.and_then(|sig| fs::read_to_string(sig).ok())
.or_else(|| sig.map(|sig| sig.to_owned()))
.map(|sig| format!("\n{}{}", sig_delim, sig))
}
pub fn default_page_size(&self, account: &Account) -> usize {
account
.default_page_size
.as_ref()
.or_else(|| self.default_page_size.as_ref())
.or(Some(&DEFAULT_PAGE_SIZE))
.unwrap()
.to_owned()
}
pub fn exec_watch_cmds(&self, account: &Account) -> Result<()> {
let cmds = account
.watch_cmds
.as_ref()
.or_else(|| self.watch_cmds.as_ref())
.map(|cmds| cmds.to_owned())
.unwrap_or_default();
thread::spawn(move || {
debug!("batch execution of {} cmd(s)", cmds.len());
cmds.iter().for_each(|cmd| {
debug!("running command {:?}…", cmd);
let res = run_cmd(cmd);
debug!("{:?}", | {
let name = account.name.as_ref().unwrap_or(&self.name);
let has_special_chars = "()<>[]:;@.,".contains(|special_char| name.contains(special_char));
if name.is_empty() {
format!("{}", account.email)
} else if has_special_chars {
// so the name has special characters => Wrap it with '"'
format!("\"{}\" <{}>", name, account.email)
} else {
format!("{} <{}>", name, account.email)
}
} | identifier_body |
entity.rs | HashMap<String, Account>,
}
impl Config {
fn path_from_xdg() -> Result<PathBuf> {
let path = env::var("XDG_CONFIG_HOME").context("cannot find `XDG_CONFIG_HOME` env var")?;
let mut path = PathBuf::from(path);
path.push("himalaya");
path.push("config.toml");
Ok(path)
}
fn path_from_xdg_alt() -> Result<PathBuf> {
let home_var = if cfg!(target_family = "windows") {
"USERPROFILE"
} else {
"HOME"
};
let mut path: PathBuf = env::var(home_var)
.context(format!("cannot find `{}` env var", home_var))?
.into();
path.push(".config");
path.push("himalaya");
path.push("config.toml");
Ok(path)
}
fn path_from_home() -> Result<PathBuf> {
let home_var = if cfg!(target_family = "windows") {
"USERPROFILE"
} else {
"HOME"
};
let mut path: PathBuf = env::var(home_var)
.context(format!("cannot find `{}` env var", home_var))?
.into();
path.push(".himalayarc");
Ok(path)
}
pub fn path() -> Result<PathBuf> {
let path = Self::path_from_xdg()
.or_else(|_| Self::path_from_xdg_alt())
.or_else(|_| Self::path_from_home())
.context("cannot find config path")?;
Ok(path)
}
/// Returns the account by the given name.
/// If `name` is `None`, then the default account is returned.
pub fn find_account_by_name(&self, name: Option<&str>) -> Result<&Account> {
match name {
Some("") | None => self
.accounts
.iter()
.find(|(_, account)| account.default.unwrap_or(false))
.map(|(_, account)| account)
.ok_or_else(|| anyhow!("cannot find default account")),
Some(name) => self
.accounts
.get(name)
.ok_or_else(|| anyhow!(format!("cannot find account `{}`", name))),
}
}
/// Returns the path to the given filename in the download directory.
/// You can imagine this as:
/// ```skip
/// Account-specifique-download-dir-path + Attachment-Filename
/// ```
pub fn downloads_filepath(&self, account: &Account, filename: &str) -> PathBuf {
account
.downloads_dir
.as_ref()
.and_then(|dir| dir.to_str())
.and_then(|dir| shellexpand::full(dir).ok())
.map(|dir| PathBuf::from(dir.to_string()))
.unwrap_or(
self.downloads_dir
.as_ref()
.and_then(|dir| dir.to_str())
.and_then(|dir| shellexpand::full(dir).ok())
.map(|dir| PathBuf::from(dir.to_string()))
.unwrap_or(env::temp_dir()),
)
.join(filename)
}
/// This is a little helper-function like which uses the the name and email
/// of the account to create a valid address for the header of the headers
/// of a msg.
///
/// # Hint
/// If the name includes some special characters like a whitespace, comma or semicolon, then
/// the name will be automatically wrapped between two `"`.
///
/// # Exapmle
/// ```
/// use himalaya::config::model::{Account, Config};
///
/// fn main() {
/// let config = Config::default();
///
/// let normal_account = Account::new(Some("Acc1"), "acc1@mail.com");
/// // notice the semicolon in the name!
/// let special_account = Account::new(Some("TL;DR"), "acc2@mail.com");
///
/// // -- Expeced outputs --
/// let expected_normal = Account {
/// name: Some("Acc1".to_string()),
/// email: "acc1@mail.com".to_string(),
/// .. Account::default()
/// };
///
/// let expected_special = Account {
/// name: Some("\"TL;DR\"".to_string()),
/// email: "acc2@mail.com".to_string(),
/// .. Account::default()
/// };
///
/// assert_eq!(config.address(&normal_account), "Acc1 <acc1@mail.com>");
/// assert_eq!(config.address(&special_account), "\"TL;DR\" <acc2@mail.com>");
/// }
/// ```
pub fn address(&self, account: &Account) -> String {
let name = account.name.as_ref().unwrap_or(&self.name);
let has_special_chars = "()<>[]:;@.,".contains(|special_char| name.contains(special_char));
if name.is_empty() {
format!("{}", account.email)
} else if has_special_chars {
// so the name has special characters => Wrap it with '"'
format!("\"{}\" <{}>", name, account.email)
} else {
format!("{} <{}>", name, account.email)
}
}
pub fn run_notify_cmd<S: AsRef<str>>(&self, subject: S, sender: S) -> Result<()> {
let subject = subject.as_ref();
let sender = sender.as_ref();
let default_cmd = format!(r#"notify-send "📫 {}" "{}""#, sender, subject);
let cmd = self
.notify_cmd
.as_ref()
.map(|cmd| format!(r#"{} {:?} {:?}"#, cmd, subject, sender))
.unwrap_or(default_cmd);
run_cmd(&cmd).context("cannot run notify cmd")?;
Ok(())
}
/// Returns the signature of the given acccount in combination witht the sigantion delimiter.
/// If the account doesn't have a signature, then the global signature is used.
///
/// # Example
/// ```
/// use himalaya::config::model::{Config, Account};
///
/// fn main() {
/// let config = Config {
/// signature: Some("Global signature".to_string()),
/// .. Config::default()
/// };
///
/// // a config without a global signature
/// let config_no_global = Config::default();
///
/// let account1 = Account::new_with_signature(Some("Account Name"), "mail@address.com", Some("Cya"));
/// let account2 = Account::new(Some("Bruh"), "mail@address.com");
///
/// // Hint: Don't forget the default signature delimiter: '\n-- \n'
/// assert_eq!(config.signature(&account1), Some("\n-- \nCya".to_string()));
/// assert_eq!(config.signature(&account2), Some("\n-- \nGlobal signature".to_string()));
///
/// assert_eq!(config_no_global.signature(&account2), None);
/// }
/// ```
pub fn signature(&self, account: &Account) -> Option<String> {
let default_sig_delim = String::from("-- \n");
let sig_delim = account
.signature_delimiter
.as_ref()
.or_else(|| self.signature_delimiter.as_ref())
.unwrap_or(&default_sig_delim);
let sig = account
.signature
.as_ref()
.or_else(|| self.signature.as_ref());
sig.and_then(|sig| shellexpand::full(sig).ok())
.map(|sig| sig.to_string())
.and_then(|sig| fs::read_to_string(sig).ok())
.or_else(|| sig.map(|sig| sig.to_owned()))
.map(|sig| format!("\n{}{}", sig_delim, sig))
}
pub fn default_page_size(&self, account: &Account) -> usize {
account
.default_page_size
.as_ref()
.or_else(|| self.default_page_size.as_ref())
.or(Some(&DEFAULT_PAGE_SIZE))
.unwrap()
.to_owned()
}
pub fn exec_watch_cmds(&self, account: &Account) -> Result<()> {
let cmds = account
.watch_cmds
.as_ref()
.or_else(|| self.watch_cmds.as_ref())
.map(|cmds| cmds.to_owned())
.unwrap_or_default();
thread::spawn(move || {
debug!("batch execution of {} cmd(s)", cmds.len());
cmds.iter().for_each(|cmd| {
debug!("running command {:?}…", cmd);
let res = run_cmd(cmd);
debug!("{:?}", res);
})
});
Ok(())
}
}
impl TryFrom<Option<&str>> for Config {
type Error = Error;
fn try_from(path: Option<&str>) -> Result<Self, Self::Error> {
debug!("init config from `{:?}`", path);
let path = path.map(|s| s.into()).unwrap_or(Config::path()?);
let content = fs::read_to_string(path).context("cannot read config file")?;
let config = toml::from_str(&content).context("cannot parse config file")?;
trace!("{:#?}", config);
Ok(config)
} | }
// FIXME: tests | random_line_split | |
entity.rs | ,
}
pub type AccountsMap = HashMap<String, Account>;
/// Represents the whole config file.
#[derive(Debug, Default, Clone, Deserialize)]
#[serde(rename_all = "kebab-case")]
pub struct Config {
// TODO: rename with `from`
pub name: String,
pub downloads_dir: Option<PathBuf>,
pub notify_cmd: Option<String>,
/// Option to override the default signature delimiter "`--\n `".
pub signature_delimiter: Option<String>,
pub signature: Option<String>,
pub default_page_size: Option<usize>,
pub watch_cmds: Option<Vec<String>>,
#[serde(flatten)]
pub accounts: HashMap<String, Account>,
}
impl Config {
fn path_from_xdg() -> Result<PathBuf> {
let path = env::var("XDG_CONFIG_HOME").context("cannot find `XDG_CONFIG_HOME` env var")?;
let mut path = PathBuf::from(path);
path.push("himalaya");
path.push("config.toml");
Ok(path)
}
fn path_from_xdg_alt() -> Result<PathBuf> {
let home_var = if cfg!(target_family = "windows") {
"USERPROFILE"
} else {
"HOME"
};
let mut path: PathBuf = env::var(home_var)
.context(format!("cannot find `{}` env var", home_var))?
.into();
path.push(".config");
path.push("himalaya");
path.push("config.toml");
Ok(path)
}
fn | () -> Result<PathBuf> {
let home_var = if cfg!(target_family = "windows") {
"USERPROFILE"
} else {
"HOME"
};
let mut path: PathBuf = env::var(home_var)
.context(format!("cannot find `{}` env var", home_var))?
.into();
path.push(".himalayarc");
Ok(path)
}
pub fn path() -> Result<PathBuf> {
let path = Self::path_from_xdg()
.or_else(|_| Self::path_from_xdg_alt())
.or_else(|_| Self::path_from_home())
.context("cannot find config path")?;
Ok(path)
}
/// Returns the account by the given name.
/// If `name` is `None`, then the default account is returned.
pub fn find_account_by_name(&self, name: Option<&str>) -> Result<&Account> {
match name {
Some("") | None => self
.accounts
.iter()
.find(|(_, account)| account.default.unwrap_or(false))
.map(|(_, account)| account)
.ok_or_else(|| anyhow!("cannot find default account")),
Some(name) => self
.accounts
.get(name)
.ok_or_else(|| anyhow!(format!("cannot find account `{}`", name))),
}
}
/// Returns the path to the given filename in the download directory.
/// You can imagine this as:
/// ```skip
/// Account-specifique-download-dir-path + Attachment-Filename
/// ```
pub fn downloads_filepath(&self, account: &Account, filename: &str) -> PathBuf {
account
.downloads_dir
.as_ref()
.and_then(|dir| dir.to_str())
.and_then(|dir| shellexpand::full(dir).ok())
.map(|dir| PathBuf::from(dir.to_string()))
.unwrap_or(
self.downloads_dir
.as_ref()
.and_then(|dir| dir.to_str())
.and_then(|dir| shellexpand::full(dir).ok())
.map(|dir| PathBuf::from(dir.to_string()))
.unwrap_or(env::temp_dir()),
)
.join(filename)
}
/// This is a little helper-function like which uses the the name and email
/// of the account to create a valid address for the header of the headers
/// of a msg.
///
/// # Hint
/// If the name includes some special characters like a whitespace, comma or semicolon, then
/// the name will be automatically wrapped between two `"`.
///
/// # Exapmle
/// ```
/// use himalaya::config::model::{Account, Config};
///
/// fn main() {
/// let config = Config::default();
///
/// let normal_account = Account::new(Some("Acc1"), "acc1@mail.com");
/// // notice the semicolon in the name!
/// let special_account = Account::new(Some("TL;DR"), "acc2@mail.com");
///
/// // -- Expeced outputs --
/// let expected_normal = Account {
/// name: Some("Acc1".to_string()),
/// email: "acc1@mail.com".to_string(),
/// .. Account::default()
/// };
///
/// let expected_special = Account {
/// name: Some("\"TL;DR\"".to_string()),
/// email: "acc2@mail.com".to_string(),
/// .. Account::default()
/// };
///
/// assert_eq!(config.address(&normal_account), "Acc1 <acc1@mail.com>");
/// assert_eq!(config.address(&special_account), "\"TL;DR\" <acc2@mail.com>");
/// }
/// ```
pub fn address(&self, account: &Account) -> String {
let name = account.name.as_ref().unwrap_or(&self.name);
let has_special_chars = "()<>[]:;@.,".contains(|special_char| name.contains(special_char));
if name.is_empty() {
format!("{}", account.email)
} else if has_special_chars {
// so the name has special characters => Wrap it with '"'
format!("\"{}\" <{}>", name, account.email)
} else {
format!("{} <{}>", name, account.email)
}
}
pub fn run_notify_cmd<S: AsRef<str>>(&self, subject: S, sender: S) -> Result<()> {
let subject = subject.as_ref();
let sender = sender.as_ref();
let default_cmd = format!(r#"notify-send "📫 {}" "{}""#, sender, subject);
let cmd = self
.notify_cmd
.as_ref()
.map(|cmd| format!(r#"{} {:?} {:?}"#, cmd, subject, sender))
.unwrap_or(default_cmd);
run_cmd(&cmd).context("cannot run notify cmd")?;
Ok(())
}
/// Returns the signature of the given acccount in combination witht the sigantion delimiter.
/// If the account doesn't have a signature, then the global signature is used.
///
/// # Example
/// ```
/// use himalaya::config::model::{Config, Account};
///
/// fn main() {
/// let config = Config {
/// signature: Some("Global signature".to_string()),
/// .. Config::default()
/// };
///
/// // a config without a global signature
/// let config_no_global = Config::default();
///
/// let account1 = Account::new_with_signature(Some("Account Name"), "mail@address.com", Some("Cya"));
/// let account2 = Account::new(Some("Bruh"), "mail@address.com");
///
/// // Hint: Don't forget the default signature delimiter: '\n-- \n'
/// assert_eq!(config.signature(&account1), Some("\n-- \nCya".to_string()));
/// assert_eq!(config.signature(&account2), Some("\n-- \nGlobal signature".to_string()));
///
/// assert_eq!(config_no_global.signature(&account2), None);
/// }
/// ```
pub fn signature(&self, account: &Account) -> Option<String> {
let default_sig_delim = String::from("-- \n");
let sig_delim = account
.signature_delimiter
.as_ref()
.or_else(|| self.signature_delimiter.as_ref())
.unwrap_or(&default_sig_delim);
let sig = account
.signature
.as_ref()
.or_else(|| self.signature.as_ref());
sig.and_then(|sig| shellexpand::full(sig).ok())
.map(|sig| sig.to_string())
.and_then(|sig| fs::read_to_string(sig).ok())
.or_else(|| sig.map(|sig| sig.to_owned()))
.map(|sig| format!("\n{}{}", sig_delim, sig))
}
pub fn default_page_size(&self, account: &Account) -> usize {
account
.default_page_size
.as_ref()
.or_else(|| self.default_page_size.as_ref())
.or(Some(&DEFAULT_PAGE_SIZE))
.unwrap()
.to_owned()
}
pub fn exec_watch_cmds(&self, account: &Account) -> Result<()> {
let cmds = account
.watch_cmds
.as_ref()
.or_else(|| self.watch_cmds.as_ref())
.map(|cmds| cmds.to_owned())
.unwrap_or_default();
thread::spawn(move || {
debug!("batch execution of {} cmd(s)", cmds.len());
cmds.iter().for_each(|cmd| {
debug!("running command {:?}…", cmd);
let res = run_cmd(cmd);
debug!("{:?}", res | path_from_home | identifier_name |
zip.go |
// relative path is converted to absolute path by appending function directory
if len(includeData) == 1 {
i.source = filepath.Join(zw.manifestFilePath, includeData[0])
i.destination = filepath.Join(zw.src, includeData[0])
verboseMsg = wski18n.T(wski18n.ID_VERBOSE_ZIP_INCLUDE_SOURCE_PATH_X_path_X,
map[string]interface{}{
wski18n.KEY_PATH: includeData[0],
})
wskprint.PrintlnOpenWhiskVerbose(Flags.Verbose, verboseMsg)
} else if len(includeData) == 2 {
i.source = filepath.Join(zw.manifestFilePath, includeData[0])
i.destination = zw.src + "/" + includeData[1]
verboseMsg = wski18n.T(wski18n.ID_VERBOSE_ZIP_INCLUDE_SOURCE_PATH_X_path_X_DESTINATION_PATH_X_dest_X,
map[string]interface{}{
wski18n.KEY_PATH: includeData[0],
wski18n.KEY_DESTINATION: includeData[1],
})
} else {
if len(includeData) == 0 {
verboseMsg = wski18n.T(wski18n.ID_VERBOSE_INVALID_INCLUDE_ENTRY,
map[string]interface{}{
wski18n.KEY_INCLUDE: "",
})
wskprint.PrintlnOpenWhiskVerbose(Flags.Verbose, verboseMsg)
} else {
for index, d := range includeData {
includeData[index] = "\"" + d + "\""
}
includeEntry := strings.Join(includeData, ", ")
verboseMsg = wski18n.T(wski18n.ID_VERBOSE_INVALID_INCLUDE_ENTRY,
map[string]interface{}{
wski18n.KEY_INCLUDE: includeEntry,
})
wskprint.PrintlnOpenWhiskVerbose(Flags.Verbose, verboseMsg)
}
continue
}
// set destDir to the destination location
// check if its a file than change it to the Dir of destination file
destDir := i.destination
if isFilePath(destDir) {
destDir = filepath.Dir(destDir)
}
// trim path wildcard "*" from the destination path as if it has any
destDirs := strings.Split(destDir, PATH_WILDCARD)
destDir = destDirs[0]
// retrieve the name of all files matching pattern or nil if there is no matching file
// listOfSourceFiles will hold a list of files matching patterns such as
// actions/* or actions/libs/* or actions/libs/*/utils.js or actions/*/*/utils.js
if listOfSourceFiles, err = filepath.Glob(i.source); err != nil {
return includeInfo, err
}
// handle the scenarios where included path is something similar to actions/common/*.js
// or actions/libs/* or actions/libs/*/utils.js
// and destination is set to libs/ or libs/* or ./libs/* or libs/*/utils.js or libs/ or ./libs/
if strings.ContainsAny(i.source, PATH_WILDCARD) {
wskprint.PrintlnOpenWhiskVerbose(Flags.Verbose, wski18n.T(wski18n.ID_VERBOSE_LIST_OF_FILES_MATCHING_PATTERN))
for _, file := range listOfSourceFiles {
var relPath string
if relPath, err = filepath.Rel(i.source, file); err != nil {
return includeInfo, err
}
relPath = strings.TrimLeft(relPath, ONE_DIR_UP)
j := Include{
source: file,
destination: filepath.Join(destDir, relPath),
}
includeInfo = append(includeInfo, j)
zw.excludedFiles[j.source] = false
verboseMsg = wski18n.T(wski18n.ID_VERBOSE_ZIP_INCLUDE_SOURCE_PATH_X_path_X_DESTINATION_PATH_X_dest_X,
map[string]interface{}{
wski18n.KEY_PATH: j.source,
wski18n.KEY_DESTINATION: j.destination,
})
wskprint.PrintlnOpenWhiskVerbose(Flags.Verbose, verboseMsg)
}
// handle scenarios where included path is something similar to actions/common/utils.js
// and destination is set to ./common/ i.e. no file name specified in the destination
} else {
if f, err := isFile(i.source); err == nil && f {
if _, file := filepath.Split(i.destination); len(file) == 0 {
_, sFile := filepath.Split(i.source)
i.destination = i.destination + sFile
}
}
// append just parsed include info to the list for further processing
wskprint.PrintlnOpenWhiskVerbose(Flags.Verbose, wski18n.T(wski18n.ID_VERBOSE_LIST_OF_FILES_MATCHING_PATTERN))
verboseMsg = wski18n.T(wski18n.ID_VERBOSE_ZIP_INCLUDE_SOURCE_PATH_X_path_X_DESTINATION_PATH_X_dest_X,
map[string]interface{}{
wski18n.KEY_PATH: i.source,
wski18n.KEY_DESTINATION: i.destination,
})
wskprint.PrintlnOpenWhiskVerbose(Flags.Verbose, verboseMsg)
includeInfo = append(includeInfo, i)
zw.excludedFiles[i.source] = false
}
}
return includeInfo, nil
}
func (zw *ZipWriter) buildExcludeMetadata() error {
var err error
for _, exclude := range zw.exclude {
exclude = filepath.Join(zw.manifestFilePath, exclude)
if err = zw.findExcludedIncludedFiles(exclude, true); err != nil {
return err
}
}
return err
}
func (zw *ZipWriter) findExcludedIncludedFiles(functionPath string, flag bool) error {
var err error
var files []string
var excludedFiles []string
var f bool
if !strings.HasSuffix(functionPath, PATH_WILDCARD) {
functionPath = filepath.Join(functionPath, PATH_WILDCARD)
}
if excludedFiles, err = filepath.Glob(functionPath); err != nil {
return err
}
for _, file := range excludedFiles {
err = filepath.Walk(file, func(path string, info os.FileInfo, err error) error {
files = append(files, path)
return nil
})
if err != nil {
return err
}
}
for _, file := range files {
if f, err = isFile(file); err != nil {
return err
} else if f {
zw.excludedFiles[file] = flag
} else {
if err = zw.findExcludedIncludedFiles(file, flag); err != nil {
return err
}
}
}
return err
}
func (zw *ZipWriter) Zip() error {
var zipFile *os.File
var err error
var fileInfo os.FileInfo
var verboseMsg string
// create zip file e.g. greeting.zip
if zipFile, err = os.Create(zw.des); err != nil {
return err
}
defer zipFile.Close()
verboseMsg = wski18n.T(wski18n.ID_VERBOSE_CREATING_ZIP_FILE_X_path_X,
map[string]interface{}{
wski18n.KEY_PATH: zipFile.Name(),
})
wskprint.PrintlnOpenWhiskVerbose(Flags.Verbose, verboseMsg)
// creating a new zip writter for greeting.zip
zw.zipWriter = zip.NewWriter(zipFile)
// build a map of file names and bool indicating whether the file is included or excluded
// iterate over the directory specified in "function", find the list of files and mark them as not excluded
if err = zw.findExcludedIncludedFiles(zw.src, false); err != nil {
return err
}
if err = zw.buildExcludeMetadata(); err != nil {
return err
}
// walk file system rooted at the directory specified in "function"
// walk over each file and dir under root directory e.g. function: actions/greeting
// add actions/greeting/index.js and actions/greeting/package.json to zip file
if err = filepath.Walk(zw.src, zw.zipFile); err != nil {
return nil
}
// maintain a list of included files and/or directories with their destination
var includeInfo []Include
includeInfo, err = zw.buildIncludeMetadata()
if err != nil {
return err
}
for _, i := range includeInfo | {
if i.source != i.destination {
// now determine whether the included item is file or dir
// it could list something like this as well, "actions/common/*.js"
if fileInfo, err = os.Stat(i.source); err != nil {
return err
}
// if the included item is a directory, call a function to copy the
// entire directory recursively including its subdirectories and files
if fileInfo.Mode().IsDir() {
if err = copyDir(i.source, i.destination); err != nil {
return err
}
// if the included item is a file, call a function to copy the file
// along with its path by creating the parent directories
} else if fileInfo.Mode().IsRegular() {
if err = copyFile(i.source, i.destination); err != nil {
return err
} | conditional_block | |
zip.go |
}
type ZipWriter struct {
src string
des string
include [][]string
exclude []string
excludedFiles map[string]bool
manifestFilePath string
zipWriter *zip.Writer
}
type Include struct {
source string
destination string
}
func (zw *ZipWriter) zipFile(path string, f os.FileInfo, err error) error {
var file *os.File
var wr io.Writer
var verboseMsg string
if err != nil {
return err
}
if zw.excludedFiles[filepath.Clean(path)] {
verboseMsg = wski18n.T(wski18n.ID_VERBOSE_ZIP_EXCLUDING_FILE_X_path_X,
map[string]interface{}{
wski18n.KEY_PATH: path,
})
wskprint.PrintlnOpenWhiskVerbose(Flags.Verbose, verboseMsg)
return nil
}
if !f.Mode().IsRegular() || f.Size() == 0 {
return nil
}
if file, err = os.Open(path); err != nil {
return err
}
defer file.Close()
fileName := strings.TrimPrefix(path, zw.src+"/")
if wr, err = zw.zipWriter.Create(fileName); err != nil {
return err
}
if _, err = io.Copy(wr, file); err != nil {
return err
}
verboseMsg = wski18n.T(wski18n.ID_VERBOSE_ZIP_ADDING_FILE_X_path_X,
map[string]interface{}{
wski18n.KEY_PATH: path,
})
wskprint.PrintlnOpenWhiskVerbose(Flags.Verbose, verboseMsg)
return nil
}
func (zw *ZipWriter) buildIncludeMetadata() ([]Include, error) {
var includeInfo []Include
var listOfSourceFiles []string
var err error
var verboseMsg string
// iterate over set of included files specified in manifest YAML e.g.
// include:
// - ["source"]
// - ["source", "destination"]
for _, includeData := range zw.include {
var i Include
// if "destination" is not specified, its considered same as "source"
// "source" is relative to where manifest.yaml file is located
// relative source path is converted to absolute path by appending manifest path
// since the relative source path might not be accessible from where wskdeploy is invoked
// "destination" is relative to the action directory, the one specified in function
// relative path is converted to absolute path by appending function directory
if len(includeData) == 1 {
i.source = filepath.Join(zw.manifestFilePath, includeData[0])
i.destination = filepath.Join(zw.src, includeData[0])
verboseMsg = wski18n.T(wski18n.ID_VERBOSE_ZIP_INCLUDE_SOURCE_PATH_X_path_X,
map[string]interface{}{
wski18n.KEY_PATH: includeData[0],
})
wskprint.PrintlnOpenWhiskVerbose(Flags.Verbose, verboseMsg)
} else if len(includeData) == 2 {
i.source = filepath.Join(zw.manifestFilePath, includeData[0])
i.destination = zw.src + "/" + includeData[1]
verboseMsg = wski18n.T(wski18n.ID_VERBOSE_ZIP_INCLUDE_SOURCE_PATH_X_path_X_DESTINATION_PATH_X_dest_X,
map[string]interface{}{
wski18n.KEY_PATH: includeData[0],
wski18n.KEY_DESTINATION: includeData[1],
})
} else {
if len(includeData) == 0 {
verboseMsg = wski18n.T(wski18n.ID_VERBOSE_INVALID_INCLUDE_ENTRY,
map[string]interface{}{
wski18n.KEY_INCLUDE: "",
})
wskprint.PrintlnOpenWhiskVerbose(Flags.Verbose, verboseMsg)
} else {
for index, d := range includeData {
includeData[index] = "\"" + d + "\""
}
includeEntry := strings.Join(includeData, ", ")
verboseMsg = wski18n.T(wski18n.ID_VERBOSE_INVALID_INCLUDE_ENTRY,
map[string]interface{}{
wski18n.KEY_INCLUDE: includeEntry,
})
wskprint.PrintlnOpenWhiskVerbose(Flags.Verbose, verboseMsg)
}
continue
}
// set destDir to the destination location
// check if its a file than change it to the Dir of destination file
destDir := i.destination
if isFilePath(destDir) {
destDir = filepath.Dir(destDir)
}
// trim path wildcard "*" from the destination path as if it has any
destDirs := strings.Split(destDir, PATH_WILDCARD)
destDir = destDirs[0]
// retrieve the name of all files matching pattern or nil if there is no matching file
// listOfSourceFiles will hold a list of files matching patterns such as
// actions/* or actions/libs/* or actions/libs/*/utils.js or actions/*/*/utils.js
if listOfSourceFiles, err = filepath.Glob(i.source); err != nil {
return includeInfo, err
}
// handle the scenarios where included path is something similar to actions/common/*.js
// or actions/libs/* or actions/libs/*/utils.js
// and destination is set to libs/ or libs/* or ./libs/* or libs/*/utils.js or libs/ or ./libs/
if strings.ContainsAny(i.source, PATH_WILDCARD) {
wskprint.PrintlnOpenWhiskVerbose(Flags.Verbose, wski18n.T(wski18n.ID_VERBOSE_LIST_OF_FILES_MATCHING_PATTERN))
for _, file := range listOfSourceFiles {
var relPath string
if relPath, err = filepath.Rel(i.source, file); err != nil {
return includeInfo, err
}
relPath = strings.TrimLeft(relPath, ONE_DIR_UP)
j := Include{
source: file,
destination: filepath.Join(destDir, relPath),
}
includeInfo = append(includeInfo, j)
zw.excludedFiles[j.source] = false
verboseMsg = wski18n.T(wski18n.ID_VERBOSE_ZIP_INCLUDE_SOURCE_PATH_X_path_X_DESTINATION_PATH_X_dest_X,
map[string]interface{}{
wski18n.KEY_PATH: j.source,
wski18n.KEY_DESTINATION: j.destination,
})
wskprint.PrintlnOpenWhiskVerbose(Flags.Verbose, verboseMsg)
}
// handle scenarios where included path is something similar to actions/common/utils.js
// and destination is set to ./common/ i.e. no file name specified in the destination
} else {
if f, err := isFile(i.source); err == nil && f {
if _, file := filepath.Split(i.destination); len(file) == 0 {
_, sFile := filepath.Split(i.source)
i.destination = i.destination + sFile
}
}
// append just parsed include info to the list for further processing
wskprint.PrintlnOpenWhiskVerbose(Flags.Verbose, wski18n.T(wski18n.ID_VERBOSE_LIST_OF_FILES_MATCHING_PATTERN))
verboseMsg = wski18n.T(wski18n.ID_VERBOSE_ZIP_INCLUDE_SOURCE_PATH_X_path_X_DESTINATION_PATH_X_dest_X,
map[string]interface{}{
wski18n.KEY_PATH: i.source,
wski18n.KEY_DESTINATION: i.destination,
})
wskprint.PrintlnOpenWhiskVerbose(Flags.Verbose, verboseMsg)
includeInfo = append(includeInfo, i)
zw.excludedFiles[i.source] = false
}
}
return includeInfo, nil
}
func (zw *ZipWriter) buildExcludeMetadata() error {
var err error
for _, exclude := range zw.exclude {
exclude = filepath.Join(zw.manifestFilePath, exclude)
if err = zw.findExcludedIncludedFiles(exclude, true); err != nil {
return err
}
}
return err
}
func (zw *ZipWriter) | (functionPath string, flag bool) error {
var err error
var files []string
var excludedFiles []string
var f bool
if !strings.HasSuffix(functionPath, PATH_WILDCARD) {
functionPath = filepath.Join(functionPath, PATH_WILDCARD)
}
if excludedFiles, err = filepath.Glob(functionPath); err != nil {
return err
}
for _, file := range excludedFiles {
err = filepath.Walk(file, func(path string, info os.FileInfo, err error) error {
files = append(files, path)
return nil
})
if err != nil {
return err
}
}
for _, file := range files {
if f, err = isFile(file); err != nil {
return err
} else if f {
zw.excludedFiles[file] = flag
} else {
if err = zw.findExcludedIncludedFiles(file, flag); err != nil {
return err
}
}
}
return err
}
func (zw *ZipWriter) Zip() error {
var zipFile *os.File
var err error
var fileInfo os.FileInfo
var verbose | findExcludedIncludedFiles | identifier_name |
zip.go | package utils
import (
"archive/zip"
"io"
"os"
"path/filepath"
"strings"
"github.com/apache/openwhisk-wskdeploy/wski18n"
"github.com/apache/openwhisk-wskdeploy/wskprint"
)
const PATH_WILDCARD = "*"
const ONE_DIR_UP = "../"
func NewZipWriter(src string, des string, include [][]string, exclude []string, manifestFilePath string) *ZipWriter {
zw := &ZipWriter{
src: src,
des: des,
include: include,
exclude: exclude,
excludedFiles: make(map[string]bool, 0),
manifestFilePath: manifestFilePath,
}
return zw
}
type ZipWriter struct {
src string
des string
include [][]string
exclude []string
excludedFiles map[string]bool
manifestFilePath string
zipWriter *zip.Writer
}
type Include struct {
source string
destination string
}
func (zw *ZipWriter) zipFile(path string, f os.FileInfo, err error) error {
var file *os.File
var wr io.Writer
var verboseMsg string
if err != nil {
return err
}
if zw.excludedFiles[filepath.Clean(path)] {
verboseMsg = wski18n.T(wski18n.ID_VERBOSE_ZIP_EXCLUDING_FILE_X_path_X,
map[string]interface{}{
wski18n.KEY_PATH: path,
})
wskprint.PrintlnOpenWhiskVerbose(Flags.Verbose, verboseMsg)
return nil
}
if !f.Mode().IsRegular() || f.Size() == 0 {
return nil
}
if file, err = os.Open(path); err != nil {
return err
}
defer file.Close()
fileName := strings.TrimPrefix(path, zw.src+"/")
if wr, err = zw.zipWriter.Create(fileName); err != nil {
return err
}
if _, err = io.Copy(wr, file); err != nil {
return err
}
verboseMsg = wski18n.T(wski18n.ID_VERBOSE_ZIP_ADDING_FILE_X_path_X,
map[string]interface{}{
wski18n.KEY_PATH: path,
})
wskprint.PrintlnOpenWhiskVerbose(Flags.Verbose, verboseMsg)
return nil
}
func (zw *ZipWriter) buildIncludeMetadata() ([]Include, error) {
var includeInfo []Include
var listOfSourceFiles []string
var err error
var verboseMsg string
// iterate over set of included files specified in manifest YAML e.g.
// include:
// - ["source"]
// - ["source", "destination"]
for _, includeData := range zw.include {
var i Include
// if "destination" is not specified, its considered same as "source"
// "source" is relative to where manifest.yaml file is located
// relative source path is converted to absolute path by appending manifest path
// since the relative source path might not be accessible from where wskdeploy is invoked
// "destination" is relative to the action directory, the one specified in function
// relative path is converted to absolute path by appending function directory
if len(includeData) == 1 {
i.source = filepath.Join(zw.manifestFilePath, includeData[0])
i.destination = filepath.Join(zw.src, includeData[0])
verboseMsg = wski18n.T(wski18n.ID_VERBOSE_ZIP_INCLUDE_SOURCE_PATH_X_path_X,
map[string]interface{}{
wski18n.KEY_PATH: includeData[0],
})
wskprint.PrintlnOpenWhiskVerbose(Flags.Verbose, verboseMsg)
} else if len(includeData) == 2 {
i.source = filepath.Join(zw.manifestFilePath, includeData[0])
i.destination = zw.src + "/" + includeData[1]
verboseMsg = wski18n.T(wski18n.ID_VERBOSE_ZIP_INCLUDE_SOURCE_PATH_X_path_X_DESTINATION_PATH_X_dest_X,
map[string]interface{}{
wski18n.KEY_PATH: includeData[0],
wski18n.KEY_DESTINATION: includeData[1],
})
} else {
if len(includeData) == 0 {
verboseMsg = wski18n.T(wski18n.ID_VERBOSE_INVALID_INCLUDE_ENTRY,
map[string]interface{}{
wski18n.KEY_INCLUDE: "",
})
wskprint.PrintlnOpenWhiskVerbose(Flags.Verbose, verboseMsg)
} else {
for index, d := range includeData {
includeData[index] = "\"" + d + "\""
}
includeEntry := strings.Join(includeData, ", ")
verboseMsg = wski18n.T(wski18n.ID_VERBOSE_INVALID_INCLUDE_ENTRY,
map[string]interface{}{
wski18n.KEY_INCLUDE: includeEntry,
})
wskprint.PrintlnOpenWhiskVerbose(Flags.Verbose, verboseMsg)
}
continue
}
// set destDir to the destination location
// check if its a file than change it to the Dir of destination file
destDir := i.destination
if isFilePath(destDir) {
destDir = filepath.Dir(destDir)
}
// trim path wildcard "*" from the destination path as if it has any
destDirs := strings.Split(destDir, PATH_WILDCARD)
destDir = destDirs[0]
// retrieve the name of all files matching pattern or nil if there is no matching file
// listOfSourceFiles will hold a list of files matching patterns such as
// actions/* or actions/libs/* or actions/libs/*/utils.js or actions/*/*/utils.js
if listOfSourceFiles, err = filepath.Glob(i.source); err != nil {
return includeInfo, err
}
// handle the scenarios where included path is something similar to actions/common/*.js
// or actions/libs/* or actions/libs/*/utils.js
// and destination is set to libs/ or libs/* or ./libs/* or libs/*/utils.js or libs/ or ./libs/
if strings.ContainsAny(i.source, PATH_WILDCARD) {
wskprint.PrintlnOpenWhiskVerbose(Flags.Verbose, wski18n.T(wski18n.ID_VERBOSE_LIST_OF_FILES_MATCHING_PATTERN))
for _, file := range listOfSourceFiles {
var relPath string
if relPath, err = filepath.Rel(i.source, file); err != nil {
return includeInfo, err
}
relPath = strings.TrimLeft(relPath, ONE_DIR_UP)
j := Include{
source: file,
destination: filepath.Join(destDir, relPath),
}
includeInfo = append(includeInfo, j)
zw.excludedFiles[j.source] = false
verboseMsg = wski18n.T(wski18n.ID_VERBOSE_ZIP_INCLUDE_SOURCE_PATH_X_path_X_DESTINATION_PATH_X_dest_X,
map[string]interface{}{
wski18n.KEY_PATH: j.source,
wski18n.KEY_DESTINATION: j.destination,
})
wskprint.PrintlnOpenWhiskVerbose(Flags.Verbose, verboseMsg)
}
// handle scenarios where included path is something similar to actions/common/utils.js
// and destination is set to ./common/ i.e. no file name specified in the destination
} else {
if f, err := isFile(i.source); err == nil && f {
if _, file := filepath.Split(i.destination); len(file) == 0 {
_, sFile := filepath.Split(i.source)
i.destination = i.destination + sFile
}
}
// append just parsed include info to the list for further processing
wskprint.PrintlnOpenWhiskVerbose(Flags.Verbose, wski18n.T(wski18n.ID_VERBOSE_LIST_OF_FILES_MATCHING_PATTERN))
verboseMsg = wski18n.T(wski18n.ID_VERBOSE_ZIP_INCLUDE_SOURCE_PATH_X_path_X_DESTINATION_PATH_X_dest_X,
map[string]interface{}{
wski18n.KEY_PATH: i.source,
wski18n.KEY_DESTINATION: i.destination,
})
wskprint.PrintlnOpenWhiskVerbose(Flags.Verbose, verboseMsg)
includeInfo = append(includeInfo, i)
zw.excludedFiles[i.source] = false
}
}
return includeInfo, nil
}
func (zw *ZipWriter) buildExcludeMetadata() error {
var err error
for _, exclude := range zw.exclude {
exclude = filepath.Join(zw.manifestFilePath, exclude)
if err = zw.findExcludedIncludedFiles(exclude, true); err != nil {
return err
}
}
return err
}
func (zw *ZipWriter) findExcludedIncludedFiles(functionPath string, flag bool) error {
var err error
var files []string
var excludedFiles []string
var f bool
if !strings.HasSuffix(functionPath, PATH_WILDCARD) {
functionPath = filepath.Join(functionPath, PATH_WILDCARD)
}
if excludedFiles, err = filepath.Glob(functionPath); err != nil {
return err
}
for _, file := range excludedFiles {
err = filepath.Walk(file, func(path | random_line_split | ||
zip.go |
type ZipWriter struct {
src string
des string
include [][]string
exclude []string
excludedFiles map[string]bool
manifestFilePath string
zipWriter *zip.Writer
}
type Include struct {
source string
destination string
}
func (zw *ZipWriter) zipFile(path string, f os.FileInfo, err error) error {
var file *os.File
var wr io.Writer
var verboseMsg string
if err != nil {
return err
}
if zw.excludedFiles[filepath.Clean(path)] {
verboseMsg = wski18n.T(wski18n.ID_VERBOSE_ZIP_EXCLUDING_FILE_X_path_X,
map[string]interface{}{
wski18n.KEY_PATH: path,
})
wskprint.PrintlnOpenWhiskVerbose(Flags.Verbose, verboseMsg)
return nil
}
if !f.Mode().IsRegular() || f.Size() == 0 {
return nil
}
if file, err = os.Open(path); err != nil {
return err
}
defer file.Close()
fileName := strings.TrimPrefix(path, zw.src+"/")
if wr, err = zw.zipWriter.Create(fileName); err != nil {
return err
}
if _, err = io.Copy(wr, file); err != nil {
return err
}
verboseMsg = wski18n.T(wski18n.ID_VERBOSE_ZIP_ADDING_FILE_X_path_X,
map[string]interface{}{
wski18n.KEY_PATH: path,
})
wskprint.PrintlnOpenWhiskVerbose(Flags.Verbose, verboseMsg)
return nil
}
func (zw *ZipWriter) buildIncludeMetadata() ([]Include, error) {
var includeInfo []Include
var listOfSourceFiles []string
var err error
var verboseMsg string
// iterate over set of included files specified in manifest YAML e.g.
// include:
// - ["source"]
// - ["source", "destination"]
for _, includeData := range zw.include {
var i Include
// if "destination" is not specified, its considered same as "source"
// "source" is relative to where manifest.yaml file is located
// relative source path is converted to absolute path by appending manifest path
// since the relative source path might not be accessible from where wskdeploy is invoked
// "destination" is relative to the action directory, the one specified in function
// relative path is converted to absolute path by appending function directory
if len(includeData) == 1 {
i.source = filepath.Join(zw.manifestFilePath, includeData[0])
i.destination = filepath.Join(zw.src, includeData[0])
verboseMsg = wski18n.T(wski18n.ID_VERBOSE_ZIP_INCLUDE_SOURCE_PATH_X_path_X,
map[string]interface{}{
wski18n.KEY_PATH: includeData[0],
})
wskprint.PrintlnOpenWhiskVerbose(Flags.Verbose, verboseMsg)
} else if len(includeData) == 2 {
i.source = filepath.Join(zw.manifestFilePath, includeData[0])
i.destination = zw.src + "/" + includeData[1]
verboseMsg = wski18n.T(wski18n.ID_VERBOSE_ZIP_INCLUDE_SOURCE_PATH_X_path_X_DESTINATION_PATH_X_dest_X,
map[string]interface{}{
wski18n.KEY_PATH: includeData[0],
wski18n.KEY_DESTINATION: includeData[1],
})
} else {
if len(includeData) == 0 {
verboseMsg = wski18n.T(wski18n.ID_VERBOSE_INVALID_INCLUDE_ENTRY,
map[string]interface{}{
wski18n.KEY_INCLUDE: "",
})
wskprint.PrintlnOpenWhiskVerbose(Flags.Verbose, verboseMsg)
} else {
for index, d := range includeData {
includeData[index] = "\"" + d + "\""
}
includeEntry := strings.Join(includeData, ", ")
verboseMsg = wski18n.T(wski18n.ID_VERBOSE_INVALID_INCLUDE_ENTRY,
map[string]interface{}{
wski18n.KEY_INCLUDE: includeEntry,
})
wskprint.PrintlnOpenWhiskVerbose(Flags.Verbose, verboseMsg)
}
continue
}
// set destDir to the destination location
// check if its a file than change it to the Dir of destination file
destDir := i.destination
if isFilePath(destDir) {
destDir = filepath.Dir(destDir)
}
// trim path wildcard "*" from the destination path as if it has any
destDirs := strings.Split(destDir, PATH_WILDCARD)
destDir = destDirs[0]
// retrieve the name of all files matching pattern or nil if there is no matching file
// listOfSourceFiles will hold a list of files matching patterns such as
// actions/* or actions/libs/* or actions/libs/*/utils.js or actions/*/*/utils.js
if listOfSourceFiles, err = filepath.Glob(i.source); err != nil {
return includeInfo, err
}
// handle the scenarios where included path is something similar to actions/common/*.js
// or actions/libs/* or actions/libs/*/utils.js
// and destination is set to libs/ or libs/* or ./libs/* or libs/*/utils.js or libs/ or ./libs/
if strings.ContainsAny(i.source, PATH_WILDCARD) {
wskprint.PrintlnOpenWhiskVerbose(Flags.Verbose, wski18n.T(wski18n.ID_VERBOSE_LIST_OF_FILES_MATCHING_PATTERN))
for _, file := range listOfSourceFiles {
var relPath string
if relPath, err = filepath.Rel(i.source, file); err != nil {
return includeInfo, err
}
relPath = strings.TrimLeft(relPath, ONE_DIR_UP)
j := Include{
source: file,
destination: filepath.Join(destDir, relPath),
}
includeInfo = append(includeInfo, j)
zw.excludedFiles[j.source] = false
verboseMsg = wski18n.T(wski18n.ID_VERBOSE_ZIP_INCLUDE_SOURCE_PATH_X_path_X_DESTINATION_PATH_X_dest_X,
map[string]interface{}{
wski18n.KEY_PATH: j.source,
wski18n.KEY_DESTINATION: j.destination,
})
wskprint.PrintlnOpenWhiskVerbose(Flags.Verbose, verboseMsg)
}
// handle scenarios where included path is something similar to actions/common/utils.js
// and destination is set to ./common/ i.e. no file name specified in the destination
} else {
if f, err := isFile(i.source); err == nil && f {
if _, file := filepath.Split(i.destination); len(file) == 0 {
_, sFile := filepath.Split(i.source)
i.destination = i.destination + sFile
}
}
// append just parsed include info to the list for further processing
wskprint.PrintlnOpenWhiskVerbose(Flags.Verbose, wski18n.T(wski18n.ID_VERBOSE_LIST_OF_FILES_MATCHING_PATTERN))
verboseMsg = wski18n.T(wski18n.ID_VERBOSE_ZIP_INCLUDE_SOURCE_PATH_X_path_X_DESTINATION_PATH_X_dest_X,
map[string]interface{}{
wski18n.KEY_PATH: i.source,
wski18n.KEY_DESTINATION: i.destination,
})
wskprint.PrintlnOpenWhiskVerbose(Flags.Verbose, verboseMsg)
includeInfo = append(includeInfo, i)
zw.excludedFiles[i.source] = false
}
}
return includeInfo, nil
}
func (zw *ZipWriter) buildExcludeMetadata() error {
var err error
for _, exclude := range zw.exclude {
exclude = filepath.Join(zw.manifestFilePath, exclude)
if err = zw.findExcludedIncludedFiles(exclude, true); err != nil {
return err
}
}
return err
}
func (zw *ZipWriter) findExcludedIncludedFiles(functionPath string, flag bool) error {
var err error
var files []string
var excludedFiles []string
var f bool
if !strings.HasSuffix(functionPath, PATH_WILDCARD) {
functionPath = filepath.Join(functionPath, PATH_WILDCARD)
}
if excludedFiles, err = filepath.Glob(functionPath); err != nil {
return err
}
for _, file := range excludedFiles {
err = filepath.Walk(file, func(path string, info os.FileInfo, err error) error {
files = append(files, path)
return nil
})
if err != nil {
return err
}
}
for _, file := range files {
if f, err = isFile(file); err != nil {
return err
} else if f {
zw.excludedFiles[file] = flag
} else {
if err = zw | {
zw := &ZipWriter{
src: src,
des: des,
include: include,
exclude: exclude,
excludedFiles: make(map[string]bool, 0),
manifestFilePath: manifestFilePath,
}
return zw
} | identifier_body | |
ingress.go | ) read(ingress *unstructured.Unstructured, endpoints *unstructured.UnstructuredList,
services *unstructured.UnstructuredList) error {
iia.processIngressEvent(watchAddedEvent(ingress))
err := services.EachListItem(func(service runtime.Object) error {
iia.processServiceEvent(watchAddedEvent(service.(*unstructured.Unstructured)))
return nil
})
if err != nil {
logger.V(3).Infof("Error iterating over endpoint list for service %q: %v", ingress.GetName(), err)
}
settled := make(chan struct{})
logger.V(3).Infof("Processing endpoint list: %#v", endpoints)
err = endpoints.EachListItem(func(endpoint runtime.Object) error {
iia.processEndpointEvent(watchAddedEvent(endpoint.(*unstructured.Unstructured)), settled)
return nil
})
if err != nil {
logger.V(3).Infof("Error iterating over endpoint list for ingress %q: %v", ingress.GetName(), err)
}
iia.endpointsSettled = true
if iia.checkAndLogStatus() {
return nil
}
return &initializationError{
subErrors: iia.errorMessages(),
object: ingress,
}
}
// await is a helper companion to `Await` designed to make it easy to test this module.
func (iia *ingressInitAwaiter) await(
ingressEvents, serviceEvents, endpointsEvents <-chan watch.Event,
settled chan struct{},
settlementGracePeriodExpired <-chan time.Time,
timeout <-chan time.Time,
) error {
iia.config.logStatus(diag.Info, "[1/3] Finding a matching service for each Ingress path")
for {
// Check whether we've succeeded.
if iia.checkAndLogStatus() {
return nil
}
// Else, wait for updates.
select {
case <-iia.config.ctx.Done():
// On cancel, check one last time if the ingress is ready.
if _, ready := iia.checkIfEndpointsReady(); ready && iia.ingressReady {
return nil
}
return &cancellationError{
object: iia.ingress,
subErrors: iia.errorMessages(),
}
case <-timeout:
// On timeout, check one last time if the ingress is ready.
if _, ready := iia.checkIfEndpointsReady(); ready && iia.ingressReady {
return nil
}
return &timeoutError{
object: iia.ingress,
subErrors: iia.errorMessages(),
}
case <-settlementGracePeriodExpired:
// If we don't see any endpoint events in the designated time, assume endpoints have settled.
// This is to account for the distinct possibility of ingress using a resource reference or non-existent
// endpoints - in which case we will never see corresponding endpoint events.
if iia.endpointEventsCount == 0 {
iia.endpointsSettled = true
}
case <-settled:
iia.endpointsSettled = true
case event := <-ingressEvents:
iia.processIngressEvent(event)
case event := <-endpointsEvents:
iia.processEndpointEvent(event, settled)
case event := <-serviceEvents:
iia.processServiceEvent(event)
}
}
}
func (iia *ingressInitAwaiter) processServiceEvent(event watch.Event) {
service, isUnstructured := event.Object.(*unstructured.Unstructured)
if !isUnstructured {
logger.V(3).Infof("Service watch received unknown object type %q",
reflect.TypeOf(service))
return
}
name := service.GetName()
if event.Type == watch.Deleted {
iia.knownExternalNameServices.Delete(name)
return
}
t, ok := openapi.Pluck(service.Object, "spec", "type")
if ok && t.(string) == "ExternalName" {
iia.knownExternalNameServices.Insert(name)
}
}
func (iia *ingressInitAwaiter) processIngressEvent(event watch.Event) {
inputIngressName := iia.config.currentInputs.GetName()
ingress, isUnstructured := event.Object.(*unstructured.Unstructured)
if !isUnstructured {
logger.V(3).Infof("Ingress watch received unknown object type %q",
reflect.TypeOf(ingress))
return
}
// Do nothing if this is not the ingress we're waiting for.
if ingress.GetName() != inputIngressName {
return
}
// Start with a blank slate.
iia.ingressReady = false
// Mark the ingress as not ready if it's deleted.
if event.Type == watch.Deleted {
return
}
iia.ingress = ingress
// To the best of my knowledge, this works across all known ingress api version variations.
ingressesRaw, ok := openapi.Pluck(ingress.Object, "status", "loadBalancer", "ingress")
if !ok {
logger.V(3).Infof("Unable to decode Ingress object from unstructured: %#v", ingress)
return
}
ingresses, ok := ingressesRaw.([]any)
if !ok {
logger.V(3).Infof("Unexpected ingress object structure from unstructured: %#v", ingress)
return
}
// Update status of ingress object so that we can check success.
iia.ingressReady = len(ingresses) > 0
logger.V(3).Infof("Waiting for ingress %q to update .status.loadBalancer with hostname/IP",
inputIngressName)
}
func decodeIngress(u *unstructured.Unstructured, to any) error {
b, err := u.MarshalJSON()
if err != nil {
return err
}
err = json.Unmarshal(b, to)
if err != nil {
return err
}
return nil
}
func (iia *ingressInitAwaiter) checkIfEndpointsReady() (string, bool) {
apiVersion := iia.ingress.GetAPIVersion()
switch apiVersion {
case "extensions/v1beta1", "networking.k8s.io/v1beta1":
var obj networkingv1beta1.Ingress
if err := decodeIngress(iia.ingress, &obj); err != nil {
logger.V(3).Infof("Unable to decode Ingress object from unstructured: %#v", iia.ingress)
return apiVersion, false
}
for _, rule := range obj.Spec.Rules {
var httpIngressPaths []networkingv1beta1.HTTPIngressPath
if rule.HTTP != nil {
httpIngressPaths = rule.HTTP.Paths
}
for _, path := range httpIngressPaths {
// Ignore ExternalName services
if path.Backend.ServiceName != "" && iia.knownExternalNameServices.Has(path.Backend.ServiceName) {
continue
}
if path.Backend.ServiceName != "" && !iia.knownEndpointObjects.Has(path.Backend.ServiceName) {
if iia.endpointsSettled {
// We haven't seen the target endpoint emit any events within the settlement period
// and there is a chance it may never exist.
iia.config.logStatus(diag.Warning, fmt.Sprintf("No matching service found for ingress rule: %s",
expectedIngressPath(rule.Host, path.Path, path.Backend.ServiceName)))
} else {
// We may get more endpoint events, lets wait and retry.
return apiVersion, false
}
}
}
}
case "networking.k8s.io/v1":
var obj networkingv1.Ingress
if err := decodeIngress(iia.ingress, &obj); err != nil {
logger.V(3).Infof("Unable to decode Ingress object from unstructured: %#v", iia.ingress)
return apiVersion, false
}
for _, rule := range obj.Spec.Rules {
var httpIngressPaths []networkingv1.HTTPIngressPath
if rule.HTTP != nil {
httpIngressPaths = rule.HTTP.Paths
}
for _, path := range httpIngressPaths {
// TODO: Should we worry about "resource" backends?
if path.Backend.Service == nil {
continue
}
// Ignore ExternalName services
if path.Backend.Service.Name != "" && iia.knownExternalNameServices.Has(path.Backend.Service.Name) {
continue
}
if path.Backend.Service.Name != "" && !iia.knownEndpointObjects.Has(path.Backend.Service.Name) {
if iia.endpointsSettled {
// We haven't seen the target endpoint emit any events within the settlement period
// and there is a chance it may never exist
// (https://github.com/pulumi/pulumi-kubernetes/issues/1810)
iia.config.logStatus(diag.Warning, fmt.Sprintf("No matching service found for ingress rule: %s",
expectedIngressPath(rule.Host, path.Path, path.Backend.Service.Name)))
} else {
// We may get more endpoint events, lets wait and retry.
return apiVersion, false
}
}
}
}
}
return apiVersion, true
}
// expectedIngressPath is a helper to print a useful error message.
func | expectedIngressPath | identifier_name | |
ingress.go | .GroupVersionResource{
Group: "networking.k8s.io",
Version: "v1",
Resource: "ingresses",
}), informers.WithEventChannel(ingressEvents))
if err != nil {
return err
}
go ingressInformer.Informer().Run(stopper)
endpointsEvents := make(chan watch.Event)
endpointsInformer, err := informers.New(informerFactory, informers.ForEndpoints(), informers.WithEventChannel(endpointsEvents))
if err != nil {
return err
}
go endpointsInformer.Informer().Run(stopper)
serviceEvents := make(chan watch.Event)
serviceInformer, err := informers.New(informerFactory, informers.ForServices(), informers.WithEventChannel(serviceEvents))
if err != nil {
return err
}
go serviceInformer.Informer().Run(stopper)
timeout := metadata.TimeoutDuration(iia.config.timeout, iia.config.currentInputs, DefaultIngressTimeoutMins*60)
return iia.await(ingressEvents, serviceEvents, endpointsEvents, make(chan struct{}), time.After(60*time.Second), time.After(timeout))
}
func (iia *ingressInitAwaiter) Read() error {
ingressClient, endpointsClient, servicesClient, err := iia.makeClients()
if err != nil {
return err
}
// Get live versions of Ingress.
ingress, err := ingressClient.Get(iia.config.ctx, iia.config.currentInputs.GetName(), metav1.GetOptions{})
if err != nil {
// IMPORTANT: Do not wrap this error! If this is a 404, the provider need to know so that it
// can mark the deployment as having been deleted.
return err
}
// Get live version of Endpoints.
endpointList, err := endpointsClient.List(iia.config.ctx, metav1.ListOptions{})
if err != nil {
logger.V(3).Infof("Failed to list endpoints needed for Ingress awaiter: %v", err)
endpointList = &unstructured.UnstructuredList{Items: []unstructured.Unstructured{}}
}
serviceList, err := servicesClient.List(iia.config.ctx, metav1.ListOptions{})
if err != nil {
logger.V(3).Infof("Failed to list services needed for Ingress awaiter: %v", err)
serviceList = &unstructured.UnstructuredList{Items: []unstructured.Unstructured{}}
}
return iia.read(ingress, endpointList, serviceList)
}
func (iia *ingressInitAwaiter) read(ingress *unstructured.Unstructured, endpoints *unstructured.UnstructuredList,
services *unstructured.UnstructuredList) error {
iia.processIngressEvent(watchAddedEvent(ingress))
err := services.EachListItem(func(service runtime.Object) error {
iia.processServiceEvent(watchAddedEvent(service.(*unstructured.Unstructured)))
return nil
})
if err != nil {
logger.V(3).Infof("Error iterating over endpoint list for service %q: %v", ingress.GetName(), err)
}
settled := make(chan struct{})
logger.V(3).Infof("Processing endpoint list: %#v", endpoints)
err = endpoints.EachListItem(func(endpoint runtime.Object) error {
iia.processEndpointEvent(watchAddedEvent(endpoint.(*unstructured.Unstructured)), settled)
return nil
})
if err != nil {
logger.V(3).Infof("Error iterating over endpoint list for ingress %q: %v", ingress.GetName(), err)
}
iia.endpointsSettled = true
if iia.checkAndLogStatus() {
return nil
}
return &initializationError{
subErrors: iia.errorMessages(),
object: ingress,
}
}
// await is a helper companion to `Await` designed to make it easy to test this module.
func (iia *ingressInitAwaiter) await(
ingressEvents, serviceEvents, endpointsEvents <-chan watch.Event,
settled chan struct{},
settlementGracePeriodExpired <-chan time.Time,
timeout <-chan time.Time,
) error {
iia.config.logStatus(diag.Info, "[1/3] Finding a matching service for each Ingress path")
for {
// Check whether we've succeeded.
if iia.checkAndLogStatus() {
return nil
}
// Else, wait for updates.
select {
case <-iia.config.ctx.Done():
// On cancel, check one last time if the ingress is ready.
if _, ready := iia.checkIfEndpointsReady(); ready && iia.ingressReady {
return nil
}
return &cancellationError{
object: iia.ingress,
subErrors: iia.errorMessages(),
}
case <-timeout:
// On timeout, check one last time if the ingress is ready.
if _, ready := iia.checkIfEndpointsReady(); ready && iia.ingressReady {
return nil
}
return &timeoutError{
object: iia.ingress,
subErrors: iia.errorMessages(),
}
case <-settlementGracePeriodExpired:
// If we don't see any endpoint events in the designated time, assume endpoints have settled.
// This is to account for the distinct possibility of ingress using a resource reference or non-existent
// endpoints - in which case we will never see corresponding endpoint events.
if iia.endpointEventsCount == 0 {
iia.endpointsSettled = true
}
case <-settled:
iia.endpointsSettled = true
case event := <-ingressEvents:
iia.processIngressEvent(event)
case event := <-endpointsEvents:
iia.processEndpointEvent(event, settled)
case event := <-serviceEvents:
iia.processServiceEvent(event)
}
}
}
func (iia *ingressInitAwaiter) processServiceEvent(event watch.Event) {
service, isUnstructured := event.Object.(*unstructured.Unstructured)
if !isUnstructured {
logger.V(3).Infof("Service watch received unknown object type %q",
reflect.TypeOf(service))
return
}
name := service.GetName()
if event.Type == watch.Deleted {
iia.knownExternalNameServices.Delete(name)
return
}
t, ok := openapi.Pluck(service.Object, "spec", "type")
if ok && t.(string) == "ExternalName" {
iia.knownExternalNameServices.Insert(name)
}
}
func (iia *ingressInitAwaiter) processIngressEvent(event watch.Event) {
inputIngressName := iia.config.currentInputs.GetName()
ingress, isUnstructured := event.Object.(*unstructured.Unstructured)
if !isUnstructured {
logger.V(3).Infof("Ingress watch received unknown object type %q",
reflect.TypeOf(ingress))
return
}
// Do nothing if this is not the ingress we're waiting for.
if ingress.GetName() != inputIngressName {
return
}
// Start with a blank slate.
iia.ingressReady = false
// Mark the ingress as not ready if it's deleted.
if event.Type == watch.Deleted {
return
}
iia.ingress = ingress
// To the best of my knowledge, this works across all known ingress api version variations.
ingressesRaw, ok := openapi.Pluck(ingress.Object, "status", "loadBalancer", "ingress")
if !ok {
logger.V(3).Infof("Unable to decode Ingress object from unstructured: %#v", ingress)
return
}
ingresses, ok := ingressesRaw.([]any)
if !ok {
logger.V(3).Infof("Unexpected ingress object structure from unstructured: %#v", ingress)
return
}
// Update status of ingress object so that we can check success.
iia.ingressReady = len(ingresses) > 0
logger.V(3).Infof("Waiting for ingress %q to update .status.loadBalancer with hostname/IP",
inputIngressName)
}
func decodeIngress(u *unstructured.Unstructured, to any) error {
b, err := u.MarshalJSON()
if err != nil {
return err
}
err = json.Unmarshal(b, to)
if err != nil {
return err
}
return nil
}
func (iia *ingressInitAwaiter) checkIfEndpointsReady() (string, bool) {
apiVersion := iia.ingress.GetAPIVersion()
switch apiVersion {
case "extensions/v1beta1", "networking.k8s.io/v1beta1":
var obj networkingv1beta1.Ingress
if err := decodeIngress(iia.ingress, &obj); err != nil {
logger.V(3).Infof("Unable to decode Ingress object from unstructured: %#v", iia.ingress)
return apiVersion, false
}
for _, rule := range obj.Spec.Rules {
var httpIngressPaths []networkingv1beta1.HTTPIngressPath
if rule.HTTP != nil {
httpIngressPaths = rule.HTTP.Paths
}
for _, path := range httpIngressPaths {
// Ignore ExternalName services
if path.Backend.ServiceName != "" && iia.knownExternalNameServices.Has(path.Backend.ServiceName) | {
continue
} | conditional_block | |
ingress.go | : iia.errorMessages(),
}
case <-settlementGracePeriodExpired:
// If we don't see any endpoint events in the designated time, assume endpoints have settled.
// This is to account for the distinct possibility of ingress using a resource reference or non-existent
// endpoints - in which case we will never see corresponding endpoint events.
if iia.endpointEventsCount == 0 {
iia.endpointsSettled = true
}
case <-settled:
iia.endpointsSettled = true
case event := <-ingressEvents:
iia.processIngressEvent(event)
case event := <-endpointsEvents:
iia.processEndpointEvent(event, settled)
case event := <-serviceEvents:
iia.processServiceEvent(event)
}
}
}
func (iia *ingressInitAwaiter) processServiceEvent(event watch.Event) {
service, isUnstructured := event.Object.(*unstructured.Unstructured)
if !isUnstructured {
logger.V(3).Infof("Service watch received unknown object type %q",
reflect.TypeOf(service))
return
}
name := service.GetName()
if event.Type == watch.Deleted {
iia.knownExternalNameServices.Delete(name)
return
}
t, ok := openapi.Pluck(service.Object, "spec", "type")
if ok && t.(string) == "ExternalName" {
iia.knownExternalNameServices.Insert(name)
}
}
func (iia *ingressInitAwaiter) processIngressEvent(event watch.Event) {
inputIngressName := iia.config.currentInputs.GetName()
ingress, isUnstructured := event.Object.(*unstructured.Unstructured)
if !isUnstructured {
logger.V(3).Infof("Ingress watch received unknown object type %q",
reflect.TypeOf(ingress))
return
}
// Do nothing if this is not the ingress we're waiting for.
if ingress.GetName() != inputIngressName {
return
}
// Start with a blank slate.
iia.ingressReady = false
// Mark the ingress as not ready if it's deleted.
if event.Type == watch.Deleted {
return
}
iia.ingress = ingress
// To the best of my knowledge, this works across all known ingress api version variations.
ingressesRaw, ok := openapi.Pluck(ingress.Object, "status", "loadBalancer", "ingress")
if !ok {
logger.V(3).Infof("Unable to decode Ingress object from unstructured: %#v", ingress)
return
}
ingresses, ok := ingressesRaw.([]any)
if !ok {
logger.V(3).Infof("Unexpected ingress object structure from unstructured: %#v", ingress)
return
}
// Update status of ingress object so that we can check success.
iia.ingressReady = len(ingresses) > 0
logger.V(3).Infof("Waiting for ingress %q to update .status.loadBalancer with hostname/IP",
inputIngressName)
}
func decodeIngress(u *unstructured.Unstructured, to any) error {
b, err := u.MarshalJSON()
if err != nil {
return err
}
err = json.Unmarshal(b, to)
if err != nil {
return err
}
return nil
}
func (iia *ingressInitAwaiter) checkIfEndpointsReady() (string, bool) {
apiVersion := iia.ingress.GetAPIVersion()
switch apiVersion {
case "extensions/v1beta1", "networking.k8s.io/v1beta1":
var obj networkingv1beta1.Ingress
if err := decodeIngress(iia.ingress, &obj); err != nil {
logger.V(3).Infof("Unable to decode Ingress object from unstructured: %#v", iia.ingress)
return apiVersion, false
}
for _, rule := range obj.Spec.Rules {
var httpIngressPaths []networkingv1beta1.HTTPIngressPath
if rule.HTTP != nil {
httpIngressPaths = rule.HTTP.Paths
}
for _, path := range httpIngressPaths {
// Ignore ExternalName services
if path.Backend.ServiceName != "" && iia.knownExternalNameServices.Has(path.Backend.ServiceName) {
continue
}
if path.Backend.ServiceName != "" && !iia.knownEndpointObjects.Has(path.Backend.ServiceName) {
if iia.endpointsSettled {
// We haven't seen the target endpoint emit any events within the settlement period
// and there is a chance it may never exist.
iia.config.logStatus(diag.Warning, fmt.Sprintf("No matching service found for ingress rule: %s",
expectedIngressPath(rule.Host, path.Path, path.Backend.ServiceName)))
} else {
// We may get more endpoint events, lets wait and retry.
return apiVersion, false
}
}
}
}
case "networking.k8s.io/v1":
var obj networkingv1.Ingress
if err := decodeIngress(iia.ingress, &obj); err != nil {
logger.V(3).Infof("Unable to decode Ingress object from unstructured: %#v", iia.ingress)
return apiVersion, false
}
for _, rule := range obj.Spec.Rules {
var httpIngressPaths []networkingv1.HTTPIngressPath
if rule.HTTP != nil {
httpIngressPaths = rule.HTTP.Paths
}
for _, path := range httpIngressPaths {
// TODO: Should we worry about "resource" backends?
if path.Backend.Service == nil {
continue
}
// Ignore ExternalName services
if path.Backend.Service.Name != "" && iia.knownExternalNameServices.Has(path.Backend.Service.Name) {
continue
}
if path.Backend.Service.Name != "" && !iia.knownEndpointObjects.Has(path.Backend.Service.Name) {
if iia.endpointsSettled {
// We haven't seen the target endpoint emit any events within the settlement period
// and there is a chance it may never exist
// (https://github.com/pulumi/pulumi-kubernetes/issues/1810)
iia.config.logStatus(diag.Warning, fmt.Sprintf("No matching service found for ingress rule: %s",
expectedIngressPath(rule.Host, path.Path, path.Backend.Service.Name)))
} else {
// We may get more endpoint events, lets wait and retry.
return apiVersion, false
}
}
}
}
}
return apiVersion, true
}
// expectedIngressPath is a helper to print a useful error message.
func expectedIngressPath(host, path, serviceName string) string {
rulePath := path
if host != "" {
rulePath = host + path
}
// It is valid for a user not to specify either a host or path [1]. In this case, any traffic not
// matching another rule is routed to the specified Service for this rule. Print
// `"" (default path)` to make this expectation clear to users.
//
// [1] https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.13/#httpingresspath-v1beta1-extensions
if rulePath == "" {
rulePath = `"" (default path)`
} else {
rulePath = fmt.Sprintf("%q", rulePath)
}
// [host][path] -> serviceName
return fmt.Sprintf("%s -> %q", rulePath, serviceName)
}
func (iia *ingressInitAwaiter) processEndpointEvent(event watch.Event, settledCh chan<- struct{}) {
// Get endpoint object.
endpoint, isUnstructured := event.Object.(*unstructured.Unstructured)
if !isUnstructured {
logger.V(3).Infof("Endpoint watch received unknown object type %q",
reflect.TypeOf(endpoint))
return
}
iia.endpointEventsCount++
name := endpoint.GetName()
switch event.Type {
case watch.Added, watch.Modified:
iia.knownEndpointObjects.Insert(name)
case watch.Deleted:
iia.knownEndpointObjects.Delete(name)
// NOTE: Unlike `processServiceEvent` don't return; we still want to set
// `iia.endpointsSettled` to `false`.
}
// Every time we get an update to one of our endpoints objects, give it a few seconds
// for them to settle.
iia.endpointsSettled = false
go func() {
time.Sleep(10 * time.Second)
settledCh <- struct{}{}
}()
}
func (iia *ingressInitAwaiter) errorMessages() []string | {
messages := make([]string, 0)
if _, ready := iia.checkIfEndpointsReady(); !ready {
messages = append(messages, "Ingress has at least one rule with an unavailable target endpoint.")
}
if !iia.ingressReady {
messages = append(messages,
"Ingress .status.loadBalancer field was not updated with a hostname/IP address. "+
"\n for more information about this error, see https://pulumi.io/xdv72s")
}
return messages
} | identifier_body | |
ingress.go | type ingressInitAwaiter struct {
config createAwaitConfig
ingress *unstructured.Unstructured
ingressReady bool
endpointsSettled bool
endpointEventsCount uint64
knownEndpointObjects sets.String
knownExternalNameServices sets.String
}
func makeIngressInitAwaiter(c createAwaitConfig) *ingressInitAwaiter {
return &ingressInitAwaiter{
config: c,
ingress: c.currentOutputs,
ingressReady: false,
endpointsSettled: false,
knownEndpointObjects: sets.NewString(),
knownExternalNameServices: sets.NewString(),
}
}
func awaitIngressInit(c createAwaitConfig) error {
return makeIngressInitAwaiter(c).Await()
}
func awaitIngressRead(c createAwaitConfig) error {
return makeIngressInitAwaiter(c).Read()
}
func awaitIngressUpdate(u updateAwaitConfig) error {
return makeIngressInitAwaiter(u.createAwaitConfig).Await()
}
func (iia *ingressInitAwaiter) Await() error {
//
// We succeed only when all of the following are true:
//
// 1. Ingress object exists.
// 2. Endpoint objects exist with matching names for each Ingress path (except when Service
// type is ExternalName).
// 3. Ingress entry exists for .status.loadBalancer.ingress.
//
stopper := make(chan struct{})
defer close(stopper)
informerFactory := informers.NewInformerFactory(iia.config.clientSet,
informers.WithNamespaceOrDefault(iia.config.currentInputs.GetNamespace()))
informerFactory.Start(stopper)
ingressEvents := make(chan watch.Event)
ingressInformer, err := informers.New(informerFactory, informers.ForGVR(schema.GroupVersionResource{
Group: "networking.k8s.io",
Version: "v1",
Resource: "ingresses",
}), informers.WithEventChannel(ingressEvents))
if err != nil {
return err
}
go ingressInformer.Informer().Run(stopper)
endpointsEvents := make(chan watch.Event)
endpointsInformer, err := informers.New(informerFactory, informers.ForEndpoints(), informers.WithEventChannel(endpointsEvents))
if err != nil {
return err
}
go endpointsInformer.Informer().Run(stopper)
serviceEvents := make(chan watch.Event)
serviceInformer, err := informers.New(informerFactory, informers.ForServices(), informers.WithEventChannel(serviceEvents))
if err != nil {
return err
}
go serviceInformer.Informer().Run(stopper)
timeout := metadata.TimeoutDuration(iia.config.timeout, iia.config.currentInputs, DefaultIngressTimeoutMins*60)
return iia.await(ingressEvents, serviceEvents, endpointsEvents, make(chan struct{}), time.After(60*time.Second), time.After(timeout))
}
func (iia *ingressInitAwaiter) Read() error {
ingressClient, endpointsClient, servicesClient, err := iia.makeClients()
if err != nil {
return err
}
// Get live versions of Ingress.
ingress, err := ingressClient.Get(iia.config.ctx, iia.config.currentInputs.GetName(), metav1.GetOptions{})
if err != nil {
// IMPORTANT: Do not wrap this error! If this is a 404, the provider need to know so that it
// can mark the deployment as having been deleted.
return err
}
// Get live version of Endpoints.
endpointList, err := endpointsClient.List(iia.config.ctx, metav1.ListOptions{})
if err != nil {
logger.V(3).Infof("Failed to list endpoints needed for Ingress awaiter: %v", err)
endpointList = &unstructured.UnstructuredList{Items: []unstructured.Unstructured{}}
}
serviceList, err := servicesClient.List(iia.config.ctx, metav1.ListOptions{})
if err != nil {
logger.V(3).Infof("Failed to list services needed for Ingress awaiter: %v", err)
serviceList = &unstructured.UnstructuredList{Items: []unstructured.Unstructured{}}
}
return iia.read(ingress, endpointList, serviceList)
}
func (iia *ingressInitAwaiter) read(ingress *unstructured.Unstructured, endpoints *unstructured.UnstructuredList,
services *unstructured.UnstructuredList) error {
iia.processIngressEvent(watchAddedEvent(ingress))
err := services.EachListItem(func(service runtime.Object) error {
iia.processServiceEvent(watchAddedEvent(service.(*unstructured.Unstructured)))
return nil
})
if err != nil {
logger.V(3).Infof("Error iterating over endpoint list for service %q: %v", ingress.GetName(), err)
}
settled := make(chan struct{})
logger.V(3).Infof("Processing endpoint list: %#v", endpoints)
err = endpoints.EachListItem(func(endpoint runtime.Object) error {
iia.processEndpointEvent(watchAddedEvent(endpoint.(*unstructured.Unstructured)), settled)
return nil
})
if err != nil {
logger.V(3).Infof("Error iterating over endpoint list for ingress %q: %v", ingress.GetName(), err)
}
iia.endpointsSettled = true
if iia.checkAndLogStatus() {
return nil
}
return &initializationError{
subErrors: iia.errorMessages(),
object: ingress,
}
}
// await is a helper companion to `Await` designed to make it easy to test this module.
func (iia *ingressInitAwaiter) await(
ingressEvents, serviceEvents, endpointsEvents <-chan watch.Event,
settled chan struct{},
settlementGracePeriodExpired <-chan time.Time,
timeout <-chan time.Time,
) error {
iia.config.logStatus(diag.Info, "[1/3] Finding a matching service for each Ingress path")
for {
// Check whether we've succeeded.
if iia.checkAndLogStatus() {
return nil
}
// Else, wait for updates.
select {
case <-iia.config.ctx.Done():
// On cancel, check one last time if the ingress is ready.
if _, ready := iia.checkIfEndpointsReady(); ready && iia.ingressReady {
return nil
}
return &cancellationError{
object: iia.ingress,
subErrors: iia.errorMessages(),
}
case <-timeout:
// On timeout, check one last time if the ingress is ready.
if _, ready := iia.checkIfEndpointsReady(); ready && iia.ingressReady {
return nil
}
return &timeoutError{
object: iia.ingress,
subErrors: iia.errorMessages(),
}
case <-settlementGracePeriodExpired:
// If we don't see any endpoint events in the designated time, assume endpoints have settled.
// This is to account for the distinct possibility of ingress using a resource reference or non-existent
// endpoints - in which case we will never see corresponding endpoint events.
if iia.endpointEventsCount == 0 {
iia.endpointsSettled = true
}
case <-settled:
iia.endpointsSettled = true
case event := <-ingressEvents:
iia.processIngressEvent(event)
case event := <-endpointsEvents:
iia.processEndpointEvent(event, settled)
case event := <-serviceEvents:
iia.processServiceEvent(event)
}
}
}
func (iia *ingressInitAwaiter) processServiceEvent(event watch.Event) {
service, isUnstructured := event.Object.(*unstructured.Unstructured)
if !isUnstructured {
logger.V(3).Infof("Service watch received unknown object type %q",
reflect.TypeOf(service))
return
}
name := service.GetName()
if event.Type == watch.Deleted {
iia.knownExternalNameServices.Delete(name)
return
}
t, ok := openapi.Pluck(service.Object, "spec", "type")
if ok && t.(string) == "ExternalName" {
iia.knownExternalNameServices.Insert(name)
}
}
func (iia *ingressInitAwaiter) processIngressEvent(event watch.Event) {
inputIngressName := iia.config.currentInputs.GetName()
ingress, isUnstructured := event.Object.(*unstructured.Unstructured)
if !isUnstructured {
logger.V(3).Infof("Ingress watch received unknown object type %q",
reflect.TypeOf(ingress))
return
}
// Do nothing if this is not the ingress we're waiting for.
if ingress.GetName() != inputIngressName {
return
}
// Start with a blank slate.
iia.ingressReady = false
// Mark the ingress as not ready if it's deleted.
if event.Type == watch.Deleted {
return
}
iia.ingress = ingress
// To the best of my knowledge, this works across all known ingress api version variations.
ingressesRaw, ok := openapi.Pluck(ingress.Object, "status", "loadBalancer", "ingress")
if !ok |
const (
DefaultIngressTimeoutMins = 10
)
| random_line_split | |
test.py | thread.start()
thread.join(timeout)
if thread.is_alive():
print(red('ERROR: timeout reached for this process'))
self.process.terminate()
thread.join()
self.process = None
def terminate(self):
if self.process != None:
self.process.terminate()
print('Ctrl-c pressed, aborting this test')
class ProgramTest(unittest.TestCase):
_testDir = None
_environ = None
_timeout = 300
# _labels = [WEEKLY]
@classmethod
def setTestDir(cls, newTestDir):
cls._testDir = newTestDir
@classmethod
def setEnviron(cls, newEnviron):
cls._environ = newEnviron
@classmethod
def setTimeOut(cls, newTimeOut):
cls._timeout = newTimeOut
def _parseArgs(self, args):
''' Expand our tags %o, %p and %d with corresponding values '''
args = args.replace("%o", self.outputDir)
args = args.replace("%p", self.program)
#args = args.replace("%d", self.fnDir)
return args
def _runCommands(self, cmdList, cmdType):
""" Run several commands.
Params:
cmdList: the list of commands to execute.
cmdType: either 'preruns' or 'postruns'
"""
pipe = '>'
outDir = self.outputDir
for cmd in cmdList:
if cmd:
cmd = self._parseArgs(cmd)
cmd = " %(cmd)s %(pipe)s %(outDir)s/%(cmdType)s_stdout.txt 2%(pipe)s %(outDir)s/%(cmdType)s_stderr.txt" % locals()
print " Running %s: %s" % (cmdType, blue(cmd))
command = Command(cmd, env=os.environ)
command.run(timeout=self._timeout)
pipe = ">>"
sys.stdout.flush()
def runCase(self, args, mpi=0, changeDir=False,
preruns=None, postruns=None, validate=None,
outputs=None, random=False, errorthreshold=0.001):
# Retrieve the correct case number from the test name id
# We asumme here that 'test_caseXXX' should be in the name
caseId = unittest.TestCase.id(self)
if not 'test_case' in caseId:
raise Exception("'test_case' string should be in the test function name followed by a number")
_counter = int(caseId.split('test_case')[1])
self._testDir = os.environ.get("XMIPP_TEST_DATA")
self.outputDir = os.path.join('tmpLink', '%s_%02d' % (self.program, _counter))
self.outputDirAbs = os.path.join(self._testDir, self.outputDir)
self.goldDir = os.path.join(self._testDir, 'gold', '%s_%02d' % (self.program, _counter))
# Change to tests root folder (self._testDir)
cwd = os.getcwd()
os.chdir(self._testDir)
# Clean and create the program output folder if not exists
createDir(self.outputDir, clean=True)
if preruns:
self._runCommands(preruns, 'preruns')
if mpi:
cmd = "mpirun -np %d `which %s`" % (mpi, self.program)
else:
cmd = self.program
args = self._parseArgs(args)
if changeDir:
cmd = "cd %s ; %s %s > stdout.txt 2> stderr.txt" % (self.outputDir, cmd, args)
else:
cmd = "%s %s > %s/stdout.txt 2> %s/stderr.txt" % (cmd, args, self.outputDir, self.outputDir)
print " Command: "
print " ", blue(cmd)
sys.stdout.flush()
#run the test itself
command = Command(cmd, env=os.environ)
self._command = command
try:
command.run(timeout=self._timeout)
except KeyboardInterrupt:
command.terminate()
stderrFn = "%s/stderr.txt" % self.outputDir
if os.path.exists(stderrFn):
errFile = open(stderrFn, 'r')
errStr = errFile.read()
errFile.close()
if 'XMIPP_ERROR' in errStr:
print errStr
if postruns:
self._runCommands(postruns, 'postruns')
if outputs:
self._checkOutputs(outputs, random, errorthreshold=errorthreshold)
if validate:
validate()
os.chdir(cwd)
def _checkOutputs(self, outputs, random=False, errorthreshold=0.001):
""" Check that all output files are produced
and are equivalent to the ones in goldStandard folder.
"""
for out in outputs:
outFile = os.path.join(self._testDir, self.outputDir, out)
fileGoldStd = os.path.join(self.goldDir, out)
# Check the expect output file was produced
msg = "Missing expected output file:\n output: %s" % outFile
self.assertTrue(os.path.exists(outFile), red(msg))
if random:
print(yellow("WARNING: %s was created using a random seed, check skipped..." % outFile))
else:
fnGoldStd = xmippLib.FileName(fileGoldStd)
if fnGoldStd.isImage():
im1 = xmippLib.Image(fileGoldStd)
im2 = xmippLib.Image(outFile)
msg = "Images are not equal (+-%f):\n output: %s\n gold: %s" % \
(errorthreshold, outFile, fileGoldStd)
self.assertTrue(im1.equal(im2, errorthreshold), red(msg))
elif fnGoldStd.isMetaData():
msg = "MetaDatas are not equal:\n output: %s\n gold: %s" % (outFile, fileGoldStd)
self.assertTrue(xmippLib.compareTwoMetadataFiles(outFile, fileGoldStd), red(msg))
else:
msg = "Files are not equal:\n output: %s\n gold: %s" % (outFile, fileGoldStd)
self.assertTrue(xmippLib.compareTwoFiles(outFile, fileGoldStd, 0), red(msg))
class GTestResult(unittest.TestResult):
""" Subclass TestResult to output tests results with colors (green for success and red for failure)
and write a report on an .xml file.
"""
xml = None
testFailed = 0
numberTests = 0
def __init__(self):
unittest.TestResult.__init__(self)
self.startTimeAll = time.time()
def openXmlReport(self, classname, filename):
# self.xml = open(filename, 'w')
# self.xml.write('<testsuite name="%s">\n' % classname)
pass
def doReport(self):
secs = time.time() - self.startTimeAll
sys.stderr.write("%s run %d tests (%0.3f secs)\n" %
(green("[==========]"), self.numberTests, secs))
if self.testFailed:
print >> sys.stderr, red("[ FAILED ]") + " %d tests" % self.testFailed
print >> sys.stdout, green("[ PASSED ]") + " %d tests" % (self.numberTests - self.testFailed)
sys.stdout.flush()
# self.xml.write('</testsuite>\n')
# self.xml.close()
def tic(self):
self.startTime = time.time()
def toc(self):
return time.time() - self.startTime
def startTest(self, test):
self.tic()
self.numberTests += 1
def getTestName(self, test):
parts = str(test).split()
name = parts[0]
parts = parts[1].split('.')
classname = parts[-1].replace(")", "")
return "%s.%s" % (classname, name)
def addSuccess(self, test):
secs = self.toc()
sys.stderr.write("%s %s (%0.3f secs)\n\n" % (green('[ RUN OK ]'), self.getTestName(test), secs))
def reportError(self, test, err):
sys.stderr.write("\n%s" % ("".join(format_exception(*err))))
sys.stderr.write("%s %s\n\n" % (red('[ FAILED ]'),
self.getTestName(test)))
self.testFailed += 1
def addError(self, test, err):
self.reportError(test, err)
def addFailure(self, test, err):
self.reportError(test, err)
def green(text):
return "\033[92m "+text+"\033[0m"
def red(text):
return "\033[91m "+text+"\033[0m"
def blue(text):
return "\033[34m "+text+"\033[0m"
def yellow(text):
return "\033[93m "+text+"\033[0m"
def createDir(dirname, clean=False):
if clean and os.path.exists(dirname):
shutil.rmtree(dirname)
os.makedirs(dirname)
def visitTests(tests, grepStr=''): | """ Show the list of tests available """ | random_line_split | |
test.py | self.process = None
self.env = env
def run(self, timeout):
# type: (object) -> object
def target():
self.process = subprocess.Popen(self.cmd, shell=True, env=self.env)
self.process.communicate()
thread = threading.Thread(target=target)
thread.start()
thread.join(timeout)
if thread.is_alive():
print(red('ERROR: timeout reached for this process'))
self.process.terminate()
thread.join()
self.process = None
def terminate(self):
if self.process != None:
self.process.terminate()
print('Ctrl-c pressed, aborting this test')
class ProgramTest(unittest.TestCase):
_testDir = None
_environ = None
_timeout = 300
# _labels = [WEEKLY]
@classmethod
def setTestDir(cls, newTestDir):
cls._testDir = newTestDir
@classmethod
def setEnviron(cls, newEnviron):
cls._environ = newEnviron
@classmethod
def setTimeOut(cls, newTimeOut):
cls._timeout = newTimeOut
def _parseArgs(self, args):
''' Expand our tags %o, %p and %d with corresponding values '''
args = args.replace("%o", self.outputDir)
args = args.replace("%p", self.program)
#args = args.replace("%d", self.fnDir)
return args
def _runCommands(self, cmdList, cmdType):
|
def runCase(self, args, mpi=0, changeDir=False,
preruns=None, postruns=None, validate=None,
outputs=None, random=False, errorthreshold=0.001):
# Retrieve the correct case number from the test name id
# We asumme here that 'test_caseXXX' should be in the name
caseId = unittest.TestCase.id(self)
if not 'test_case' in caseId:
raise Exception("'test_case' string should be in the test function name followed by a number")
_counter = int(caseId.split('test_case')[1])
self._testDir = os.environ.get("XMIPP_TEST_DATA")
self.outputDir = os.path.join('tmpLink', '%s_%02d' % (self.program, _counter))
self.outputDirAbs = os.path.join(self._testDir, self.outputDir)
self.goldDir = os.path.join(self._testDir, 'gold', '%s_%02d' % (self.program, _counter))
# Change to tests root folder (self._testDir)
cwd = os.getcwd()
os.chdir(self._testDir)
# Clean and create the program output folder if not exists
createDir(self.outputDir, clean=True)
if preruns:
self._runCommands(preruns, 'preruns')
if mpi:
cmd = "mpirun -np %d `which %s`" % (mpi, self.program)
else:
cmd = self.program
args = self._parseArgs(args)
if changeDir:
cmd = "cd %s ; %s %s > stdout.txt 2> stderr.txt" % (self.outputDir, cmd, args)
else:
cmd = "%s %s > %s/stdout.txt 2> %s/stderr.txt" % (cmd, args, self.outputDir, self.outputDir)
print " Command: "
print " ", blue(cmd)
sys.stdout.flush()
#run the test itself
command = Command(cmd, env=os.environ)
self._command = command
try:
command.run(timeout=self._timeout)
except KeyboardInterrupt:
command.terminate()
stderrFn = "%s/stderr.txt" % self.outputDir
if os.path.exists(stderrFn):
errFile = open(stderrFn, 'r')
errStr = errFile.read()
errFile.close()
if 'XMIPP_ERROR' in errStr:
print errStr
if postruns:
self._runCommands(postruns, 'postruns')
if outputs:
self._checkOutputs(outputs, random, errorthreshold=errorthreshold)
if validate:
validate()
os.chdir(cwd)
def _checkOutputs(self, outputs, random=False, errorthreshold=0.001):
""" Check that all output files are produced
and are equivalent to the ones in goldStandard folder.
"""
for out in outputs:
outFile = os.path.join(self._testDir, self.outputDir, out)
fileGoldStd = os.path.join(self.goldDir, out)
# Check the expect output file was produced
msg = "Missing expected output file:\n output: %s" % outFile
self.assertTrue(os.path.exists(outFile), red(msg))
if random:
print(yellow("WARNING: %s was created using a random seed, check skipped..." % outFile))
else:
fnGoldStd = xmippLib.FileName(fileGoldStd)
if fnGoldStd.isImage():
im1 = xmippLib.Image(fileGoldStd)
im2 = xmippLib.Image(outFile)
msg = "Images are not equal (+-%f):\n output: %s\n gold: %s" % \
(errorthreshold, outFile, fileGoldStd)
self.assertTrue(im1.equal(im2, errorthreshold), red(msg))
elif fnGoldStd.isMetaData():
msg = "MetaDatas are not equal:\n output: %s\n gold: %s" % (outFile, fileGoldStd)
self.assertTrue(xmippLib.compareTwoMetadataFiles(outFile, fileGoldStd), red(msg))
else:
msg = "Files are not equal:\n output: %s\n gold: %s" % (outFile, fileGoldStd)
self.assertTrue(xmippLib.compareTwoFiles(outFile, fileGoldStd, 0), red(msg))
class GTestResult(unittest.TestResult):
""" Subclass TestResult to output tests results with colors (green for success and red for failure)
and write a report on an .xml file.
"""
xml = None
testFailed = 0
numberTests = 0
def __init__(self):
unittest.TestResult.__init__(self)
self.startTimeAll = time.time()
def openXmlReport(self, classname, filename):
# self.xml = open(filename, 'w')
# self.xml.write('<testsuite name="%s">\n' % classname)
pass
def doReport(self):
secs = time.time() - self.startTimeAll
sys.stderr.write("%s run %d tests (%0.3f secs)\n" %
(green("[==========]"), self.numberTests, secs))
if self.testFailed:
print >> sys.stderr, red("[ FAILED ]") + " %d tests" % self.testFailed
print >> sys.stdout, green("[ PASSED ]") + " %d tests" % (self.numberTests - self.testFailed)
sys.stdout.flush()
# self.xml.write('</testsuite>\n')
# self.xml.close()
def tic(self):
self.startTime = time.time()
def toc(self):
return time.time() - self.startTime
def startTest(self, test):
self.tic()
self.numberTests += 1
def getTestName(self, test):
parts = str(test).split()
name = parts[0]
parts = parts[1].split('.')
classname = parts[-1].replace(")", "")
return "%s.%s" % (classname, name)
def addSuccess(self, test):
secs = self.toc()
sys.stderr.write("%s %s (%0.3f secs)\n\n" % (green('[ RUN OK ]'), self.getTestName(test), secs))
def reportError(self, test, err):
sys.stderr.write("\n%s" % ("".join(format_exception(*err))))
sys.stderr.write("%s %s\n\n" % (red('[ FAILED ]'),
self.getTestName(test)))
self.testFailed += 1
def addError(self, test, err):
self.reportError(test, err)
def addFailure(self, test, err):
self.reportError(test, err)
def green(text):
return "\033[92m "+text+"\033[0m"
def red(text):
return "\033[91m "+text+"\033[0m"
def blue(text):
return "\033[34m "+text+"\033[0m"
def yellow(text):
return "\ | """ Run several commands.
Params:
cmdList: the list of commands to execute.
cmdType: either 'preruns' or 'postruns'
"""
pipe = '>'
outDir = self.outputDir
for cmd in cmdList:
if cmd:
cmd = self._parseArgs(cmd)
cmd = " %(cmd)s %(pipe)s %(outDir)s/%(cmdType)s_stdout.txt 2%(pipe)s %(outDir)s/%(cmdType)s_stderr.txt" % locals()
print " Running %s: %s" % (cmdType, blue(cmd))
command = Command(cmd, env=os.environ)
command.run(timeout=self._timeout)
pipe = ">>"
sys.stdout.flush() | identifier_body |
test.py | self.process = None
self.env = env
def run(self, timeout):
# type: (object) -> object
def target():
self.process = subprocess.Popen(self.cmd, shell=True, env=self.env)
self.process.communicate()
thread = threading.Thread(target=target)
thread.start()
thread.join(timeout)
if thread.is_alive():
print(red('ERROR: timeout reached for this process'))
self.process.terminate()
thread.join()
self.process = None
def terminate(self):
if self.process != None:
self.process.terminate()
print('Ctrl-c pressed, aborting this test')
class ProgramTest(unittest.TestCase):
_testDir = None
_environ = None
_timeout = 300
# _labels = [WEEKLY]
@classmethod
def setTestDir(cls, newTestDir):
cls._testDir = newTestDir
@classmethod
def setEnviron(cls, newEnviron):
cls._environ = newEnviron
@classmethod
def setTimeOut(cls, newTimeOut):
cls._timeout = newTimeOut
def _parseArgs(self, args):
''' Expand our tags %o, %p and %d with corresponding values '''
args = args.replace("%o", self.outputDir)
args = args.replace("%p", self.program)
#args = args.replace("%d", self.fnDir)
return args
def _runCommands(self, cmdList, cmdType):
""" Run several commands.
Params:
cmdList: the list of commands to execute.
cmdType: either 'preruns' or 'postruns'
"""
pipe = '>'
outDir = self.outputDir
for cmd in cmdList:
if cmd:
cmd = self._parseArgs(cmd)
cmd = " %(cmd)s %(pipe)s %(outDir)s/%(cmdType)s_stdout.txt 2%(pipe)s %(outDir)s/%(cmdType)s_stderr.txt" % locals()
print " Running %s: %s" % (cmdType, blue(cmd))
command = Command(cmd, env=os.environ)
command.run(timeout=self._timeout)
pipe = ">>"
sys.stdout.flush()
def runCase(self, args, mpi=0, changeDir=False,
preruns=None, postruns=None, validate=None,
outputs=None, random=False, errorthreshold=0.001):
# Retrieve the correct case number from the test name id
# We asumme here that 'test_caseXXX' should be in the name
caseId = unittest.TestCase.id(self)
if not 'test_case' in caseId:
raise Exception("'test_case' string should be in the test function name followed by a number")
_counter = int(caseId.split('test_case')[1])
self._testDir = os.environ.get("XMIPP_TEST_DATA")
self.outputDir = os.path.join('tmpLink', '%s_%02d' % (self.program, _counter))
self.outputDirAbs = os.path.join(self._testDir, self.outputDir)
self.goldDir = os.path.join(self._testDir, 'gold', '%s_%02d' % (self.program, _counter))
# Change to tests root folder (self._testDir)
cwd = os.getcwd()
os.chdir(self._testDir)
# Clean and create the program output folder if not exists
createDir(self.outputDir, clean=True)
if preruns:
self._runCommands(preruns, 'preruns')
if mpi:
cmd = "mpirun -np %d `which %s`" % (mpi, self.program)
else:
cmd = self.program
args = self._parseArgs(args)
if changeDir:
cmd = "cd %s ; %s %s > stdout.txt 2> stderr.txt" % (self.outputDir, cmd, args)
else:
cmd = "%s %s > %s/stdout.txt 2> %s/stderr.txt" % (cmd, args, self.outputDir, self.outputDir)
print " Command: "
print " ", blue(cmd)
sys.stdout.flush()
#run the test itself
command = Command(cmd, env=os.environ)
self._command = command
try:
command.run(timeout=self._timeout)
except KeyboardInterrupt:
command.terminate()
stderrFn = "%s/stderr.txt" % self.outputDir
if os.path.exists(stderrFn):
errFile = open(stderrFn, 'r')
errStr = errFile.read()
errFile.close()
if 'XMIPP_ERROR' in errStr:
print errStr
if postruns:
self._runCommands(postruns, 'postruns')
if outputs:
self._checkOutputs(outputs, random, errorthreshold=errorthreshold)
if validate:
validate()
os.chdir(cwd)
def _checkOutputs(self, outputs, random=False, errorthreshold=0.001):
""" Check that all output files are produced
and are equivalent to the ones in goldStandard folder.
"""
for out in outputs:
| else:
msg = "Files are not equal:\n output: %s\n gold: %s" % (outFile, fileGoldStd)
self.assertTrue(xmippLib.compareTwoFiles(outFile, fileGoldStd, 0), red(msg))
class GTestResult(unittest.TestResult):
""" Subclass TestResult to output tests results with colors (green for success and red for failure)
and write a report on an .xml file.
"""
xml = None
testFailed = 0
numberTests = 0
def __init__(self):
unittest.TestResult.__init__(self)
self.startTimeAll = time.time()
def openXmlReport(self, classname, filename):
# self.xml = open(filename, 'w')
# self.xml.write('<testsuite name="%s">\n' % classname)
pass
def doReport(self):
secs = time.time() - self.startTimeAll
sys.stderr.write("%s run %d tests (%0.3f secs)\n" %
(green("[==========]"), self.numberTests, secs))
if self.testFailed:
print >> sys.stderr, red("[ FAILED ]") + " %d tests" % self.testFailed
print >> sys.stdout, green("[ PASSED ]") + " %d tests" % (self.numberTests - self.testFailed)
sys.stdout.flush()
# self.xml.write('</testsuite>\n')
# self.xml.close()
def tic(self):
self.startTime = time.time()
def toc(self):
return time.time() - self.startTime
def startTest(self, test):
self.tic()
self.numberTests += 1
def getTestName(self, test):
parts = str(test).split()
name = parts[0]
parts = parts[1].split('.')
classname = parts[-1].replace(")", "")
return "%s.%s" % (classname, name)
def addSuccess(self, test):
secs = self.toc()
sys.stderr.write("%s %s (%0.3f secs)\n\n" % (green('[ RUN OK ]'), self.getTestName(test), secs))
def reportError(self, test, err):
sys.stderr.write("\n%s" % ("".join(format_exception(*err))))
sys.stderr.write("%s %s\n\n" % (red('[ FAILED ]'),
self.getTestName(test)))
self.testFailed += 1
def addError(self, test, err):
self.reportError(test, err)
def addFailure(self, test, err):
self.reportError(test, err)
def green(text):
return "\033[92m "+text+"\033[0m"
def red(text):
return "\033[91m "+text+"\033[0m"
def blue(text):
return "\033[34m "+text+"\033[0m"
def yellow(text):
return | outFile = os.path.join(self._testDir, self.outputDir, out)
fileGoldStd = os.path.join(self.goldDir, out)
# Check the expect output file was produced
msg = "Missing expected output file:\n output: %s" % outFile
self.assertTrue(os.path.exists(outFile), red(msg))
if random:
print(yellow("WARNING: %s was created using a random seed, check skipped..." % outFile))
else:
fnGoldStd = xmippLib.FileName(fileGoldStd)
if fnGoldStd.isImage():
im1 = xmippLib.Image(fileGoldStd)
im2 = xmippLib.Image(outFile)
msg = "Images are not equal (+-%f):\n output: %s\n gold: %s" % \
(errorthreshold, outFile, fileGoldStd)
self.assertTrue(im1.equal(im2, errorthreshold), red(msg))
elif fnGoldStd.isMetaData():
msg = "MetaDatas are not equal:\n output: %s\n gold: %s" % (outFile, fileGoldStd)
self.assertTrue(xmippLib.compareTwoMetadataFiles(outFile, fileGoldStd), red(msg)) | conditional_block |
test.py | self.process = None
self.env = env
def run(self, timeout):
# type: (object) -> object
def target():
self.process = subprocess.Popen(self.cmd, shell=True, env=self.env)
self.process.communicate()
thread = threading.Thread(target=target)
thread.start()
thread.join(timeout)
if thread.is_alive():
print(red('ERROR: timeout reached for this process'))
self.process.terminate()
thread.join()
self.process = None
def terminate(self):
if self.process != None:
self.process.terminate()
print('Ctrl-c pressed, aborting this test')
class ProgramTest(unittest.TestCase):
_testDir = None
_environ = None
_timeout = 300
# _labels = [WEEKLY]
@classmethod
def setTestDir(cls, newTestDir):
cls._testDir = newTestDir
@classmethod
def setEnviron(cls, newEnviron):
cls._environ = newEnviron
@classmethod
def setTimeOut(cls, newTimeOut):
cls._timeout = newTimeOut
def _parseArgs(self, args):
''' Expand our tags %o, %p and %d with corresponding values '''
args = args.replace("%o", self.outputDir)
args = args.replace("%p", self.program)
#args = args.replace("%d", self.fnDir)
return args
def _runCommands(self, cmdList, cmdType):
""" Run several commands.
Params:
cmdList: the list of commands to execute.
cmdType: either 'preruns' or 'postruns'
"""
pipe = '>'
outDir = self.outputDir
for cmd in cmdList:
if cmd:
cmd = self._parseArgs(cmd)
cmd = " %(cmd)s %(pipe)s %(outDir)s/%(cmdType)s_stdout.txt 2%(pipe)s %(outDir)s/%(cmdType)s_stderr.txt" % locals()
print " Running %s: %s" % (cmdType, blue(cmd))
command = Command(cmd, env=os.environ)
command.run(timeout=self._timeout)
pipe = ">>"
sys.stdout.flush()
def runCase(self, args, mpi=0, changeDir=False,
preruns=None, postruns=None, validate=None,
outputs=None, random=False, errorthreshold=0.001):
# Retrieve the correct case number from the test name id
# We asumme here that 'test_caseXXX' should be in the name
caseId = unittest.TestCase.id(self)
if not 'test_case' in caseId:
raise Exception("'test_case' string should be in the test function name followed by a number")
_counter = int(caseId.split('test_case')[1])
self._testDir = os.environ.get("XMIPP_TEST_DATA")
self.outputDir = os.path.join('tmpLink', '%s_%02d' % (self.program, _counter))
self.outputDirAbs = os.path.join(self._testDir, self.outputDir)
self.goldDir = os.path.join(self._testDir, 'gold', '%s_%02d' % (self.program, _counter))
# Change to tests root folder (self._testDir)
cwd = os.getcwd()
os.chdir(self._testDir)
# Clean and create the program output folder if not exists
createDir(self.outputDir, clean=True)
if preruns:
self._runCommands(preruns, 'preruns')
if mpi:
cmd = "mpirun -np %d `which %s`" % (mpi, self.program)
else:
cmd = self.program
args = self._parseArgs(args)
if changeDir:
cmd = "cd %s ; %s %s > stdout.txt 2> stderr.txt" % (self.outputDir, cmd, args)
else:
cmd = "%s %s > %s/stdout.txt 2> %s/stderr.txt" % (cmd, args, self.outputDir, self.outputDir)
print " Command: "
print " ", blue(cmd)
sys.stdout.flush()
#run the test itself
command = Command(cmd, env=os.environ)
self._command = command
try:
command.run(timeout=self._timeout)
except KeyboardInterrupt:
command.terminate()
stderrFn = "%s/stderr.txt" % self.outputDir
if os.path.exists(stderrFn):
errFile = open(stderrFn, 'r')
errStr = errFile.read()
errFile.close()
if 'XMIPP_ERROR' in errStr:
print errStr
if postruns:
self._runCommands(postruns, 'postruns')
if outputs:
self._checkOutputs(outputs, random, errorthreshold=errorthreshold)
if validate:
validate()
os.chdir(cwd)
def _checkOutputs(self, outputs, random=False, errorthreshold=0.001):
""" Check that all output files are produced
and are equivalent to the ones in goldStandard folder.
"""
for out in outputs:
outFile = os.path.join(self._testDir, self.outputDir, out)
fileGoldStd = os.path.join(self.goldDir, out)
# Check the expect output file was produced
msg = "Missing expected output file:\n output: %s" % outFile
self.assertTrue(os.path.exists(outFile), red(msg))
if random:
print(yellow("WARNING: %s was created using a random seed, check skipped..." % outFile))
else:
fnGoldStd = xmippLib.FileName(fileGoldStd)
if fnGoldStd.isImage():
im1 = xmippLib.Image(fileGoldStd)
im2 = xmippLib.Image(outFile)
msg = "Images are not equal (+-%f):\n output: %s\n gold: %s" % \
(errorthreshold, outFile, fileGoldStd)
self.assertTrue(im1.equal(im2, errorthreshold), red(msg))
elif fnGoldStd.isMetaData():
msg = "MetaDatas are not equal:\n output: %s\n gold: %s" % (outFile, fileGoldStd)
self.assertTrue(xmippLib.compareTwoMetadataFiles(outFile, fileGoldStd), red(msg))
else:
msg = "Files are not equal:\n output: %s\n gold: %s" % (outFile, fileGoldStd)
self.assertTrue(xmippLib.compareTwoFiles(outFile, fileGoldStd, 0), red(msg))
class GTestResult(unittest.TestResult):
""" Subclass TestResult to output tests results with colors (green for success and red for failure)
and write a report on an .xml file.
"""
xml = None
testFailed = 0
numberTests = 0
def __init__(self):
unittest.TestResult.__init__(self)
self.startTimeAll = time.time()
def openXmlReport(self, classname, filename):
# self.xml = open(filename, 'w')
# self.xml.write('<testsuite name="%s">\n' % classname)
pass
def | (self):
secs = time.time() - self.startTimeAll
sys.stderr.write("%s run %d tests (%0.3f secs)\n" %
(green("[==========]"), self.numberTests, secs))
if self.testFailed:
print >> sys.stderr, red("[ FAILED ]") + " %d tests" % self.testFailed
print >> sys.stdout, green("[ PASSED ]") + " %d tests" % (self.numberTests - self.testFailed)
sys.stdout.flush()
# self.xml.write('</testsuite>\n')
# self.xml.close()
def tic(self):
self.startTime = time.time()
def toc(self):
return time.time() - self.startTime
def startTest(self, test):
self.tic()
self.numberTests += 1
def getTestName(self, test):
parts = str(test).split()
name = parts[0]
parts = parts[1].split('.')
classname = parts[-1].replace(")", "")
return "%s.%s" % (classname, name)
def addSuccess(self, test):
secs = self.toc()
sys.stderr.write("%s %s (%0.3f secs)\n\n" % (green('[ RUN OK ]'), self.getTestName(test), secs))
def reportError(self, test, err):
sys.stderr.write("\n%s" % ("".join(format_exception(*err))))
sys.stderr.write("%s %s\n\n" % (red('[ FAILED ]'),
self.getTestName(test)))
self.testFailed += 1
def addError(self, test, err):
self.reportError(test, err)
def addFailure(self, test, err):
self.reportError(test, err)
def green(text):
return "\033[92m "+text+"\033[0m"
def red(text):
return "\033[91m "+text+"\033[0m"
def blue(text):
return "\033[34m "+text+"\033[0m"
def yellow(text):
return | doReport | identifier_name |
DLProject_DNN.py |
os.chdir(path)
cwd = os.getcwd()
# 256 neurons in each hidden layers
n_hidden_1 = 400
n_hidden_2 = 300
n_hidden_3 = 200
n_hidden_4 = 100
n_hidden_5 = 50
# There are 10 levels, and we consider 40 timestamps and the mid_price
# output size
input_size = 400
output_size = 3
# Parameters
learning_rate = 0.001
training_epochs = 2500
batch_size = 32
display_step = 1
test_size=0.2
start_time = time.time()
# number of previous timestamps used for prediction
n_times = 40
# number of quote levels used for prediction
n_levels = 10
split=0.7
#Labels to extract from data
L_labels = ['Date', 'Time', 'L1-BidPrice', 'L1-BidSize', 'L1-BuyNo', 'L1-AskPrice',
'L1-AskSize', 'L1-SellNo', 'L2-BidPrice', 'L2-BidSize', 'L2-BuyNo',
'L2-AskPrice', 'L2-AskSize', 'L2-SellNo', 'L3-BidPrice', 'L3-BidSize',
'L3-BuyNo', 'L3-AskPrice', 'L3-AskSize', 'L3-SellNo', 'L4-BidPrice',
'L4-BidSize', 'L4-BuyNo', 'L4-AskPrice', 'L4-AskSize', 'L4-SellNo',
'L5-BidPrice', 'L5-BidSize', 'L5-BuyNo', 'L5-AskPrice', 'L5-AskSize',
'L5-SellNo', 'L6-BidPrice', 'L6-BidSize', 'L6-BuyNo', 'L6-AskPrice',
'L6-AskSize', 'L6-SellNo', 'L7-BidPrice', 'L7-BidSize', 'L7-BuyNo',
'L7-AskPrice', 'L7-AskSize', 'L7-SellNo', 'L8-BidPrice', 'L8-BidSize',
'L8-BuyNo', 'L8-AskPrice', 'L8-AskSize', 'L8-SellNo', 'L9-BidPrice',
'L9-BidSize', 'L9-BuyNo', 'L9-AskPrice', 'L9-AskSize', 'L9-SellNo',
'L10-BidPrice', 'L10-BidSize', 'L10-BuyNo', 'L10-AskPrice', 'L10-AskSize',
'L10-SellNo']
# import tick data from the given path
print("Importing the data...")
Nrow = input("How many rows in the dataset do you want to consider ?")
if (Nrow == ''):
df_import = pd.read_csv(file_path)
else :
df_import = pd.read_csv(file_path, nrows=int(Nrow))
df = df_import[L_labels]
print("Done.")
df2 = pd.DataFrame()
print()
print("Rearraging the data to compute the mid prices..")
print(" First step..")
count = 0
for i in range(n_levels):
if (count/10 < np.floor(10*i/n_levels)):
count = round(100*i/n_levels)
print (" "+str(count)+" %"),
i = i+1
df2['relative depth'+str(i)]=df['L'+str(i)+'-BidSize']/(df['L'+str(i)+'-BidSize']+df['L'+str(i)+'-AskSize'])
df2['mid price']=(df['L1-AskPrice']+df['L1-BidPrice'])/2
df2['mid_price_change'] = 0
print(" Done.. ")
print(" Second step..")
count = 0
for i in range(1,len(df2)):
if (count/10 < np.floor(10*i/len(df2))):
count = round(100*i/len(df2))
print(" "+str(count)+" %")
df2.loc[i,'mid_price_change'] = df2.loc[i,'mid price']- df2.loc[i-1,'mid price']
print(" Done.. ")
print(" Third step..")
df3 = pd.DataFrame()
count = 0
for i in range(len(df2)-n_times):
if (count/10 < np.floor(10*i/(len(df2)-n_times))):
count = round(100*i/(len(df2)-n_times))
print(" "+str(count)+" %")
my_list = []
dataf = df2.iloc[i:(i+n_times),0:10]
datf = dataf.values.reshape(dataf.shape[0]*dataf.shape[1])
df_temp = pd.DataFrame([list(datf)])
df_temp['mid_price_change']=df2.loc[i+n_times,'mid_price_change']
df3 = pd.concat([df3,df_temp],ignore_index=True)
print(" Done.. ")
df3['classification'] = 0
df3['classification'] = df3['mid_price_change'].apply(lambda x: 0 if x == 0 else 1 if x > 0 else -1)
df3 = df3.drop(['mid_price_change'],axis=1)
print("Done.")
print()
# undersampling with temporal bias
print("Random undersampling...")
# set up a remaining rate
rr = 0.5
# construct bins of the majority class
my_list = []
temp_list = []
remain_index = []
for i in range(len(df3)):
if df3.loc[i,'classification']==0:
temp_list.append(i)
else:
remain_index.append(i)
if len(temp_list)>0:
my_list.append(temp_list)
temp_list = []
# in each bin, drop the data with a relative probability according to its position
for bins in my_list:
length = len(bins)
temp_list = np.arange(length)+1
pp = temp_list/sum(temp_list)
num = math.ceil(length*rr)
temp_index = np.random.choice(temp_list,num,p=pp,replace=False)
for i in temp_index:
remain_index.append(bins[i-1])
df3 = df3.loc[remain_index]
df3 = df3.sort_index()
df3 = df3.reset_index(drop=True)
print("Done.")
print()
# Oversampling using Smote algorithm
# setup the oversampling/enlarging rate er and the number of nearest neighbors k
k = 2
print("Smote Algorithm Oversampling...")
# calculate difference between 2 feature vectors
def diff(x,y,df,my_dict):
x_index = my_dict[x]
y_index = my_dict[y]
vec1 = np.array(df3.iloc[x_index,:df.shape[1]-1])
vec2 = np.array(df3.iloc[y_index,:df.shape[1]-1])
vec_diff = vec2-vec1
return vec_diff
# find index of nearest k neighbors from j's row of data
def find_neighbors(j,k,my_dict,my_dict2,df):
i = my_dict[j]
global dist_matrix
for l in range(len(dist_matrix)):
if dist_matrix[i][l]!=0 or i == l:
continue
dist_matrix[i][l]=dist_matrix[l][i]=LA.norm(diff(i,l,df,my_dict2))
a = dist_matrix[i]
neighbors = sorted(range(len(a)), key=lambda i: a[i])[:k+1]
neighbors.remove(i)
res = []
for l in neighbors:
res.append(my_dict2[l])
return res
temp_list = []
temp_list_neg = []
for i in range(len(df3)):
if df3.loc[i,'classification']==1:
temp_list.append(i)
if df3.loc[i,'classification']==-1:
temp_list_neg.append(i)
temp_list = np.asarray(temp_list)
length = len(temp_list)
temp_list_neg = np.asarray(temp_list_neg)
length_neg = len(temp_list_neg)
num_pos = max(1,len(df3)-length)
num_neg = max(1,len(df3)-length_neg)
# oversample the data with positive mid_price move
# initialize a matrix to save the feature difference between 2 feature vectors
dist_matrix = np.zeros((length,length))
# 2 dictionary to transform between matrix index and dataframe row index
my_dict = {}
my_dict2 = {}
for i in range(length):
my_dict[temp_list[i]]=i
my_dict2[i] = temp_list[i]
# main function
case_index = np.random.choice(temp_list,num_pos,replace=True)
count = 1
for j in case_index:
top_indexs = find_neighbors(j,k,my_dict,my_dict2,df3)
top_indexs = np.asarray(top_indexs)
my_index = np.random.choice(top_indexs)
temp_array = np.array(df3.iloc[j,:df3 | ile_path = Path | conditional_block | |
DLProject_DNN.py | L8-AskSize', 'L8-SellNo', 'L9-BidPrice',
'L9-BidSize', 'L9-BuyNo', 'L9-AskPrice', 'L9-AskSize', 'L9-SellNo',
'L10-BidPrice', 'L10-BidSize', 'L10-BuyNo', 'L10-AskPrice', 'L10-AskSize',
'L10-SellNo']
# import tick data from the given path
print("Importing the data...")
Nrow = input("How many rows in the dataset do you want to consider ?")
if (Nrow == ''):
df_import = pd.read_csv(file_path)
else :
df_import = pd.read_csv(file_path, nrows=int(Nrow))
df = df_import[L_labels]
print("Done.")
df2 = pd.DataFrame()
print()
print("Rearraging the data to compute the mid prices..")
print(" First step..")
count = 0
for i in range(n_levels):
if (count/10 < np.floor(10*i/n_levels)):
count = round(100*i/n_levels)
print (" "+str(count)+" %"),
i = i+1
df2['relative depth'+str(i)]=df['L'+str(i)+'-BidSize']/(df['L'+str(i)+'-BidSize']+df['L'+str(i)+'-AskSize'])
df2['mid price']=(df['L1-AskPrice']+df['L1-BidPrice'])/2
df2['mid_price_change'] = 0
print(" Done.. ")
print(" Second step..")
count = 0
for i in range(1,len(df2)):
if (count/10 < np.floor(10*i/len(df2))):
count = round(100*i/len(df2))
print(" "+str(count)+" %")
df2.loc[i,'mid_price_change'] = df2.loc[i,'mid price']- df2.loc[i-1,'mid price']
print(" Done.. ")
print(" Third step..")
df3 = pd.DataFrame()
count = 0
for i in range(len(df2)-n_times):
if (count/10 < np.floor(10*i/(len(df2)-n_times))):
count = round(100*i/(len(df2)-n_times))
print(" "+str(count)+" %")
my_list = []
dataf = df2.iloc[i:(i+n_times),0:10]
datf = dataf.values.reshape(dataf.shape[0]*dataf.shape[1])
df_temp = pd.DataFrame([list(datf)])
df_temp['mid_price_change']=df2.loc[i+n_times,'mid_price_change']
df3 = pd.concat([df3,df_temp],ignore_index=True)
print(" Done.. ")
df3['classification'] = 0
df3['classification'] = df3['mid_price_change'].apply(lambda x: 0 if x == 0 else 1 if x > 0 else -1)
df3 = df3.drop(['mid_price_change'],axis=1)
print("Done.")
print()
# undersampling with temporal bias
print("Random undersampling...")
# set up a remaining rate
rr = 0.5
# construct bins of the majority class
my_list = []
temp_list = []
remain_index = []
for i in range(len(df3)):
if df3.loc[i,'classification']==0:
temp_list.append(i)
else:
remain_index.append(i)
if len(temp_list)>0:
my_list.append(temp_list)
temp_list = []
# in each bin, drop the data with a relative probability according to its position
for bins in my_list:
length = len(bins)
temp_list = np.arange(length)+1
pp = temp_list/sum(temp_list)
num = math.ceil(length*rr)
temp_index = np.random.choice(temp_list,num,p=pp,replace=False)
for i in temp_index:
remain_index.append(bins[i-1])
df3 = df3.loc[remain_index]
df3 = df3.sort_index()
df3 = df3.reset_index(drop=True)
print("Done.")
print()
# Oversampling using Smote algorithm
# setup the oversampling/enlarging rate er and the number of nearest neighbors k
k = 2
print("Smote Algorithm Oversampling...")
# calculate difference between 2 feature vectors
def d | x,y,df,my_dict):
x_index = my_dict[x]
y_index = my_dict[y]
vec1 = np.array(df3.iloc[x_index,:df.shape[1]-1])
vec2 = np.array(df3.iloc[y_index,:df.shape[1]-1])
vec_diff = vec2-vec1
return vec_diff
# find index of nearest k neighbors from j's row of data
def find_neighbors(j,k,my_dict,my_dict2,df):
i = my_dict[j]
global dist_matrix
for l in range(len(dist_matrix)):
if dist_matrix[i][l]!=0 or i == l:
continue
dist_matrix[i][l]=dist_matrix[l][i]=LA.norm(diff(i,l,df,my_dict2))
a = dist_matrix[i]
neighbors = sorted(range(len(a)), key=lambda i: a[i])[:k+1]
neighbors.remove(i)
res = []
for l in neighbors:
res.append(my_dict2[l])
return res
temp_list = []
temp_list_neg = []
for i in range(len(df3)):
if df3.loc[i,'classification']==1:
temp_list.append(i)
if df3.loc[i,'classification']==-1:
temp_list_neg.append(i)
temp_list = np.asarray(temp_list)
length = len(temp_list)
temp_list_neg = np.asarray(temp_list_neg)
length_neg = len(temp_list_neg)
num_pos = max(1,len(df3)-length)
num_neg = max(1,len(df3)-length_neg)
# oversample the data with positive mid_price move
# initialize a matrix to save the feature difference between 2 feature vectors
dist_matrix = np.zeros((length,length))
# 2 dictionary to transform between matrix index and dataframe row index
my_dict = {}
my_dict2 = {}
for i in range(length):
my_dict[temp_list[i]]=i
my_dict2[i] = temp_list[i]
# main function
case_index = np.random.choice(temp_list,num_pos,replace=True)
count = 1
for j in case_index:
top_indexs = find_neighbors(j,k,my_dict,my_dict2,df3)
top_indexs = np.asarray(top_indexs)
my_index = np.random.choice(top_indexs)
temp_array = np.array(df3.iloc[j,:df3.shape[1]-1])
difference = diff(my_dict[j],my_dict[my_index],df3,my_dict2)
ran_num = random.uniform(0, 1)
temp_array = temp_array+difference*ran_num
temp_array = temp_array.tolist()
temp_array.append(1)
df3.loc[j+count/(num+1)] = temp_array
count += 1
df3 = df3.sort_index()
df3 = df3.reset_index(drop=True)
# oversample the data with negative mid_price move
temp_list = temp_list_neg
length = length_neg
# initialize a matrix to save the feature difference between 2 feature vectors
dist_matrix = np.zeros((length,length))
# 2 dictionary to transform between matrix index and dataframe row index
my_dict = {}
my_dict2 = {}
for i in range(length):
my_dict[temp_list[i]]=i
my_dict2[i] = temp_list[i]
# main function
case_index = np.random.choice(temp_list,num_neg,replace=True)
count = 1
for j in case_index:
top_indexs = find_neighbors(j,k,my_dict,my_dict2,df3)
top_indexs = np.asarray(top_indexs)
my_index = np.random.choice(top_indexs)
temp_array = np.array(df3.iloc[j,:df3.shape[1]-1])
difference = diff(my_dict[j],my_dict[my_index],df3,my_dict2)
ran_num = random.uniform(0, 1)
temp_array = temp_array+difference*ran_num
temp_array = temp_array.tolist()
temp_array.append(-1)
df3.loc[j+count/(num+1)] = temp_array
count += 1
df3 = df3.sort_index()
df3 = df3.reset_index(drop=True)
print("Done.")
print()
df3['classification'] = df3['classification'].apply(lambda x: [0,1,0] if x == 0 else [1,0,0] if x > 0 else [0,0,1])
#df3['classification']=df3['classification'].reshape
#Splitting the data into a training and test data:
y = df3['classification'].values
y=np.stack(y)
X = df3.drop('classification', axis=1).values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = test_size, random_state=42, stratify=y)
#y_test=y_test.reshape(X_test.shape[0],3)
def layer1(x, weight | iff( | identifier_name |
DLProject_DNN.py | length)
num_neg = max(1,len(df3)-length_neg)
# oversample the data with positive mid_price move
# initialize a matrix to save the feature difference between 2 feature vectors
dist_matrix = np.zeros((length,length))
# 2 dictionary to transform between matrix index and dataframe row index
my_dict = {}
my_dict2 = {}
for i in range(length):
my_dict[temp_list[i]]=i
my_dict2[i] = temp_list[i]
# main function
case_index = np.random.choice(temp_list,num_pos,replace=True)
count = 1
for j in case_index:
top_indexs = find_neighbors(j,k,my_dict,my_dict2,df3)
top_indexs = np.asarray(top_indexs)
my_index = np.random.choice(top_indexs)
temp_array = np.array(df3.iloc[j,:df3.shape[1]-1])
difference = diff(my_dict[j],my_dict[my_index],df3,my_dict2)
ran_num = random.uniform(0, 1)
temp_array = temp_array+difference*ran_num
temp_array = temp_array.tolist()
temp_array.append(1)
df3.loc[j+count/(num+1)] = temp_array
count += 1
df3 = df3.sort_index()
df3 = df3.reset_index(drop=True)
# oversample the data with negative mid_price move
temp_list = temp_list_neg
length = length_neg
# initialize a matrix to save the feature difference between 2 feature vectors
dist_matrix = np.zeros((length,length))
# 2 dictionary to transform between matrix index and dataframe row index
my_dict = {}
my_dict2 = {}
for i in range(length):
my_dict[temp_list[i]]=i
my_dict2[i] = temp_list[i]
# main function
case_index = np.random.choice(temp_list,num_neg,replace=True)
count = 1
for j in case_index:
top_indexs = find_neighbors(j,k,my_dict,my_dict2,df3)
top_indexs = np.asarray(top_indexs)
my_index = np.random.choice(top_indexs)
temp_array = np.array(df3.iloc[j,:df3.shape[1]-1])
difference = diff(my_dict[j],my_dict[my_index],df3,my_dict2)
ran_num = random.uniform(0, 1)
temp_array = temp_array+difference*ran_num
temp_array = temp_array.tolist()
temp_array.append(-1)
df3.loc[j+count/(num+1)] = temp_array
count += 1
df3 = df3.sort_index()
df3 = df3.reset_index(drop=True)
print("Done.")
print()
df3['classification'] = df3['classification'].apply(lambda x: [0,1,0] if x == 0 else [1,0,0] if x > 0 else [0,0,1])
#df3['classification']=df3['classification'].reshape
#Splitting the data into a training and test data:
y = df3['classification'].values
y=np.stack(y)
X = df3.drop('classification', axis=1).values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = test_size, random_state=42, stratify=y)
#y_test=y_test.reshape(X_test.shape[0],3)
def layer1(x, weight_shape, bias_shape):
"""
Defines the network layers
input:
- x: input vector of the layer
- weight_shape: shape the the weight maxtrix
- bias_shape: shape of the bias vector
output:
- output vector of the layer after the matrix multiplication and transformation
"""
weight_init = tf.random_normal_initializer(stddev=(2.0/weight_shape[0])**0.5)
W = tf.get_variable("W", weight_shape, initializer=weight_init)
bias_init = tf.constant_initializer(value=0)
b = tf.get_variable("b", bias_shape, initializer=bias_init)
return tf.nn.softmax(tf.matmul(x, W) + b)
def layer2(x, weight_shape, bias_shape):
"""
Defines the network layers
input:
- x: input vector of the layer
- weight_shape: shape the the weight maxtrix
- bias_shape: shape of the bias vector
output:
- output vector of the layer after the matrix multiplication and transformation
"""
weight_init = tf.random_normal_initializer(stddev=(2.0/weight_shape[0])**0.5)
W = tf.get_variable("W", weight_shape, initializer=weight_init)
bias_init = tf.constant_initializer(value=0)
b = tf.get_variable("b", bias_shape, initializer=bias_init)
return tf.nn.relu(tf.matmul(x, W) + b)
def inference(x):
"""
define the whole network (5 hidden layers + output layers)
input:
- a batch of pictures
(input shape = (batch_size*image_size))
output:
- a batch vector corresponding to the logits predicted by the network
(output shape = (batch_size*output_size))
"""
print(type(x))
print(np.shape(x))
print(x)
with tf.variable_scope("hidden_layer_1"):
hidden_1 = layer2(x, [input_size, n_hidden_1], [n_hidden_1])
#print([input_size, n_hidden_1])
with tf.variable_scope("hidden_layer_2"):
hidden_2 = layer2(hidden_1, [n_hidden_1, n_hidden_2], [n_hidden_2])
#print([n_hidden_1, n_hidden_2])
with tf.variable_scope("hidden_layer_3"):
hidden_3 = layer2(hidden_2, [n_hidden_2, n_hidden_3], [n_hidden_3])
#print([n_hidden_2, n_hidden_3])
with tf.variable_scope("hidden_layer_4"):
hidden_4 = layer2(hidden_3, [n_hidden_3, n_hidden_4], [n_hidden_4])
#print([n_hidden_3, n_hidden_4])
with tf.variable_scope("hidden_layer_5"):
hidden_5 = layer2(hidden_4, [n_hidden_4, n_hidden_5], [n_hidden_5])
#print([n_hidden_4, n_hidden_5])
with tf.variable_scope("output"):
output = layer1(hidden_5, [n_hidden_5, output_size], [output_size])
#print([n_hidden_5, output_size])
return output
def loss(output, y):
"""
Computes softmax cross entropy between logits and labels and then the loss
intput:
- output: the output of the inference function
- y: true value of the sample batch
the two have the same shape (batch_size * num_of_classes)
output:
- loss: loss of the corresponding batch (scalar tensor)
"""
#Computes softmax cross entropy between logits and labels.
xentropy = tf.nn.softmax_cross_entropy_with_logits(logits=output, labels=y)
loss = tf.reduce_mean(xentropy)
return loss
def training(cost, global_step):
"""
defines the necessary elements to train the network
intput:
- cost: the cost is the loss of the corresponding batch
- global_step: number of batch seen so far, it is incremented by one each time the .minimize() function is called
"""
tf.summary.scalar("cost", cost)
# using Adam Optimizer
optimizer = tf.train.AdamOptimizer(learning_rate)
train_op = optimizer.minimize(cost, global_step=global_step)
return train_op
def evaluate(output, y):
"""
evaluates the accuracy on the validation set
input:
-output: prediction vector of the network for the validation set
-y: true value for the validation set
output:
- accuracy: accuracy on the validation set (scalar between 0 and 1)
"""
#correct prediction is a binary vector which equals one when the output and y match
#otherwise the vector equals 0
#tf.cast: change the type of a tensor into another one
#then, by taking the mean of the tensor, we directly have the average score, so the accuracy
correct_prediction = tf.equal(tf.argmax(output, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
tf.summary.scalar("validation_error", (1.0 - accuracy))
return accuracy
epochs_completed = 0
index_in_epoch = 0
num_examples = X_train.shape[0]
# for splitting out batches of data
def next_batch(batch_size):
g | lobal X_train
global y_train
global index_in_epoch
global epochs_completed
start = index_in_epoch
index_in_epoch += batch_size
# when all trainig data have been already used, it is reorder randomly
if index_in_epoch > num_examples:
# finished epoch
epochs_completed += 1
# shuffle the data
perm = np.arange(num_examples)
np.random.shuffle(perm)
X_train = X_train[perm]
y_train = y_train[perm]
# start next epoch
start = 0
index_in_epoch = batch_size | identifier_body | |
DLProject_DNN.py | 'L8-AskSize', 'L8-SellNo', 'L9-BidPrice',
'L9-BidSize', 'L9-BuyNo', 'L9-AskPrice', 'L9-AskSize', 'L9-SellNo',
'L10-BidPrice', 'L10-BidSize', 'L10-BuyNo', 'L10-AskPrice', 'L10-AskSize',
'L10-SellNo']
# import tick data from the given path
print("Importing the data...")
Nrow = input("How many rows in the dataset do you want to consider ?")
if (Nrow == ''):
df_import = pd.read_csv(file_path)
else :
df_import = pd.read_csv(file_path, nrows=int(Nrow))
df = df_import[L_labels]
print("Done.")
df2 = pd.DataFrame()
print()
print("Rearraging the data to compute the mid prices..")
print(" First step..")
count = 0
for i in range(n_levels):
if (count/10 < np.floor(10*i/n_levels)):
count = round(100*i/n_levels)
print (" "+str(count)+" %"),
i = i+1
df2['relative depth'+str(i)]=df['L'+str(i)+'-BidSize']/(df['L'+str(i)+'-BidSize']+df['L'+str(i)+'-AskSize'])
df2['mid price']=(df['L1-AskPrice']+df['L1-BidPrice'])/2
df2['mid_price_change'] = 0
print(" Done.. ")
print(" Second step..")
count = 0
for i in range(1,len(df2)):
if (count/10 < np.floor(10*i/len(df2))):
count = round(100*i/len(df2))
print(" "+str(count)+" %")
df2.loc[i,'mid_price_change'] = df2.loc[i,'mid price']- df2.loc[i-1,'mid price']
print(" Done.. ")
print(" Third step..")
df3 = pd.DataFrame()
count = 0
for i in range(len(df2)-n_times):
if (count/10 < np.floor(10*i/(len(df2)-n_times))):
count = round(100*i/(len(df2)-n_times))
print(" "+str(count)+" %")
my_list = []
dataf = df2.iloc[i:(i+n_times),0:10]
datf = dataf.values.reshape(dataf.shape[0]*dataf.shape[1])
df_temp = pd.DataFrame([list(datf)])
df_temp['mid_price_change']=df2.loc[i+n_times,'mid_price_change']
df3 = pd.concat([df3,df_temp],ignore_index=True)
print(" Done.. ")
df3['classification'] = 0
df3['classification'] = df3['mid_price_change'].apply(lambda x: 0 if x == 0 else 1 if x > 0 else -1)
df3 = df3.drop(['mid_price_change'],axis=1)
print("Done.")
print()
# undersampling with temporal bias
print("Random undersampling...")
# set up a remaining rate
rr = 0.5
# construct bins of the majority class
my_list = []
temp_list = []
remain_index = []
for i in range(len(df3)):
if df3.loc[i,'classification']==0:
temp_list.append(i)
else:
remain_index.append(i)
if len(temp_list)>0:
my_list.append(temp_list)
temp_list = []
# in each bin, drop the data with a relative probability according to its position
for bins in my_list:
length = len(bins)
temp_list = np.arange(length)+1
pp = temp_list/sum(temp_list)
num = math.ceil(length*rr)
temp_index = np.random.choice(temp_list,num,p=pp,replace=False)
for i in temp_index:
remain_index.append(bins[i-1])
df3 = df3.loc[remain_index]
df3 = df3.sort_index()
df3 = df3.reset_index(drop=True)
print("Done.")
print()
# Oversampling using Smote algorithm
# setup the oversampling/enlarging rate er and the number of nearest neighbors k
k = 2
print("Smote Algorithm Oversampling...")
# calculate difference between 2 feature vectors
def diff(x,y,df,my_dict):
x_index = my_dict[x]
y_index = my_dict[y]
vec1 = np.array(df3.iloc[x_index,:df.shape[1]-1])
vec2 = np.array(df3.iloc[y_index,:df.shape[1]-1])
vec_diff = vec2-vec1
return vec_diff
# find index of nearest k neighbors from j's row of data
def find_neighbors(j,k,my_dict,my_dict2,df):
i = my_dict[j]
global dist_matrix
for l in range(len(dist_matrix)):
if dist_matrix[i][l]!=0 or i == l:
continue
dist_matrix[i][l]=dist_matrix[l][i]=LA.norm(diff(i,l,df,my_dict2))
a = dist_matrix[i]
neighbors = sorted(range(len(a)), key=lambda i: a[i])[:k+1]
neighbors.remove(i)
res = []
for l in neighbors:
res.append(my_dict2[l])
return res
temp_list = []
temp_list_neg = [] | if df3.loc[i,'classification']==-1:
temp_list_neg.append(i)
temp_list = np.asarray(temp_list)
length = len(temp_list)
temp_list_neg = np.asarray(temp_list_neg)
length_neg = len(temp_list_neg)
num_pos = max(1,len(df3)-length)
num_neg = max(1,len(df3)-length_neg)
# oversample the data with positive mid_price move
# initialize a matrix to save the feature difference between 2 feature vectors
dist_matrix = np.zeros((length,length))
# 2 dictionary to transform between matrix index and dataframe row index
my_dict = {}
my_dict2 = {}
for i in range(length):
my_dict[temp_list[i]]=i
my_dict2[i] = temp_list[i]
# main function
case_index = np.random.choice(temp_list,num_pos,replace=True)
count = 1
for j in case_index:
top_indexs = find_neighbors(j,k,my_dict,my_dict2,df3)
top_indexs = np.asarray(top_indexs)
my_index = np.random.choice(top_indexs)
temp_array = np.array(df3.iloc[j,:df3.shape[1]-1])
difference = diff(my_dict[j],my_dict[my_index],df3,my_dict2)
ran_num = random.uniform(0, 1)
temp_array = temp_array+difference*ran_num
temp_array = temp_array.tolist()
temp_array.append(1)
df3.loc[j+count/(num+1)] = temp_array
count += 1
df3 = df3.sort_index()
df3 = df3.reset_index(drop=True)
# oversample the data with negative mid_price move
temp_list = temp_list_neg
length = length_neg
# initialize a matrix to save the feature difference between 2 feature vectors
dist_matrix = np.zeros((length,length))
# 2 dictionary to transform between matrix index and dataframe row index
my_dict = {}
my_dict2 = {}
for i in range(length):
my_dict[temp_list[i]]=i
my_dict2[i] = temp_list[i]
# main function
case_index = np.random.choice(temp_list,num_neg,replace=True)
count = 1
for j in case_index:
top_indexs = find_neighbors(j,k,my_dict,my_dict2,df3)
top_indexs = np.asarray(top_indexs)
my_index = np.random.choice(top_indexs)
temp_array = np.array(df3.iloc[j,:df3.shape[1]-1])
difference = diff(my_dict[j],my_dict[my_index],df3,my_dict2)
ran_num = random.uniform(0, 1)
temp_array = temp_array+difference*ran_num
temp_array = temp_array.tolist()
temp_array.append(-1)
df3.loc[j+count/(num+1)] = temp_array
count += 1
df3 = df3.sort_index()
df3 = df3.reset_index(drop=True)
print("Done.")
print()
df3['classification'] = df3['classification'].apply(lambda x: [0,1,0] if x == 0 else [1,0,0] if x > 0 else [0,0,1])
#df3['classification']=df3['classification'].reshape
#Splitting the data into a training and test data:
y = df3['classification'].values
y=np.stack(y)
X = df3.drop('classification', axis=1).values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = test_size, random_state=42, stratify=y)
#y_test=y_test.reshape(X_test.shape[0],3)
def layer1(x, weight_shape | for i in range(len(df3)):
if df3.loc[i,'classification']==1:
temp_list.append(i) | random_line_split |
Notebook_1_DataCleansing_FeatureEngineering.py | id','Categorical_1','Categorical_2','Categorical_3','Categorical_4',
'fault_code_type_1','fault_code_type_2',
'fault_code_type_3','fault_code_type_4',
'problemreported']
features_numeric = list(set(df.columns) -set(features_datetime)-set(features_categorical))
# COMMAND ----------
# MAGIC %md #### Handling missing data
# COMMAND ----------
print(df['fault_code_type_3',].head(3))
# there are some missing values, we need to handle in the subsequent steps
# COMMAND ----------
# handle missing values
df = df.fillna(0, subset=features_numeric)
df = df.fillna("Unknown", subset=features_categorical)
# check the results
print(df['fault_code_type_3',].head(3))
# COMMAND ----------
# MAGIC %md #### For data exploration part, people usually would like to visualize the distribution of certain columns or the interation among columns. Here, we hand picked some columns to demonstrate how to do some basic visualizations.
# COMMAND ----------
#------------------------------------ data exploration and visualization ------------------------------------#
# Register dataframe as a temp table in SQL context
df.createOrReplaceTempView("df1")
sqlStatement = """
SELECT problem_type_1, problem_type_2, problem_type_3, problem_type_4,
error_count_1, error_count_2, error_count_3, error_count_4,
error_count_5, error_count_6, error_count_7, error_count_8, problemreported
FROM df1
"""
plotdata = spark.sql(sqlStatement).toPandas();
%matplotlib inline
# show histogram distribution of some features
ax1 = plotdata[['problem_type_1']].plot(kind='hist', bins=5, facecolor='blue')
ax1.set_title('problem_type_1 distribution')
ax1.set_xlabel('number of problem_type_1 per day'); ax1.set_ylabel('Counts');
plt.figure(figsize=(4,4)); plt.suptitle(''); plt.show()
ax1 = plotdata[['problem_type_2']].plot(kind='hist', bins=5, facecolor='blue')
ax1.set_title('problem_type_2 distribution')
ax1.set_xlabel('number of problem_type_2 per day'); ax1.set_ylabel('Counts');
plt.figure(figsize=(4,4)); plt.suptitle(''); plt.show()
# show correlation matrix heatmap to explore some potential interesting patterns
corr = plotdata.corr()
sns.heatmap(corr,
xticklabels=corr.columns.values,
yticklabels=corr.columns.values)
display()
# COMMAND ----------
# MAGIC %md ## Feature Engineering
# MAGIC In the remaining part of the Notebook #1, we will demonstrate how to generate new features for this kind of use case. It is definitely not meant to be a comprehensive list.
# COMMAND ----------
# MAGIC %md In the following cell, we created some time features, calculated the total number of warning_type1 (type2) occured for a macine on a particular day. We also identified some data quality issue that some event counts had negative values.
# COMMAND ----------
# Extract some time features from "date" column
df = df.withColumn('month', month(df['date']))
df = df.withColumn('weekofyear', weekofyear(df['date']))
df = df.withColumn('dayofmonth', dayofmonth(df['date']))
# warning related raw features
warning_type1_features = list(s for s in df.columns if "warning_1_" in s)
warning_type2_features = list(s for s in df.columns if "warning_2_" in s)
warning_all = warning_type1_features + warning_type2_features
# total count of all type1 warnings each day each device
df = df.withColumn('warn_type1_total', sum(df[col_n] for col_n in warning_type1_features))
# total count of all type2 warnings each day each device
df = df.withColumn('warn_type2_total', sum(df[col_n] for col_n in warning_type2_features))
print(df['warn_type1_total',].head(3))
print(df['warn_type2_total',].head(3))
# COMMAND ----------
# We realized that the warning counts have negative values
# Replace all the negative values with 0
def negative_replace(num):
if num < 0: return 0
else: return num
negative_replace_Udf = udf(negative_replace, IntegerType())
m = warning_type1_features + warning_type2_features
for col_n in m:
df = df.withColumn(col_n, negative_replace_Udf(df[col_n]))
# Then we have to re-calculate the total warnings again
df = df.withColumn('warn_type1_total', sum(df[col_n] for col_n in warning_type1_features))
df = df.withColumn('warn_type2_total', sum(df[col_n] for col_n in warning_type2_features))
print(df['warn_type1_total',].head(3))
print(df['warn_type2_total',].head(3))
# COMMAND ----------
# MAGIC %md #### Variables "categorical_1 to 4" are integer type but in fact they are categorical features. In the following cell, we binned those variables and created four new columns.
# COMMAND ----------
# Note: we can also use SparkSQL for this binning task
def Cat1(num):
if num < | = udf(Cat1, StringType())
df = df.withColumn("cat1", cat1Udf('categorical_1'))
def Cat2(num):
if num <= 2000: return '0-2000'
elif 2000 < num and num <= 3000: return '2000-3000'
elif 3000 < num and num <= 4000: return '3000-4000'
elif 4000 < num and num <= 5000: return '4000-5000'
elif 5000 < num and num <= 6000: return '5000-6000'
else: return 'morethan6000'
cat2Udf = udf(Cat2, StringType())
df = df.withColumn("cat2", cat2Udf('categorical_2'))
def Cat3(num):
if num <= 200: return '0-200'
elif 200 < num and num <= 400: return '200-400'
elif 400 < num and num <= 600: return '400-600'
elif 600 < num and num <= 800: return '600-800'
else: return 'morethan800'
cat3Udf = udf(Cat3, StringType())
df = df.withColumn("cat3", cat3Udf('categorical_3'))
def Cat4(num):
if num <= 5000: return '0-5000'
elif 5000 < num and num <= 10000: return '5000-10000'
elif 10000 < num and num <= 15000: return '10000-15000'
elif 15000 < num and num <= 20000: return '15000-20000'
else: return 'morethan20000'
cat4Udf = udf(Cat4, StringType())
df = df.withColumn("cat4", cat4Udf('categorical_4'))
print(df.select('cat1').distinct().rdd.map(lambda r: r[0]).collect())
print(df.select('cat2').distinct().rdd.map(lambda r: r[0]).collect())
print(df.select('cat3').distinct().rdd.map(lambda r: r[0]).collect())
print(df.select('cat4').distinct().rdd.map(lambda r: r[0]).collect())
# COMMAND ----------
# MAGIC %md #### For variables "fault_code_type_1 to 4", if it is "Unknown" that means there is "0" fault code reported on that day for that machine, otherwise the count of fault code type 1 (2, 3, or 4) is 1.
# COMMAND ----------
df = df.withColumn("fault_code_type_1_count",F.when(df.fault_code_type_1!= "Unknown", 1).otherwise(0))\
.withColumn("fault_code_type_2_count",F.when(df.fault_code_type_2!= "Unknown", 1).otherwise(0))\
.withColumn("fault_code_type_3_count",F.when(df.fault_code_type_3!= "Unknown", 1).otherwise(0))\
.withColumn("fault_code_type_4_count",F.when(df.fault_code_type_4!= "Unknown", | = 10: return '0-10'
elif 10 < num and num <= 20: return '11-20'
elif 20 < num and num <= 30: return '21-30'
elif 30 < num and num <= 40: return '31-40'
else: return 'morethan40'
cat1Udf | identifier_body |
Notebook_1_DataCleansing_FeatureEngineering.py | l = df.columns
cols = [c.replace(' ','_').
replace('[.]','_').
replace('.','_').
replace('[[:punct:]]','_').
lower() for c in l]
return df.toDF(*cols)
df = StandardizeNames(df)
# remove duplicated rows based on deviceid and date
df = df.dropDuplicates(['deviceid', 'date'])
# remove rows with missing deviceid, date
df = df.dropna(how='any', subset=['deviceid', 'date'])
df.select('deviceid','date').show(3)
# COMMAND ----------
# MAGIC %md #### Define groups of features -- date, categorical, numeric
# COMMAND ----------
#------------------------------------------- Define groups of features -----------------------------------------#
features_datetime = ['date']
features_categorical = ['deviceid','Categorical_1','Categorical_2','Categorical_3','Categorical_4',
'fault_code_type_1','fault_code_type_2',
'fault_code_type_3','fault_code_type_4',
'problemreported']
features_numeric = list(set(df.columns) -set(features_datetime)-set(features_categorical))
# COMMAND ----------
# MAGIC %md #### Handling missing data
# COMMAND ----------
print(df['fault_code_type_3',].head(3))
# there are some missing values, we need to handle in the subsequent steps
# COMMAND ----------
# handle missing values
df = df.fillna(0, subset=features_numeric)
df = df.fillna("Unknown", subset=features_categorical)
# check the results
print(df['fault_code_type_3',].head(3))
# COMMAND ----------
# MAGIC %md #### For data exploration part, people usually would like to visualize the distribution of certain columns or the interation among columns. Here, we hand picked some columns to demonstrate how to do some basic visualizations.
# COMMAND ----------
#------------------------------------ data exploration and visualization ------------------------------------#
# Register dataframe as a temp table in SQL context
df.createOrReplaceTempView("df1")
sqlStatement = """
SELECT problem_type_1, problem_type_2, problem_type_3, problem_type_4,
error_count_1, error_count_2, error_count_3, error_count_4,
error_count_5, error_count_6, error_count_7, error_count_8, problemreported
FROM df1
"""
plotdata = spark.sql(sqlStatement).toPandas();
%matplotlib inline
# show histogram distribution of some features
ax1 = plotdata[['problem_type_1']].plot(kind='hist', bins=5, facecolor='blue')
ax1.set_title('problem_type_1 distribution')
ax1.set_xlabel('number of problem_type_1 per day'); ax1.set_ylabel('Counts');
plt.figure(figsize=(4,4)); plt.suptitle(''); plt.show()
ax1 = plotdata[['problem_type_2']].plot(kind='hist', bins=5, facecolor='blue')
ax1.set_title('problem_type_2 distribution')
ax1.set_xlabel('number of problem_type_2 per day'); ax1.set_ylabel('Counts');
plt.figure(figsize=(4,4)); plt.suptitle(''); plt.show()
# show correlation matrix heatmap to explore some potential interesting patterns
corr = plotdata.corr()
sns.heatmap(corr,
xticklabels=corr.columns.values,
yticklabels=corr.columns.values)
display()
# COMMAND ----------
# MAGIC %md ## Feature Engineering
# MAGIC In the remaining part of the Notebook #1, we will demonstrate how to generate new features for this kind of use case. It is definitely not meant to be a comprehensive list.
# COMMAND ----------
# MAGIC %md In the following cell, we created some time features, calculated the total number of warning_type1 (type2) occured for a macine on a particular day. We also identified some data quality issue that some event counts had negative values.
# COMMAND ----------
# Extract some time features from "date" column
df = df.withColumn('month', month(df['date']))
df = df.withColumn('weekofyear', weekofyear(df['date']))
df = df.withColumn('dayofmonth', dayofmonth(df['date']))
# warning related raw features
warning_type1_features = list(s for s in df.columns if "warning_1_" in s)
warning_type2_features = list(s for s in df.columns if "warning_2_" in s)
warning_all = warning_type1_features + warning_type2_features
# total count of all type1 warnings each day each device
df = df.withColumn('warn_type1_total', sum(df[col_n] for col_n in warning_type1_features))
# total count of all type2 warnings each day each device
df = df.withColumn('warn_type2_total', sum(df[col_n] for col_n in warning_type2_features))
print(df['warn_type1_total',].head(3))
print(df['warn_type2_total',].head(3))
# COMMAND ----------
# We realized that the warning counts have negative values
# Replace all the negative values with 0
def negative_replace(num):
if num < 0: return 0
else: return num
negative_replace_Udf = udf(negative_replace, IntegerType())
m = warning_type1_features + warning_type2_features
for col_n in m:
df = df.withColumn(col_n, negative_replace_Udf(df[col_n]))
# Then we have to re-calculate the total warnings again
df = df.withColumn('warn_type1_total', sum(df[col_n] for col_n in warning_type1_features))
df = df.withColumn('warn_type2_total', sum(df[col_n] for col_n in warning_type2_features))
print(df['warn_type1_total',].head(3))
print(df['warn_type2_total',].head(3))
# COMMAND ----------
# MAGIC %md #### Variables "categorical_1 to 4" are integer type but in fact they are categorical features. In the following cell, we binned those variables and created four new columns.
# COMMAND ----------
# Note: we can also use SparkSQL for this binning task
def Cat1(num):
if num <= 10: return '0-10'
elif 10 < num and num <= 20: return '11-20'
elif 20 < num and num <= 30: return '21-30'
elif 30 < num and num <= 40: return '31-40'
else: return 'morethan40'
cat1Udf = udf(Cat1, StringType())
df = df.withColumn("cat1", cat1Udf('categorical_1'))
def Cat2(num):
if num <= 2000: return '0-2000'
elif 2000 < num and num <= 3000: return '2000-3000'
elif 3000 < num and num <= 4000: return '3000-4000'
elif 4000 < num and num <= 5000: return '4000-5000'
elif 5000 < num and num <= 6000: return '5000-6000'
else: return 'morethan6000'
cat2Udf = udf(Cat2, StringType())
df = df.withColumn("cat2", cat2Udf('categorical_2'))
def Cat3(num):
if num <= 200: return '0-200'
elif 200 < num and num <= 400: return '200-400'
elif 400 < num and num <= 600: return '400-600'
elif 600 < num and num <= 800: return '600-800'
else: return 'morethan800'
cat3Udf = udf(Cat3, StringType())
df = df.withColumn("cat3", cat3Udf('categorical_3'))
def Cat4(num):
if num <= 5000: return '0-5000'
elif 5000 < num and num <= 10000: return '5000-10000'
elif 10000 < num and num <= 15000: return '10000-15000'
elif 15000 < num and num <= 20000: return '15000-20000'
else: return 'morethan20000'
cat4Udf = udf(Cat4, StringType())
df = df.withColumn("cat4", cat4Udf('categorical_4'))
print(df.select('cat1').distinct().rdd.map(lambda r: r[0]).collect())
print(df.select('cat2').distinct().rdd.map(lambda r: r[0]).collect())
print(df.select('cat3').distinct().rdd.map(lambda r: r[0]).collect())
print(df.select('cat4').distinct().rdd.map(lambda r: r[0]).collect())
# COMMAND ----------
# MAGIC %md #### For variables "fault_code_type_1 to 4", if | izeNames(df):
| identifier_name | |
Notebook_1_DataCleansing_FeatureEngineering.py | 1))\
.withColumn("fault_code_type_3_count_per_usage1",F.when(df.usage_count_1==0,0).otherwise(df.fault_code_type_3_count/df.usage_count_1))\
.withColumn("fault_code_type_4_count_per_usage1",F.when(df.usage_count_1==0,0).otherwise(df.fault_code_type_4_count/df.usage_count_1))
# Normalize performance_normal_raw by "usage_count_2"
df = df.withColumn("problem_type_1_per_usage2", F.when(df.usage_count_2==0,0).otherwise(df.problem_type_1/df.usage_count_2))\
.withColumn("problem_type_2_per_usage2",F.when(df.usage_count_2==0,0).otherwise(df.problem_type_2/df.usage_count_2))\
.withColumn("problem_type_3_per_usage2",F.when(df.usage_count_2==0,0).otherwise(df.problem_type_3/df.usage_count_2))\
.withColumn("problem_type_4_per_usage2",F.when(df.usage_count_2==0,0).otherwise(df.problem_type_4/df.usage_count_2))\
.withColumn("fault_code_type_1_count_per_usage2",F.when(df.usage_count_2==0,0).otherwise(df.fault_code_type_1_count/df.usage_count_2))\
.withColumn("fault_code_type_2_count_per_usage2",F.when(df.usage_count_2==0,0).otherwise(df.fault_code_type_2_count/df.usage_count_2))\
.withColumn("fault_code_type_3_count_per_usage2",F.when(df.usage_count_2==0,0).otherwise(df.fault_code_type_3_count/df.usage_count_2))\
.withColumn("fault_code_type_4_count_per_usage2",F.when(df.usage_count_2==0,0).otherwise(df.fault_code_type_4_count/df.usage_count_2))
# COMMAND ----------
# MAGIC %md #### Similar to what we did for "categorical_1 to 4", in the following cell we binned performance related features and created new categorical features.
# COMMAND ----------
# Define the list of performance related features which we would like to perform binning
c_names = ['problem_type_1', 'problem_type_3', 'problem_type_4',
'problem_type_1_per_usage1','problem_type_2_per_usage1','problem_type_3_per_usage1','problem_type_4_per_usage1',
'problem_type_1_per_usage2','problem_type_2_per_usage2','problem_type_3_per_usage2','problem_type_4_per_usage2',
'fault_code_type_1_count', 'fault_code_type_2_count', 'fault_code_type_3_count', 'fault_code_type_4_count',
'fault_code_type_1_count_per_usage1','fault_code_type_2_count_per_usage1',
'fault_code_type_3_count_per_usage1', 'fault_code_type_4_count_per_usage1',
'fault_code_type_1_count_per_usage2','fault_code_type_2_count_per_usage2',
'fault_code_type_3_count_per_usage2', 'fault_code_type_4_count_per_usage2']
# Bin size ('0','1','>1') for most of the performance features because majority of the values fall into the range of 0 to slightly more than 1.
def performanceCat(num):
if num == 0: return '0'
elif num ==1: return '1'
else: return '>1'
performanceCatUdf = udf(performanceCat, StringType())
for col_n in c_names:
df = df.withColumn(col_n+'_category',performanceCatUdf(df[col_n]))
# Use different bin for "problem_type_2" because we saw a larger spread of the values
def problem_type_2_Cat(num):
if num == 0: return '0'
elif 0 < num and num <= 5: return '1-5'
elif 5 < num and num <= 10: return '6-10'
else: return '>10'
problem_type_2_CatUdf = udf(problem_type_2_Cat, StringType())
df = df.withColumn('problem_type_2_category',problem_type_2_CatUdf(df['problem_type_2']))
print(df.select('problem_type_1_category').distinct().rdd.map(lambda r: r[0]).collect())
print(df.select('problem_type_2_category').distinct().rdd.map(lambda r: r[0]).collect())
# COMMAND ----------
# MAGIC %md #### One hot encode some categotical features
# COMMAND ----------
# MAGIC %%time
# MAGIC
# MAGIC # Define the list of categorical features
# MAGIC
# MAGIC catVarNames = ['problem_type_1_category', 'problem_type_2_category',
# MAGIC 'problem_type_3_category', 'problem_type_4_category',
# MAGIC 'problem_type_1_per_usage1_category', 'problem_type_2_per_usage1_category',
# MAGIC 'problem_type_3_per_usage1_category', 'problem_type_4_per_usage1_category',
# MAGIC 'problem_type_1_per_usage2_category', 'problem_type_2_per_usage2_category',
# MAGIC 'problem_type_3_per_usage2_category', 'problem_type_4_per_usage2_category',
# MAGIC 'fault_code_type_1_count_category', 'fault_code_type_2_count_category',
# MAGIC 'fault_code_type_3_count_category', 'fault_code_type_4_count_category',
# MAGIC 'fault_code_type_1_count_per_usage1_category', 'fault_code_type_2_count_per_usage1_category',
# MAGIC 'fault_code_type_3_count_per_usage1_category', 'fault_code_type_4_count_per_usage1_category',
# MAGIC 'fault_code_type_1_count_per_usage2_category', 'fault_code_type_2_count_per_usage2_category',
# MAGIC 'fault_code_type_3_count_per_usage2_category', 'fault_code_type_4_count_per_usage2_category',
# MAGIC 'cat1','cat2','cat3','cat4']
# MAGIC
# MAGIC
# MAGIC sIndexers = [StringIndexer(inputCol=x, outputCol=x + '_indexed') for x in catVarNames]
# MAGIC
# MAGIC df_cat = Pipeline(stages=sIndexers).fit(df).transform(df)
# MAGIC
# MAGIC # Remove columns with only 1 level (compute variances of columns)
# MAGIC catColVariance = df_cat.select(
# MAGIC *(F.variance(df_cat[c]).alias(c + '_sd') for c in [cv + '_indexed' for cv in catVarNames]))
# MAGIC catColVariance = catColVariance.rdd.flatMap(lambda x: x).collect()
# MAGIC catVarNames = [catVarNames[k] for k in [i for i, v in enumerate(catColVariance) if v != 0]]
# MAGIC
# MAGIC # Encode
# MAGIC ohEncoders = [OneHotEncoder(inputCol=x + '_indexed', outputCol=x + '_encoded')
# MAGIC for x in catVarNames]
# MAGIC ohPipelineModel = Pipeline(stages=ohEncoders).fit(df_cat)
# MAGIC df_cat = ohPipelineModel.transform(df_cat)
# MAGIC
# MAGIC drop_list = [col_n for col_n in df_cat.columns if 'indexed' in col_n]
# MAGIC df = df_cat.select([column for column in df_cat.columns if column not in drop_list])
# MAGIC
# MAGIC print(df['problem_type_1_category_encoded',].head(3))
# COMMAND ----------
# MAGIC %md #### Use PCA to reduce number of features
# MAGIC In Notebook #2, we will perform a series of rolling computation for various features, time windows and aggregated statistics. This process is very computational expensive and therefore we need to first reduce the feature list.
# MAGIC In the dataset, there are many warning related features and most of them have value of 0 so quite sparse. We can group or find correlations among those warning features, reduce the feature space for downstream work.
# COMMAND ----------
## check the number of warning related features
len([col_n for col_n in df.columns if 'warning' in col_n])
# COMMAND ----------
# MAGIC %%time
# MAGIC
# MAGIC #----------------------------- PCA feature grouping on warning related features --------------------------#
# MAGIC
# MAGIC df = df.withColumn("key", concat(df.deviceid,lit("_"),df.date))
# MAGIC
# MAGIC # step 1
# MAGIC # Use RFormula to create the feature vector
# MAGIC formula = RFormula(formula = "~" + "+".join(warning_all))
# MAGIC output = formula.fit(df).transform(df).select("key","features")
# MAGIC
# MAGIC
# MAGIC # step 2
# MAGIC # Before PCA, we need to standardize the features, it is very important...
# MAGIC # Note that StandardScaler does not work for sparse vector unless withMean=false
# MAGIC # OR we can convert sparse vector to dense vector first using toArray
# MAGIC scaler = StandardScaler(inputCol="features", outputCol="scaledFeatures",
# MAGIC withStd=True, withMean=False)
# MAGIC
# MAGIC # Compute summary statistics by fitting the StandardScaler
# MAGIC scalerModel = scaler.fit(output)
# MAGIC
# MAGIC # Normalize each feature to have unit standard deviation.
# MAGIC scaledData = scalerModel.transform(output) | # MAGIC
# MAGIC | random_line_split | |
Notebook_1_DataCleansing_FeatureEngineering.py | deviceid','Categorical_1','Categorical_2','Categorical_3','Categorical_4',
'fault_code_type_1','fault_code_type_2',
'fault_code_type_3','fault_code_type_4',
'problemreported']
features_numeric = list(set(df.columns) -set(features_datetime)-set(features_categorical))
# COMMAND ----------
# MAGIC %md #### Handling missing data
# COMMAND ----------
print(df['fault_code_type_3',].head(3))
# there are some missing values, we need to handle in the subsequent steps
# COMMAND ----------
# handle missing values
df = df.fillna(0, subset=features_numeric)
df = df.fillna("Unknown", subset=features_categorical)
# check the results
print(df['fault_code_type_3',].head(3))
# COMMAND ----------
# MAGIC %md #### For data exploration part, people usually would like to visualize the distribution of certain columns or the interation among columns. Here, we hand picked some columns to demonstrate how to do some basic visualizations.
# COMMAND ----------
#------------------------------------ data exploration and visualization ------------------------------------#
# Register dataframe as a temp table in SQL context
df.createOrReplaceTempView("df1")
sqlStatement = """
SELECT problem_type_1, problem_type_2, problem_type_3, problem_type_4,
error_count_1, error_count_2, error_count_3, error_count_4,
error_count_5, error_count_6, error_count_7, error_count_8, problemreported
FROM df1
"""
plotdata = spark.sql(sqlStatement).toPandas();
%matplotlib inline
# show histogram distribution of some features
ax1 = plotdata[['problem_type_1']].plot(kind='hist', bins=5, facecolor='blue')
ax1.set_title('problem_type_1 distribution')
ax1.set_xlabel('number of problem_type_1 per day'); ax1.set_ylabel('Counts');
plt.figure(figsize=(4,4)); plt.suptitle(''); plt.show()
ax1 = plotdata[['problem_type_2']].plot(kind='hist', bins=5, facecolor='blue')
ax1.set_title('problem_type_2 distribution')
ax1.set_xlabel('number of problem_type_2 per day'); ax1.set_ylabel('Counts');
plt.figure(figsize=(4,4)); plt.suptitle(''); plt.show()
# show correlation matrix heatmap to explore some potential interesting patterns
corr = plotdata.corr()
sns.heatmap(corr,
xticklabels=corr.columns.values,
yticklabels=corr.columns.values)
display()
# COMMAND ----------
# MAGIC %md ## Feature Engineering
# MAGIC In the remaining part of the Notebook #1, we will demonstrate how to generate new features for this kind of use case. It is definitely not meant to be a comprehensive list.
# COMMAND ----------
# MAGIC %md In the following cell, we created some time features, calculated the total number of warning_type1 (type2) occured for a macine on a particular day. We also identified some data quality issue that some event counts had negative values.
# COMMAND ----------
# Extract some time features from "date" column
df = df.withColumn('month', month(df['date']))
df = df.withColumn('weekofyear', weekofyear(df['date']))
df = df.withColumn('dayofmonth', dayofmonth(df['date']))
# warning related raw features
warning_type1_features = list(s for s in df.columns if "warning_1_" in s)
warning_type2_features = list(s for s in df.columns if "warning_2_" in s)
warning_all = warning_type1_features + warning_type2_features
# total count of all type1 warnings each day each device
df = df.withColumn('warn_type1_total', sum(df[col_n] for col_n in warning_type1_features))
# total count of all type2 warnings each day each device
df = df.withColumn('warn_type2_total', sum(df[col_n] for col_n in warning_type2_features))
print(df['warn_type1_total',].head(3))
print(df['warn_type2_total',].head(3))
# COMMAND ----------
# We realized that the warning counts have negative values
# Replace all the negative values with 0
def negative_replace(num):
if num < 0: return 0
else: return num
negative_replace_Udf = udf(negative_replace, IntegerType())
m = warning_type1_features + warning_type2_features
for col_n in m:
df = df.withColumn(col_n, negative_replace_Udf(df[col_n]))
# Then we have to re-calculate the total warnings again
df = df.withColumn('warn_type1_total', sum(df[col_n] for col_n in warning_type1_features))
df = df.withColumn('warn_type2_total', sum(df[col_n] for col_n in warning_type2_features))
print(df['warn_type1_total',].head(3))
print(df['warn_type2_total',].head(3))
# COMMAND ----------
# MAGIC %md #### Variables "categorical_1 to 4" are integer type but in fact they are categorical features. In the following cell, we binned those variables and created four new columns.
# COMMAND ----------
# Note: we can also use SparkSQL for this binning task
def Cat1(num):
if num <= 10: return '0-10'
elif 10 < num and num <= 20: return '11-20'
elif 20 < num and num <= 30: return '21-30'
elif 30 < num and num <= 40: return '31-40'
else: return 'morethan40'
cat1Udf = udf(Cat1, StringType())
df = df.withColumn("cat1", cat1Udf('categorical_1'))
def Cat2(num):
if num <= 2000: return '0-2000'
elif 2000 < num and num <= 3000: return '2000-3000'
elif 3000 < num and num <= 4000: return '3000-4000'
elif 4000 < num and num <= 5000: return '4000-5000'
elif 5000 < num and num <= 6000: return '5000-6000'
else: return 'morethan6000'
cat2Udf = udf(Cat2, StringType())
df = df.withColumn("cat2", cat2Udf('categorical_2'))
def Cat3(num):
if num <= 200: return '0-200'
elif 200 < num and num <= 400: return '200-400'
elif 400 < num and num <= 600: return '400-600'
elif 600 < num and num <= 800: return '600-800'
else: return 'morethan800'
cat3Udf = udf(Cat3, StringType())
df = df.withColumn("cat3", cat3Udf('categorical_3'))
def Cat4(num):
if num <= 5000: return '0-5000'
elif 5000 < num and num <= 10000: return '5000-10000'
elif 10000 < num and num <= 15000: return '10000-15000'
elif 15000 < num and num <= 20000: return '15000-20000'
else: return ' | = udf(Cat4, StringType())
df = df.withColumn("cat4", cat4Udf('categorical_4'))
print(df.select('cat1').distinct().rdd.map(lambda r: r[0]).collect())
print(df.select('cat2').distinct().rdd.map(lambda r: r[0]).collect())
print(df.select('cat3').distinct().rdd.map(lambda r: r[0]).collect())
print(df.select('cat4').distinct().rdd.map(lambda r: r[0]).collect())
# COMMAND ----------
# MAGIC %md #### For variables "fault_code_type_1 to 4", if it is "Unknown" that means there is "0" fault code reported on that day for that machine, otherwise the count of fault code type 1 (2, 3, or 4) is 1.
# COMMAND ----------
df = df.withColumn("fault_code_type_1_count",F.when(df.fault_code_type_1!= "Unknown", 1).otherwise(0))\
.withColumn("fault_code_type_2_count",F.when(df.fault_code_type_2!= "Unknown", 1).otherwise(0))\
.withColumn("fault_code_type_3_count",F.when(df.fault_code_type_3!= "Unknown", 1).otherwise(0))\
.withColumn("fault_code_type_4_count",F.when(df.fault_code_type_4!= "Unknown", | morethan20000'
cat4Udf | conditional_block |
EffectObject.ts | without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of the Egret nor the
// names of its contributors may be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY EGRET AND CONTRIBUTORS "AS IS" AND ANY EXPRESS
// OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
// IN NO EVENT SHALL EGRET AND CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;LOSS OF USE, DATA,
// OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
// EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
//////////////////////////////////////////////////////////////////////////////////////
module d5power
{
export class EffectObject extends egret.DisplayObjectContainer implements ISpriteSheetWaiter
{
public static MAX_POOL_NUM:number = 100;
private static _pool_event:Array<EffectObject>=[];
public owner:IGD;
public target:IGD;
public skillid:number;
public static getInstance():EffectObject
{
var obj:EffectObject;
if(EffectObject._pool_event.length)
{
obj = EffectObject._pool_event.pop();
}else{
obj = new EffectObject();
obj._monitor = new egret.Bitmap();
}
return obj;
}
private static back2pool(obj:EffectObject):void
{
if(EffectObject._pool_event.length<EffectObject.MAX_POOL_NUM && EffectObject._pool_event.indexOf(obj)==-1) EffectObject._pool_event.push(obj);
}
public constructor(){
super();
}
public get loadID():number
{
return this._loadID;
}
public updateRayCopy(deep:number,angle:number):void
{
this._moveAngle+=angle*deep;
this._sonAngle+=angle*deep;
}
private _lastRender:number;
private _impl:EffectImplement;
private _playFrame:number=0;
private _totalframe:number;
public _spriteSheet:IDisplayer;
protected _monitor:egret.Bitmap;
private _loadID:number=0;
private _sheet:egret.SpriteSheet;
private _res:string;
protected _drawAction:Function;
private _liveStart:number = 0;
private _moveAngle:number;
private _sonAngle:number;
private _posx:number;
private _posy:number;
private _dir:number;
private _sonDeep:number;
public deleting:boolean=false;
private _offX:number;
private _offY:number;
public setup(start:number,data:EffectImplement,dir:number,posx:number,posy:number):void
{
this._impl = data;
this._moveAngle = data.getMoveAngle(dir);
this._sonAngle = data.getSonAngle(dir);
this._dir = dir;
this._posx = posx;
this._posy = posy;
this._liveStart = start;
this._sonDeep = data.sonFrameDeep;
this._monitor.alpha = 1;
this._monitor.rotation = 0;
this._monitor.scaleX = this._monitor.scaleY = 1;
this.deleting = false;
var res:string = this._impl.res;
var p:Array<any> = data.getDirectionPos(dir);
this._offX = p[0];
this._offY = p[1];
this._posx+=this._offX;
this._posy+=this._offY;
if(res.indexOf('.json')!=-1)
{
this._res = res.substr(0,res.length-5);
this._loadID++;
D5SpriteSheet.getInstance(this._res+'.png',this);
}
else if(res.indexOf('.png')!=-1)
{
this._res = res;
this.onTextureComplete(D5UIResourceData.getData(this._res).getResource(0));
}
}
private onTextureComplete(data:egret.Texture):void
{
this._monitor.texture = data;
this._totalframe = 5;
this._drawAction = this.draw;
this.runPos();
this._impl.lowLv ? D5Game.me.bottomLayer.addChild(this._monitor) : D5Game.me.topLayer.addChild(this._monitor);
}
public onSpriteSheepReady(data:IDisplayer):void
{
if (this._spriteSheet) this._spriteSheet.unlink();
if(data == null) return;
this._spriteSheet = data;
this._totalframe = this._spriteSheet.totalFrame;
this._drawAction = this.drawJson;
this.runPos();
this._impl.lowLv ? D5Game.me.bottomLayer.addChild(this._monitor) : D5Game.me.topLayer.addChild(this._monitor);
}
private runPos():void
{
var target:egret.Point = D5Game.me.map.getScreenPostion(this._posx,this._posy);
if(this._monitor)
{
this._monitor.x = target.x;
this._monitor.y = target.y;
if(this._spriteSheet)
{
this._monitor.x+=this._spriteSheet.gX;
this._monitor.y+=this._spriteSheet.gY;
}else{
this._monitor.x-=this._monitor.width>>1;
this._monitor.y-=this._monitor.height>>1;
}
}
}
private _lastCheck:number;
public render():void
{
this._drawAction!=null ? this._drawAction() : 0;
}
private draw():void
{
var t:number = egret.getTimer();
if(this._impl.live>0 && t-this._liveStart>this._impl.live)
{
this.dispose();
return;
}
var cost_time:number = (t - this._liveStart) / this._impl.playSpeed;
if (this._playFrame != cost_time)
{
this._playFrame = Math.floor(cost_time % this._totalframe);
if(this._impl.moveSpeed!=0)
{
this._posx+=Math.cos(this._moveAngle)*this._impl.moveSpeed;
this._posy+=Math.sin(this._moveAngle)*this._impl.moveSpeed;
}
this.runPos();
if(this._impl.alphaSpeed!=0)
{
this._monitor.alpha+=this._impl.alphaSpeed;
if(this._monitor.alpha<=0)
{
this.dispose();
return;
}
}
if(this._impl.zoomSpeed!=0)
{
this._monitor.scaleX+=this._impl.zoomSpeed;
this._monitor.scaleY+=this._impl.zoomSpeed;
}
if(this._impl.rotationSpeed!=0)
{
this.rotation+=this._impl.rotationSpeed;
}
if(this._playFrame==this._impl.sonFrame && this._sonDeep>0)
|
if(this._playFrame==this._totalframe-1 && this._totalframe>0)
{
this.dispose();
}
}
}
private drawJson():void
{
if(egret.getTimer()-this._lastRender<this._spriteSheet.renderTime) return;
this.draw();
this._lastRender = egret.getTimer();
var direction:number = 0;
this._monitor.texture = this._spriteSheet.getTexture(direction,this._playFrame);
if(this._spriteSheet.uvList)
{
var f: number = direction * this._spriteSheet.totalFrame + this._playFrame;
this._monitor.x+= this._spriteSheet.uvList[f].offX;
this._monitor.y+= this._spriteSheet.uvList[f].offY;
}
else
{
this._monitor.x+= this._spriteSheet.gX;
this._monitor.y+= this._spriteSheet.gY;
}
this._playFrame++;
if(this._playFrame>=this._spriteSheet.totalFrame) this._playFrame=0;
}
/**
* @param allPro 是否克隆全部属性
*/
public clone(allPro:boolean=false):EffectObject
{
var p:EffectObject = EffectObject.getInstance();
p.setup(D5Game.me.timer,this._impl,this._dir,this._posx,this._posy);
p._moveAngle = this._moveAngle;
p | {
var obj:EffectObject = this.clone(true);
obj._sonDeep = --this._sonDeep;
obj._posx = this._posx+this._impl.sonSpeed*Math.cos(this._sonAngle);
obj._posy = this._posy+this._impl.sonSpeed*Math.sin(this._sonAngle);
D5Game.me.addEffect(obj);
} | conditional_block |
EffectObject.ts | without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of the Egret nor the
// names of its contributors may be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY EGRET AND CONTRIBUTORS "AS IS" AND ANY EXPRESS
// OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
// IN NO EVENT SHALL EGRET AND CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;LOSS OF USE, DATA,
// OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
// EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
//////////////////////////////////////////////////////////////////////////////////////
module d5power
{
export class EffectObject extends egret.DisplayObjectContainer implements ISpriteSheetWaiter
{
public static MAX_POOL_NUM:number = 100;
private static _pool_event:Array<EffectObject>=[];
public owner:IGD;
public target:IGD;
public skillid:number;
public static getInstance():EffectObject
{
var obj:EffectObject;
if(EffectObject._pool_event.length)
{
obj = EffectObject._pool_event.pop();
}else{
obj = new EffectObject();
obj._monitor = new egret.Bitmap();
}
return obj;
}
private static back2pool(obj:EffectObject):void
{
if(EffectObject._pool_event.length<EffectObject.MAX_POOL_NUM && EffectObject._pool_event.indexOf(obj)==-1) EffectObject._pool_event.push(obj);
}
public constructor(){
super();
}
public get loadID():number
{
return this._loadID;
}
public updateRayCopy(deep:number,angle:number):void
{
this._moveAngle+=angle*deep;
this._sonAngle+=angle*deep;
}
private _lastRender:number;
private _impl:EffectImplement;
private _playFrame:number=0;
private _totalframe:number;
public _spriteSheet:IDisplayer;
protected _monitor:egret.Bitmap;
private _loadID:number=0;
private _sheet:egret.SpriteSheet;
private _res:string;
protected _drawAction:Function;
private _liveStart:number = 0;
private _moveAngle:number;
private _sonAngle:number;
private _posx:number;
private _posy:number;
private _dir:number;
private _sonDeep:number;
public deleting:boolean=false;
private _offX:number;
private _offY:number;
public setup(start:number,data:EffectImplement,dir:number,posx:number,posy:number):void
{
this._impl = data;
this._moveAngle = data.getMoveAngle(dir);
this._sonAngle = data.getSonAngle(dir);
this._dir = dir;
this._posx = posx;
this._posy = posy;
this._liveStart = start;
this._sonDeep = data.sonFrameDeep;
this._monitor.alpha = 1;
this._monitor.rotation = 0;
this._monitor.scaleX = this._monitor.scaleY = 1;
this.deleting = false;
var res:string = this._impl.res;
var p:Array<any> = data.getDirectionPos(dir);
this._offX = p[0];
this._offY = p[1];
this._posx+=this._offX;
this._posy+=this._offY;
if(res.indexOf('.json')!=-1)
{
this._res = res.substr(0,res.length-5);
this._loadID++;
D5SpriteSheet.getInstance(this._res+'.png',this);
}
else if(res.indexOf('.png')!=-1)
{
this._res = res;
this.onTextureComplete(D5UIResourceData.getData(this._res).getResource(0));
}
}
private onTextureComplete(data:egret.Texture):void
{
this._monitor.texture = data;
this._totalframe = 5;
this._drawAction = this.draw;
this.runPos();
this._impl.lowLv ? D5Game.me.bottomLayer.addChild(this._monitor) : D5Game.me.topLayer.addChild(this._monitor);
}
public onSpriteSheepReady(data:IDisplayer):void
{
if (this._spriteSheet) this._spriteSheet.unlink();
if(data == null) return;
this._spriteSheet = data;
this._totalframe = this._spriteSheet.totalFrame;
this._drawAction = this.drawJson;
this.runPos();
this._impl.lowLv ? D5Game.me.bottomLayer.addChild(this._monitor) : D5Game.me.topLayer.addChild(this._monitor);
}
private | ():void
{
var target:egret.Point = D5Game.me.map.getScreenPostion(this._posx,this._posy);
if(this._monitor)
{
this._monitor.x = target.x;
this._monitor.y = target.y;
if(this._spriteSheet)
{
this._monitor.x+=this._spriteSheet.gX;
this._monitor.y+=this._spriteSheet.gY;
}else{
this._monitor.x-=this._monitor.width>>1;
this._monitor.y-=this._monitor.height>>1;
}
}
}
private _lastCheck:number;
public render():void
{
this._drawAction!=null ? this._drawAction() : 0;
}
private draw():void
{
var t:number = egret.getTimer();
if(this._impl.live>0 && t-this._liveStart>this._impl.live)
{
this.dispose();
return;
}
var cost_time:number = (t - this._liveStart) / this._impl.playSpeed;
if (this._playFrame != cost_time)
{
this._playFrame = Math.floor(cost_time % this._totalframe);
if(this._impl.moveSpeed!=0)
{
this._posx+=Math.cos(this._moveAngle)*this._impl.moveSpeed;
this._posy+=Math.sin(this._moveAngle)*this._impl.moveSpeed;
}
this.runPos();
if(this._impl.alphaSpeed!=0)
{
this._monitor.alpha+=this._impl.alphaSpeed;
if(this._monitor.alpha<=0)
{
this.dispose();
return;
}
}
if(this._impl.zoomSpeed!=0)
{
this._monitor.scaleX+=this._impl.zoomSpeed;
this._monitor.scaleY+=this._impl.zoomSpeed;
}
if(this._impl.rotationSpeed!=0)
{
this.rotation+=this._impl.rotationSpeed;
}
if(this._playFrame==this._impl.sonFrame && this._sonDeep>0)
{
var obj:EffectObject = this.clone(true);
obj._sonDeep = --this._sonDeep;
obj._posx = this._posx+this._impl.sonSpeed*Math.cos(this._sonAngle);
obj._posy = this._posy+this._impl.sonSpeed*Math.sin(this._sonAngle);
D5Game.me.addEffect(obj);
}
if(this._playFrame==this._totalframe-1 && this._totalframe>0)
{
this.dispose();
}
}
}
private drawJson():void
{
if(egret.getTimer()-this._lastRender<this._spriteSheet.renderTime) return;
this.draw();
this._lastRender = egret.getTimer();
var direction:number = 0;
this._monitor.texture = this._spriteSheet.getTexture(direction,this._playFrame);
if(this._spriteSheet.uvList)
{
var f: number = direction * this._spriteSheet.totalFrame + this._playFrame;
this._monitor.x+= this._spriteSheet.uvList[f].offX;
this._monitor.y+= this._spriteSheet.uvList[f].offY;
}
else
{
this._monitor.x+= this._spriteSheet.gX;
this._monitor.y+= this._spriteSheet.gY;
}
this._playFrame++;
if(this._playFrame>=this._spriteSheet.totalFrame) this._playFrame=0;
}
/**
* @param allPro 是否克隆全部属性
*/
public clone(allPro:boolean=false):EffectObject
{
var p:EffectObject = EffectObject.getInstance();
p.setup(D5Game.me.timer,this._impl,this._dir,this._posx,this._posy);
p._moveAngle = this._moveAngle;
p | runPos | identifier_name |
EffectObject.ts | without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of the Egret nor the
// names of its contributors may be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY EGRET AND CONTRIBUTORS "AS IS" AND ANY EXPRESS
// OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
// IN NO EVENT SHALL EGRET AND CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;LOSS OF USE, DATA,
// OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
// EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
//////////////////////////////////////////////////////////////////////////////////////
module d5power
{
export class EffectObject extends egret.DisplayObjectContainer implements ISpriteSheetWaiter
{
public static MAX_POOL_NUM:number = 100;
private static _pool_event:Array<EffectObject>=[];
public owner:IGD;
public target:IGD;
public skillid:number;
public static getInstance():EffectObject
{
var obj:EffectObject;
if(EffectObject._pool_event.length)
{
obj = EffectObject._pool_event.pop();
}else{
obj = new EffectObject();
obj._monitor = new egret.Bitmap();
}
return obj;
}
private static back2pool(obj:EffectObject):void
{
if(EffectObject._pool_event.length<EffectObject.MAX_POOL_NUM && EffectObject._pool_event.indexOf(obj)==-1) EffectObject._pool_event.push(obj);
}
public constructor(){
super();
}
public get loadID():number
{
return this._loadID;
}
public updateRayCopy(deep:number,angle:number):void
{
this._moveAngle+=angle*deep;
this._sonAngle+=angle*deep;
}
private _lastRender:number;
private _impl:EffectImplement;
private _playFrame:number=0;
private _totalframe:number;
public _spriteSheet:IDisplayer;
protected _monitor:egret.Bitmap;
private _loadID:number=0;
private _sheet:egret.SpriteSheet;
private _res:string;
protected _drawAction:Function;
private _liveStart:number = 0;
private _moveAngle:number;
private _sonAngle:number;
private _posx:number;
private _posy:number;
private _dir:number;
private _sonDeep:number;
public deleting:boolean=false;
private _offX:number;
private _offY:number;
public setup(start:number,data:EffectImplement,dir:number,posx:number,posy:number):void
{
this._impl = data;
this._moveAngle = data.getMoveAngle(dir);
this._sonAngle = data.getSonAngle(dir);
this._dir = dir;
this._posx = posx;
this._posy = posy;
this._liveStart = start;
this._sonDeep = data.sonFrameDeep;
this._monitor.alpha = 1;
this._monitor.rotation = 0;
this._monitor.scaleX = this._monitor.scaleY = 1;
this.deleting = false;
var res:string = this._impl.res;
var p:Array<any> = data.getDirectionPos(dir);
this._offX = p[0];
this._offY = p[1];
this._posx+=this._offX;
this._posy+=this._offY;
if(res.indexOf('.json')!=-1)
{
| {
this._res = res;
this.onTextureComplete(D5UIResourceData.getData(this._res).getResource(0));
}
}
private onTextureComplete(data:egret.Texture):void
{
this._monitor.texture = data;
this._totalframe = 5;
this._drawAction = this.draw;
this.runPos();
this._impl.lowLv ? D5Game.me.bottomLayer.addChild(this._monitor) : D5Game.me.topLayer.addChild(this._monitor);
}
public onSpriteSheepReady(data:IDisplayer):void
{
if (this._spriteSheet) this._spriteSheet.unlink();
if(data == null) return;
this._spriteSheet = data;
this._totalframe = this._spriteSheet.totalFrame;
this._drawAction = this.drawJson;
this.runPos();
this._impl.lowLv ? D5Game.me.bottomLayer.addChild(this._monitor) : D5Game.me.topLayer.addChild(this._monitor);
}
private runPos():void
{
var target:egret.Point = D5Game.me.map.getScreenPostion(this._posx,this._posy);
if(this._monitor)
{
this._monitor.x = target.x;
this._monitor.y = target.y;
if(this._spriteSheet)
{
this._monitor.x+=this._spriteSheet.gX;
this._monitor.y+=this._spriteSheet.gY;
}else{
this._monitor.x-=this._monitor.width>>1;
this._monitor.y-=this._monitor.height>>1;
}
}
}
private _lastCheck:number;
public render():void
{
this._drawAction!=null ? this._drawAction() : 0;
}
private draw():void
{
var t:number = egret.getTimer();
if(this._impl.live>0 && t-this._liveStart>this._impl.live)
{
this.dispose();
return;
}
var cost_time:number = (t - this._liveStart) / this._impl.playSpeed;
if (this._playFrame != cost_time)
{
this._playFrame = Math.floor(cost_time % this._totalframe);
if(this._impl.moveSpeed!=0)
{
this._posx+=Math.cos(this._moveAngle)*this._impl.moveSpeed;
this._posy+=Math.sin(this._moveAngle)*this._impl.moveSpeed;
}
this.runPos();
if(this._impl.alphaSpeed!=0)
{
this._monitor.alpha+=this._impl.alphaSpeed;
if(this._monitor.alpha<=0)
{
this.dispose();
return;
}
}
if(this._impl.zoomSpeed!=0)
{
this._monitor.scaleX+=this._impl.zoomSpeed;
this._monitor.scaleY+=this._impl.zoomSpeed;
}
if(this._impl.rotationSpeed!=0)
{
this.rotation+=this._impl.rotationSpeed;
}
if(this._playFrame==this._impl.sonFrame && this._sonDeep>0)
{
var obj:EffectObject = this.clone(true);
obj._sonDeep = --this._sonDeep;
obj._posx = this._posx+this._impl.sonSpeed*Math.cos(this._sonAngle);
obj._posy = this._posy+this._impl.sonSpeed*Math.sin(this._sonAngle);
D5Game.me.addEffect(obj);
}
if(this._playFrame==this._totalframe-1 && this._totalframe>0)
{
this.dispose();
}
}
}
private drawJson():void
{
if(egret.getTimer()-this._lastRender<this._spriteSheet.renderTime) return;
this.draw();
this._lastRender = egret.getTimer();
var direction:number = 0;
this._monitor.texture = this._spriteSheet.getTexture(direction,this._playFrame);
if(this._spriteSheet.uvList)
{
var f: number = direction * this._spriteSheet.totalFrame + this._playFrame;
this._monitor.x+= this._spriteSheet.uvList[f].offX;
this._monitor.y+= this._spriteSheet.uvList[f].offY;
}
else
{
this._monitor.x+= this._spriteSheet.gX;
this._monitor.y+= this._spriteSheet.gY;
}
this._playFrame++;
if(this._playFrame>=this._spriteSheet.totalFrame) this._playFrame=0;
}
/**
* @param allPro 是否克隆全部属性
*/
public clone(allPro:boolean=false):EffectObject
{
var p:EffectObject = EffectObject.getInstance();
p.setup(D5Game.me.timer,this._impl,this._dir,this._posx,this._posy);
p._moveAngle = this._moveAngle;
| this._res = res.substr(0,res.length-5);
this._loadID++;
D5SpriteSheet.getInstance(this._res+'.png',this);
}
else if(res.indexOf('.png')!=-1)
| random_line_split |
config.go | Bind string `toml:"bind" mapstructure:"bind" env:"SERVER_BIND"`
// https bind address. ":<port>" for all interfaces
TLSBind string `toml:"tls-bind" mapstructure:"tls-bind" env:"SERVER_TLS_BIND"`
// TLS certificate file path
TLSCertFile string `toml:"tls-cert-file" mapstructure:"tls-cert-file" env:"SERVER_TLS_CERT_FILE"`
// TLS key file path
TLSKeyFile string `toml:"tls-key-file" mapstructure:"tls-key-file" env:"SERVER_TLS_KEY_FILE"`
// Maximum duration before timing out read of the request
ReadTimeout time.Duration `toml:"read-timeout" mapstructure:"read-timeout" env:"SERVER_READ_TIMEOUT"`
// Maximum duration before timing out write of the response
WriteTimeout time.Duration `toml:"write-timeout" mapstructure:"write-timeout" env:"SERVER_WRITE_TIMEOUT"`
// Maximum duration before timing out idle keep-alive connection
IdleTimeout time.Duration `toml:"idle-timeout" mapstructure:"idle-timeout" env:"SERVER_IDLE_TIMEOUT"`
// Use manifest.json assets mapping
AssetsManifest bool `toml:"assets-manifest" mapstructure:"assets-manifest" env:"SERVER_ASSETS_MANIFEST"`
// Disable browsers caching asset files by setting response headers
AssetsNoCache bool `toml:"assets-no-cache" mapstructure:"assets-no-cache" env:"SERVER_ASSETS_NO_CACHE"`
// RenderRecompile enables recompilation of the template on every render call.
// This should be used in development mode so no server restart is required
// on template file changes.
RenderRecompile bool `toml:"render-recompile" mapstructure:"render-recompile" env:"SERVER_RENDER_RECOMPILE"`
// Use the development mode sessions storer opposed to production mode storer
// defined in app/sessions.go -- Usually a cookie storer for dev
// and disk storer for prod.
SessionsDevStorer bool `toml:"sessions-dev-storer" mapstructure:"sessions-dev-storer" env:"SERVER_SESSIONS_DEV_STORER"`
// PublicPath defaults to "public" but can be set to something else
// by the {{.AppEnvName}}_SERVER_PUBLIC_PATH environment variable.
// This is set by the "abcweb dev" command to instruct the app to
// load assets from a /tmp folder instead of the local public folder.
PublicPath string `toml:"public-path" mapstructure:"public-path" env:"SERVER_PUBLIC_PATH"`
}
// DBConfig holds the Postgres database config for the app loaded through
// environment variables, or the config.toml file.
type DBConfig struct {
// The Postgres database name
DBName string `toml:"dbname" mapstructure:"dbname" env:"DB_DBNAME"`
Host string `toml:"host" mapstructure:"host" env:"DB_HOST"`
Port int `toml:"port" mapstructure:"port" env:"DB_PORT"`
User string `toml:"user" mapstructure:"user" env:"DB_USER"`
Pass string `toml:"pass" mapstructure:"pass" env:"DB_PASS"`
SSLMode string `toml:"sslmode" mapstructure:"sslmode" env:"DB_SSLMODE"`
// Throw an error when the app starts if the database is not
// using the latest migration
EnforceMigration bool `toml:"enforce-migration" mapstructure:"enforce-migration" env:"DB_ENFORCE_MIGRATION"`
}
// Bind your passed in config flags to a new viper
// instance, retrieves the active environment section of your config file using
// that viper instance, and then loads your server and db config into
// the passed in cfg struct and validates the db config is set appropriately.
func (c *Config) Bind(flags *pflag.FlagSet, cfg interface{}) (*viper.Viper, error) {
v, err := c.NewSubViper(flags, cfg)
if err != nil {
return v, err
}
if err := UnmarshalAppConfig(cfg, v); err != nil {
return v, err
}
val := reflect.Indirect(reflect.ValueOf(cfg))
// Check if there's a DBConfig object in the cfg struct.
// If found, then validate all fields on it are set appropriately.
for i := 0; i < val.NumField(); i++ {
dbCfg, ok := val.Field(i).Interface().(DBConfig)
if !ok {
continue
}
if err := ValidateDBConfig(dbCfg); err != nil {
return v, err
}
break
}
return v, nil
}
// NewSubViper returns a viper instance activated against the active environment
// configuration subsection and initialized with the config.toml
// configuration file and the environment variable prefix.
// It also takes in the configuration struct so that it can generate the env
// mappings.
func (c *Config) NewSubViper(flags *pflag.FlagSet, cfg interface{}) (*viper.Viper, error) {
v := viper.New()
if flags != nil {
if err := v.BindPFlags(flags); err != nil {
return nil, err
}
}
if err := c.ConfigureViper(v); err != nil {
return nil, err
}
// Use the env from the config if it's not explicitly set
env := c.LoadEnv
if env == "" {
env = v.GetString("env")
}
v = v.Sub(env)
if v == nil {
return nil, fmt.Errorf("cannot find env section named %s", env)
}
mappings, err := GetTagMappings(cfg)
if err != nil {
return nil, errors.Wrap(err, "unable to get tag mappings for config struct")
}
if c.EnvPrefix != "" {
for _, m := range mappings {
v.BindEnv(m.chain, strings.Join([]string{c.EnvPrefix, m.env}, "_"))
}
} else {
for _, m := range mappings {
v.BindEnv(m.chain, m.env)
}
}
if v == nil {
return nil, fmt.Errorf("unable to load environment %q from %q", env, c.File)
}
if flags != nil {
if err := v.BindPFlags(flags); err != nil {
return nil, err
}
}
v.Set("env", env)
return v, nil
}
// ConfigureViper sets the viper object to use the passed in config toml file
// and also configures the configuration environment variables.
func (c *Config) ConfigureViper(v *viper.Viper) error {
v.SetConfigType("toml")
v.SetConfigFile(c.File)
v.SetEnvPrefix(c.EnvPrefix)
v.SetEnvKeyReplacer(strings.NewReplacer("-", "_"))
if err := v.ReadInConfig(); err != nil {
return err
}
v.AutomaticEnv()
return nil
}
// UnmarshalAppConfig unmarshals the viper's configured config file
// into the passed in cfg object containing an AppConfig
func UnmarshalAppConfig(cfg interface{}, v *viper.Viper) error {
err := v.Unmarshal(cfg)
if err != nil {
return err
}
val := reflect.Indirect(reflect.ValueOf(cfg))
// if cfg has an embedded AppConfig then we need to unmarshal
// directly into that and overwrite it in the parent struct,
// since its another layer of indirection and viper
// can't handle it magically.
for i := 0; i < val.NumField(); i++ {
appCfg, ok := val.Field(i).Interface().(AppConfig)
if !ok {
continue
}
v.Unmarshal(&appCfg)
val.Field(i).Set(reflect.ValueOf(appCfg))
// overwrite val to point to the AppConfig so DBConfig can be set below.
val = val.Field(i)
break
}
// Find *DBConfig and set object appropriately
for i := 0; i < val.NumField(); i++ {
dbCfg, ok := val.Field(i).Interface().(DBConfig)
if !ok {
continue
}
if dbCfg.Port == 0 {
dbCfg.Port = 5432
}
if dbCfg.SSLMode == "" {
dbCfg.SSLMode = "require"
}
val.Field(i).Set(reflect.ValueOf(dbCfg))
// Finished working on the db cfg struct, so break out
break
}
return nil
}
// ValidateDBConfig returns an error if any of the required db config
// fields are not set to their appropriate values.
func ValidateDBConfig(cfg DBConfig) error {
err := vala.BeginValidation().Validate(
vala.StringNotEmpty(cfg.User, "user"),
vala.StringNotEmpty(cfg.Host, "host"),
vala.Not(vala.Equals(cfg.Port, 0, "port")),
vala.StringNotEmpty(cfg.DBName, "dbname"),
vala.StringNotEmpty(cfg.SSLMode, "sslmode"),
).Check()
if err != nil {
return err
}
return nil
}
// Mapping represents a chain which is a list of nested object mapstructures
// joined together and seperated by dots (i.e. one.two.three), and the
// accompanying environment variable tag value for the last item in the chain
type Mapping struct {
chain string
env string | // http bind address. ":<port>" for all interfaces | random_line_split | |
config.go | toml:"tls-bind" mapstructure:"tls-bind" env:"SERVER_TLS_BIND"`
// TLS certificate file path
TLSCertFile string `toml:"tls-cert-file" mapstructure:"tls-cert-file" env:"SERVER_TLS_CERT_FILE"`
// TLS key file path
TLSKeyFile string `toml:"tls-key-file" mapstructure:"tls-key-file" env:"SERVER_TLS_KEY_FILE"`
// Maximum duration before timing out read of the request
ReadTimeout time.Duration `toml:"read-timeout" mapstructure:"read-timeout" env:"SERVER_READ_TIMEOUT"`
// Maximum duration before timing out write of the response
WriteTimeout time.Duration `toml:"write-timeout" mapstructure:"write-timeout" env:"SERVER_WRITE_TIMEOUT"`
// Maximum duration before timing out idle keep-alive connection
IdleTimeout time.Duration `toml:"idle-timeout" mapstructure:"idle-timeout" env:"SERVER_IDLE_TIMEOUT"`
// Use manifest.json assets mapping
AssetsManifest bool `toml:"assets-manifest" mapstructure:"assets-manifest" env:"SERVER_ASSETS_MANIFEST"`
// Disable browsers caching asset files by setting response headers
AssetsNoCache bool `toml:"assets-no-cache" mapstructure:"assets-no-cache" env:"SERVER_ASSETS_NO_CACHE"`
// RenderRecompile enables recompilation of the template on every render call.
// This should be used in development mode so no server restart is required
// on template file changes.
RenderRecompile bool `toml:"render-recompile" mapstructure:"render-recompile" env:"SERVER_RENDER_RECOMPILE"`
// Use the development mode sessions storer opposed to production mode storer
// defined in app/sessions.go -- Usually a cookie storer for dev
// and disk storer for prod.
SessionsDevStorer bool `toml:"sessions-dev-storer" mapstructure:"sessions-dev-storer" env:"SERVER_SESSIONS_DEV_STORER"`
// PublicPath defaults to "public" but can be set to something else
// by the {{.AppEnvName}}_SERVER_PUBLIC_PATH environment variable.
// This is set by the "abcweb dev" command to instruct the app to
// load assets from a /tmp folder instead of the local public folder.
PublicPath string `toml:"public-path" mapstructure:"public-path" env:"SERVER_PUBLIC_PATH"`
}
// DBConfig holds the Postgres database config for the app loaded through
// environment variables, or the config.toml file.
type DBConfig struct {
// The Postgres database name
DBName string `toml:"dbname" mapstructure:"dbname" env:"DB_DBNAME"`
Host string `toml:"host" mapstructure:"host" env:"DB_HOST"`
Port int `toml:"port" mapstructure:"port" env:"DB_PORT"`
User string `toml:"user" mapstructure:"user" env:"DB_USER"`
Pass string `toml:"pass" mapstructure:"pass" env:"DB_PASS"`
SSLMode string `toml:"sslmode" mapstructure:"sslmode" env:"DB_SSLMODE"`
// Throw an error when the app starts if the database is not
// using the latest migration
EnforceMigration bool `toml:"enforce-migration" mapstructure:"enforce-migration" env:"DB_ENFORCE_MIGRATION"`
}
// Bind your passed in config flags to a new viper
// instance, retrieves the active environment section of your config file using
// that viper instance, and then loads your server and db config into
// the passed in cfg struct and validates the db config is set appropriately.
func (c *Config) Bind(flags *pflag.FlagSet, cfg interface{}) (*viper.Viper, error) {
v, err := c.NewSubViper(flags, cfg)
if err != nil {
return v, err
}
if err := UnmarshalAppConfig(cfg, v); err != nil {
return v, err
}
val := reflect.Indirect(reflect.ValueOf(cfg))
// Check if there's a DBConfig object in the cfg struct.
// If found, then validate all fields on it are set appropriately.
for i := 0; i < val.NumField(); i++ {
dbCfg, ok := val.Field(i).Interface().(DBConfig)
if !ok {
continue
}
if err := ValidateDBConfig(dbCfg); err != nil {
return v, err
}
break
}
return v, nil
}
// NewSubViper returns a viper instance activated against the active environment
// configuration subsection and initialized with the config.toml
// configuration file and the environment variable prefix.
// It also takes in the configuration struct so that it can generate the env
// mappings.
func (c *Config) NewSubViper(flags *pflag.FlagSet, cfg interface{}) (*viper.Viper, error) {
v := viper.New()
if flags != nil {
if err := v.BindPFlags(flags); err != nil {
return nil, err
}
}
if err := c.ConfigureViper(v); err != nil {
return nil, err
}
// Use the env from the config if it's not explicitly set
env := c.LoadEnv
if env == "" {
env = v.GetString("env")
}
v = v.Sub(env)
if v == nil {
return nil, fmt.Errorf("cannot find env section named %s", env)
}
mappings, err := GetTagMappings(cfg)
if err != nil {
return nil, errors.Wrap(err, "unable to get tag mappings for config struct")
}
if c.EnvPrefix != "" {
for _, m := range mappings {
v.BindEnv(m.chain, strings.Join([]string{c.EnvPrefix, m.env}, "_"))
}
} else {
for _, m := range mappings {
v.BindEnv(m.chain, m.env)
}
}
if v == nil {
return nil, fmt.Errorf("unable to load environment %q from %q", env, c.File)
}
if flags != nil {
if err := v.BindPFlags(flags); err != nil {
return nil, err
}
}
v.Set("env", env)
return v, nil
}
// ConfigureViper sets the viper object to use the passed in config toml file
// and also configures the configuration environment variables.
func (c *Config) | (v *viper.Viper) error {
v.SetConfigType("toml")
v.SetConfigFile(c.File)
v.SetEnvPrefix(c.EnvPrefix)
v.SetEnvKeyReplacer(strings.NewReplacer("-", "_"))
if err := v.ReadInConfig(); err != nil {
return err
}
v.AutomaticEnv()
return nil
}
// UnmarshalAppConfig unmarshals the viper's configured config file
// into the passed in cfg object containing an AppConfig
func UnmarshalAppConfig(cfg interface{}, v *viper.Viper) error {
err := v.Unmarshal(cfg)
if err != nil {
return err
}
val := reflect.Indirect(reflect.ValueOf(cfg))
// if cfg has an embedded AppConfig then we need to unmarshal
// directly into that and overwrite it in the parent struct,
// since its another layer of indirection and viper
// can't handle it magically.
for i := 0; i < val.NumField(); i++ {
appCfg, ok := val.Field(i).Interface().(AppConfig)
if !ok {
continue
}
v.Unmarshal(&appCfg)
val.Field(i).Set(reflect.ValueOf(appCfg))
// overwrite val to point to the AppConfig so DBConfig can be set below.
val = val.Field(i)
break
}
// Find *DBConfig and set object appropriately
for i := 0; i < val.NumField(); i++ {
dbCfg, ok := val.Field(i).Interface().(DBConfig)
if !ok {
continue
}
if dbCfg.Port == 0 {
dbCfg.Port = 5432
}
if dbCfg.SSLMode == "" {
dbCfg.SSLMode = "require"
}
val.Field(i).Set(reflect.ValueOf(dbCfg))
// Finished working on the db cfg struct, so break out
break
}
return nil
}
// ValidateDBConfig returns an error if any of the required db config
// fields are not set to their appropriate values.
func ValidateDBConfig(cfg DBConfig) error {
err := vala.BeginValidation().Validate(
vala.StringNotEmpty(cfg.User, "user"),
vala.StringNotEmpty(cfg.Host, "host"),
vala.Not(vala.Equals(cfg.Port, 0, "port")),
vala.StringNotEmpty(cfg.DBName, "dbname"),
vala.StringNotEmpty(cfg.SSLMode, "sslmode"),
).Check()
if err != nil {
return err
}
return nil
}
// Mapping represents a chain which is a list of nested object mapstructures
// joined together and seperated by dots (i.e. one.two.three), and the
// accompanying environment variable tag value for the last item in the chain
type Mapping struct {
chain string
env string
}
// Mappings is a slice of mapping
type Mappings []Mapping
func getTagMappingsRecursive(chain string, v reflect.Value) (Mappings, error) {
mappings := Mappings{}
for i := 0; i < v | ConfigureViper | identifier_name |
config.go | toml:"tls-bind" mapstructure:"tls-bind" env:"SERVER_TLS_BIND"`
// TLS certificate file path
TLSCertFile string `toml:"tls-cert-file" mapstructure:"tls-cert-file" env:"SERVER_TLS_CERT_FILE"`
// TLS key file path
TLSKeyFile string `toml:"tls-key-file" mapstructure:"tls-key-file" env:"SERVER_TLS_KEY_FILE"`
// Maximum duration before timing out read of the request
ReadTimeout time.Duration `toml:"read-timeout" mapstructure:"read-timeout" env:"SERVER_READ_TIMEOUT"`
// Maximum duration before timing out write of the response
WriteTimeout time.Duration `toml:"write-timeout" mapstructure:"write-timeout" env:"SERVER_WRITE_TIMEOUT"`
// Maximum duration before timing out idle keep-alive connection
IdleTimeout time.Duration `toml:"idle-timeout" mapstructure:"idle-timeout" env:"SERVER_IDLE_TIMEOUT"`
// Use manifest.json assets mapping
AssetsManifest bool `toml:"assets-manifest" mapstructure:"assets-manifest" env:"SERVER_ASSETS_MANIFEST"`
// Disable browsers caching asset files by setting response headers
AssetsNoCache bool `toml:"assets-no-cache" mapstructure:"assets-no-cache" env:"SERVER_ASSETS_NO_CACHE"`
// RenderRecompile enables recompilation of the template on every render call.
// This should be used in development mode so no server restart is required
// on template file changes.
RenderRecompile bool `toml:"render-recompile" mapstructure:"render-recompile" env:"SERVER_RENDER_RECOMPILE"`
// Use the development mode sessions storer opposed to production mode storer
// defined in app/sessions.go -- Usually a cookie storer for dev
// and disk storer for prod.
SessionsDevStorer bool `toml:"sessions-dev-storer" mapstructure:"sessions-dev-storer" env:"SERVER_SESSIONS_DEV_STORER"`
// PublicPath defaults to "public" but can be set to something else
// by the {{.AppEnvName}}_SERVER_PUBLIC_PATH environment variable.
// This is set by the "abcweb dev" command to instruct the app to
// load assets from a /tmp folder instead of the local public folder.
PublicPath string `toml:"public-path" mapstructure:"public-path" env:"SERVER_PUBLIC_PATH"`
}
// DBConfig holds the Postgres database config for the app loaded through
// environment variables, or the config.toml file.
type DBConfig struct {
// The Postgres database name
DBName string `toml:"dbname" mapstructure:"dbname" env:"DB_DBNAME"`
Host string `toml:"host" mapstructure:"host" env:"DB_HOST"`
Port int `toml:"port" mapstructure:"port" env:"DB_PORT"`
User string `toml:"user" mapstructure:"user" env:"DB_USER"`
Pass string `toml:"pass" mapstructure:"pass" env:"DB_PASS"`
SSLMode string `toml:"sslmode" mapstructure:"sslmode" env:"DB_SSLMODE"`
// Throw an error when the app starts if the database is not
// using the latest migration
EnforceMigration bool `toml:"enforce-migration" mapstructure:"enforce-migration" env:"DB_ENFORCE_MIGRATION"`
}
// Bind your passed in config flags to a new viper
// instance, retrieves the active environment section of your config file using
// that viper instance, and then loads your server and db config into
// the passed in cfg struct and validates the db config is set appropriately.
func (c *Config) Bind(flags *pflag.FlagSet, cfg interface{}) (*viper.Viper, error) {
v, err := c.NewSubViper(flags, cfg)
if err != nil {
return v, err
}
if err := UnmarshalAppConfig(cfg, v); err != nil {
return v, err
}
val := reflect.Indirect(reflect.ValueOf(cfg))
// Check if there's a DBConfig object in the cfg struct.
// If found, then validate all fields on it are set appropriately.
for i := 0; i < val.NumField(); i++ {
dbCfg, ok := val.Field(i).Interface().(DBConfig)
if !ok {
continue
}
if err := ValidateDBConfig(dbCfg); err != nil {
return v, err
}
break
}
return v, nil
}
// NewSubViper returns a viper instance activated against the active environment
// configuration subsection and initialized with the config.toml
// configuration file and the environment variable prefix.
// It also takes in the configuration struct so that it can generate the env
// mappings.
func (c *Config) NewSubViper(flags *pflag.FlagSet, cfg interface{}) (*viper.Viper, error) {
v := viper.New()
if flags != nil {
if err := v.BindPFlags(flags); err != nil {
return nil, err
}
}
if err := c.ConfigureViper(v); err != nil {
return nil, err
}
// Use the env from the config if it's not explicitly set
env := c.LoadEnv
if env == "" |
v = v.Sub(env)
if v == nil {
return nil, fmt.Errorf("cannot find env section named %s", env)
}
mappings, err := GetTagMappings(cfg)
if err != nil {
return nil, errors.Wrap(err, "unable to get tag mappings for config struct")
}
if c.EnvPrefix != "" {
for _, m := range mappings {
v.BindEnv(m.chain, strings.Join([]string{c.EnvPrefix, m.env}, "_"))
}
} else {
for _, m := range mappings {
v.BindEnv(m.chain, m.env)
}
}
if v == nil {
return nil, fmt.Errorf("unable to load environment %q from %q", env, c.File)
}
if flags != nil {
if err := v.BindPFlags(flags); err != nil {
return nil, err
}
}
v.Set("env", env)
return v, nil
}
// ConfigureViper sets the viper object to use the passed in config toml file
// and also configures the configuration environment variables.
func (c *Config) ConfigureViper(v *viper.Viper) error {
v.SetConfigType("toml")
v.SetConfigFile(c.File)
v.SetEnvPrefix(c.EnvPrefix)
v.SetEnvKeyReplacer(strings.NewReplacer("-", "_"))
if err := v.ReadInConfig(); err != nil {
return err
}
v.AutomaticEnv()
return nil
}
// UnmarshalAppConfig unmarshals the viper's configured config file
// into the passed in cfg object containing an AppConfig
func UnmarshalAppConfig(cfg interface{}, v *viper.Viper) error {
err := v.Unmarshal(cfg)
if err != nil {
return err
}
val := reflect.Indirect(reflect.ValueOf(cfg))
// if cfg has an embedded AppConfig then we need to unmarshal
// directly into that and overwrite it in the parent struct,
// since its another layer of indirection and viper
// can't handle it magically.
for i := 0; i < val.NumField(); i++ {
appCfg, ok := val.Field(i).Interface().(AppConfig)
if !ok {
continue
}
v.Unmarshal(&appCfg)
val.Field(i).Set(reflect.ValueOf(appCfg))
// overwrite val to point to the AppConfig so DBConfig can be set below.
val = val.Field(i)
break
}
// Find *DBConfig and set object appropriately
for i := 0; i < val.NumField(); i++ {
dbCfg, ok := val.Field(i).Interface().(DBConfig)
if !ok {
continue
}
if dbCfg.Port == 0 {
dbCfg.Port = 5432
}
if dbCfg.SSLMode == "" {
dbCfg.SSLMode = "require"
}
val.Field(i).Set(reflect.ValueOf(dbCfg))
// Finished working on the db cfg struct, so break out
break
}
return nil
}
// ValidateDBConfig returns an error if any of the required db config
// fields are not set to their appropriate values.
func ValidateDBConfig(cfg DBConfig) error {
err := vala.BeginValidation().Validate(
vala.StringNotEmpty(cfg.User, "user"),
vala.StringNotEmpty(cfg.Host, "host"),
vala.Not(vala.Equals(cfg.Port, 0, "port")),
vala.StringNotEmpty(cfg.DBName, "dbname"),
vala.StringNotEmpty(cfg.SSLMode, "sslmode"),
).Check()
if err != nil {
return err
}
return nil
}
// Mapping represents a chain which is a list of nested object mapstructures
// joined together and seperated by dots (i.e. one.two.three), and the
// accompanying environment variable tag value for the last item in the chain
type Mapping struct {
chain string
env string
}
// Mappings is a slice of mapping
type Mappings []Mapping
func getTagMappingsRecursive(chain string, v reflect.Value) (Mappings, error) {
mappings := Mappings{}
for i := 0; i < | {
env = v.GetString("env")
} | conditional_block |
config.go | DB_DBNAME"`
Host string `toml:"host" mapstructure:"host" env:"DB_HOST"`
Port int `toml:"port" mapstructure:"port" env:"DB_PORT"`
User string `toml:"user" mapstructure:"user" env:"DB_USER"`
Pass string `toml:"pass" mapstructure:"pass" env:"DB_PASS"`
SSLMode string `toml:"sslmode" mapstructure:"sslmode" env:"DB_SSLMODE"`
// Throw an error when the app starts if the database is not
// using the latest migration
EnforceMigration bool `toml:"enforce-migration" mapstructure:"enforce-migration" env:"DB_ENFORCE_MIGRATION"`
}
// Bind your passed in config flags to a new viper
// instance, retrieves the active environment section of your config file using
// that viper instance, and then loads your server and db config into
// the passed in cfg struct and validates the db config is set appropriately.
func (c *Config) Bind(flags *pflag.FlagSet, cfg interface{}) (*viper.Viper, error) {
v, err := c.NewSubViper(flags, cfg)
if err != nil {
return v, err
}
if err := UnmarshalAppConfig(cfg, v); err != nil {
return v, err
}
val := reflect.Indirect(reflect.ValueOf(cfg))
// Check if there's a DBConfig object in the cfg struct.
// If found, then validate all fields on it are set appropriately.
for i := 0; i < val.NumField(); i++ {
dbCfg, ok := val.Field(i).Interface().(DBConfig)
if !ok {
continue
}
if err := ValidateDBConfig(dbCfg); err != nil {
return v, err
}
break
}
return v, nil
}
// NewSubViper returns a viper instance activated against the active environment
// configuration subsection and initialized with the config.toml
// configuration file and the environment variable prefix.
// It also takes in the configuration struct so that it can generate the env
// mappings.
func (c *Config) NewSubViper(flags *pflag.FlagSet, cfg interface{}) (*viper.Viper, error) {
v := viper.New()
if flags != nil {
if err := v.BindPFlags(flags); err != nil {
return nil, err
}
}
if err := c.ConfigureViper(v); err != nil {
return nil, err
}
// Use the env from the config if it's not explicitly set
env := c.LoadEnv
if env == "" {
env = v.GetString("env")
}
v = v.Sub(env)
if v == nil {
return nil, fmt.Errorf("cannot find env section named %s", env)
}
mappings, err := GetTagMappings(cfg)
if err != nil {
return nil, errors.Wrap(err, "unable to get tag mappings for config struct")
}
if c.EnvPrefix != "" {
for _, m := range mappings {
v.BindEnv(m.chain, strings.Join([]string{c.EnvPrefix, m.env}, "_"))
}
} else {
for _, m := range mappings {
v.BindEnv(m.chain, m.env)
}
}
if v == nil {
return nil, fmt.Errorf("unable to load environment %q from %q", env, c.File)
}
if flags != nil {
if err := v.BindPFlags(flags); err != nil {
return nil, err
}
}
v.Set("env", env)
return v, nil
}
// ConfigureViper sets the viper object to use the passed in config toml file
// and also configures the configuration environment variables.
func (c *Config) ConfigureViper(v *viper.Viper) error {
v.SetConfigType("toml")
v.SetConfigFile(c.File)
v.SetEnvPrefix(c.EnvPrefix)
v.SetEnvKeyReplacer(strings.NewReplacer("-", "_"))
if err := v.ReadInConfig(); err != nil {
return err
}
v.AutomaticEnv()
return nil
}
// UnmarshalAppConfig unmarshals the viper's configured config file
// into the passed in cfg object containing an AppConfig
func UnmarshalAppConfig(cfg interface{}, v *viper.Viper) error {
err := v.Unmarshal(cfg)
if err != nil {
return err
}
val := reflect.Indirect(reflect.ValueOf(cfg))
// if cfg has an embedded AppConfig then we need to unmarshal
// directly into that and overwrite it in the parent struct,
// since its another layer of indirection and viper
// can't handle it magically.
for i := 0; i < val.NumField(); i++ {
appCfg, ok := val.Field(i).Interface().(AppConfig)
if !ok {
continue
}
v.Unmarshal(&appCfg)
val.Field(i).Set(reflect.ValueOf(appCfg))
// overwrite val to point to the AppConfig so DBConfig can be set below.
val = val.Field(i)
break
}
// Find *DBConfig and set object appropriately
for i := 0; i < val.NumField(); i++ {
dbCfg, ok := val.Field(i).Interface().(DBConfig)
if !ok {
continue
}
if dbCfg.Port == 0 {
dbCfg.Port = 5432
}
if dbCfg.SSLMode == "" {
dbCfg.SSLMode = "require"
}
val.Field(i).Set(reflect.ValueOf(dbCfg))
// Finished working on the db cfg struct, so break out
break
}
return nil
}
// ValidateDBConfig returns an error if any of the required db config
// fields are not set to their appropriate values.
func ValidateDBConfig(cfg DBConfig) error {
err := vala.BeginValidation().Validate(
vala.StringNotEmpty(cfg.User, "user"),
vala.StringNotEmpty(cfg.Host, "host"),
vala.Not(vala.Equals(cfg.Port, 0, "port")),
vala.StringNotEmpty(cfg.DBName, "dbname"),
vala.StringNotEmpty(cfg.SSLMode, "sslmode"),
).Check()
if err != nil {
return err
}
return nil
}
// Mapping represents a chain which is a list of nested object mapstructures
// joined together and seperated by dots (i.e. one.two.three), and the
// accompanying environment variable tag value for the last item in the chain
type Mapping struct {
chain string
env string
}
// Mappings is a slice of mapping
type Mappings []Mapping
func getTagMappingsRecursive(chain string, v reflect.Value) (Mappings, error) {
mappings := Mappings{}
for i := 0; i < v.NumField(); i++ {
cv := v.Field(i)
tag := v.Type().Field(i).Tag
ms := tag.Get("mapstructure")
env := tag.Get("env")
if cv.Kind() == reflect.Ptr {
nv := reflect.Indirect(cv)
// If it has no mapstructure set then fail gracefully,
// because it's probably not a field that should be read by viper.
// For example, a pointer to something that is late-initialized
// and isn't loaded by Bind or present in the config file.
if !nv.IsValid() && ms == "" {
continue
} else if !nv.IsValid() {
return nil, fmt.Errorf("cannot access non-initialized pointer %#v", cv)
}
// Only indirect struct types, if they're valid
if nv.Kind() == reflect.Struct {
cv = nv
}
}
// nc = newchain
var nc string
if chain != "" {
nc = strings.Join([]string{chain, ms}, ".")
} else {
nc = ms
}
switch cv.Kind() {
case reflect.Struct:
m, err := getTagMappingsRecursive(nc, cv)
if err != nil {
return nil, err
}
mappings = append(mappings, m...)
default:
if env != "" && ms != "" {
mappings = append(mappings, Mapping{chain: nc, env: env})
}
}
}
return mappings, nil
}
// GetTagMappings returns the viper .BindEnv mappings for an entire config
// struct.
func GetTagMappings(cfg interface{}) (Mappings, error) {
return getTagMappingsRecursive("", reflect.Indirect(reflect.ValueOf(cfg)))
}
// NewFlagSet creates the set of flags specific to the server and db config
// and the root level config (like --version, --env)
func NewFlagSet() *pflag.FlagSet {
flags := &pflag.FlagSet{}
flags.AddFlagSet(NewRootFlagSet())
flags.AddFlagSet(NewServerFlagSet())
flags.AddFlagSet(NewDBFlagSet())
return flags
}
// NewRootFlagSet returns a list of top level flags (flags that arent contained
// inside an environment section in the config)
func NewRootFlagSet() *pflag.FlagSet | {
flags := &pflag.FlagSet{}
// root level flags
flags.BoolP("version", "", false, "Display the build version hash")
flags.StringP("env", "e", "prod", "The config files environment to load")
return flags
} | identifier_body | |
show_alignment.py | 60
self.qualitaetsListe = ["AA", "GG", "CC", "TT", "CT", "TC", "AG", "GA",
"CA", "AC", "CG", "GC", "TA", "AT", "TG", "GT"]
self.qualitaetsListeProteins = self.__qualitaetsListeProteins()
# beware: the following variable has coupling with the above
# _qualitaetsListeProteins function
self.PROTEIN_ALPHABET = 'ABCDEFGHIKLMNPQRSTVWXYZ_'
self.DNA_ALPHABET = "ACTG_"
self.INPUT_GAP_ZEICHEN = "_"
self.DNA_EXAKTER_MATCH = range(0, 4)
self.DNA_GUTER_MATCH = range(4, 8)
self.DNA_KEIN_GUTER_MATCH = range(8, 16)
# beware: the following 3 variables have coupling with the above
# _qualitaetsListeProteins function
self.AA_EXAKTER_MATCH = range(0, 19)
self.AA_GUTER_MATCH = range(19, 60)
self.AA_KEIN_GUTER_MATCH = range(60, 529)
self.EXAKTER_MATCH_ZEICHEN = "|"
self.GUTER_MATCH_ZEICHEN = ":"
self.KEIN_GUTER_MATCH_ZEICHEN = "."
self.QUAL_GAP_ZEICHEN = " "
# flags, the values of which are given back to the user
self.VALID_DNA_OR_PROTEIN = "valid dna|protein"
self.INVALID_PROTEIN = "invalid protein"
self.INVALID_DNA = "invalid dna"
# deductive and authoritative flag that gives to the script the prior
# information about
self.aligntIsDna = True
# string[] __qualitaetsListeProteins()
def __qualitaetsListeProteins(self):
"""Private function building and returning a quality list analog to
the quality list for dna nucleotides, but based upon the PAM30
Matrix; associated quaity ranges are defined in AA_EXAKTER_MATCH,
AA_GUTER_MATCH, AA_KEIN_GUTER_MATCH and correspond for
AA_GUTER_MATCH to remaining positve scores after removal of exact
matches and for AA_KEIN_GUTER_MATCH to negative scores,
respectively
"""
rv = []
pam30_sortierbar = {}
for key in pam30.keys():
pam30_sortierbar[str(pam30[key]) + ";" + ''.join(key)] = pam30[key]
if key[0] != key[1]:
pam30_sortierbar[
str(pam30[key]) + ";" + ''.join((key[1], key[0]))
] = pam30[key]
sorted_keys = list(pam30_sortierbar.keys())
sorted_keys.sort(key=lambda k: int(k.split(";")[0]), reverse=True)
# debugging kept for historical reasons
# for key in iter(sorted_keys):
# print(key.split(";")[1] + " has score " + str(pam30_sortierbar[key]))
for key in iter(sorted_keys):
rv.append(key.split(";")[1])
return(rv)
# string getQuality(string obereZeile, string untereZeile)
def getQuality(self, obereZeile, untereZeile):
"""Function that returns in the form of a string a quality of an
alignment consisting of two input sequences of dna or proteins.
The quality depnds on the prior input of the user given by the
aligntIsDna Flag. Quality for dna sequence pairs further depends
upon the list 'qualitaetsListe' and for amino acid sequences upon
the list 'qualitaetsListeProteins'.
"""
qualitaetsZeile = ""
if self.aligntIsDna:
_exakter_match_list = self.DNA_EXAKTER_MATCH
_guter_match_list = self.DNA_GUTER_MATCH
_kein_guter_match_list = self.DNA_KEIN_GUTER_MATCH
_qualitaetsListe = self.qualitaetsListe
else:
_exakter_match_list = self.AA_EXAKTER_MATCH
_guter_match_list = self.AA_GUTER_MATCH
_kein_guter_match_list = self.AA_KEIN_GUTER_MATCH
_qualitaetsListe = self.qualitaetsListeProteins
for i in range(len(obereZeile)):
if (
obereZeile[i] == self.INPUT_GAP_ZEICHEN or
untereZeile[i] == self.INPUT_GAP_ZEICHEN
):
qualitaetsZeile += self.QUAL_GAP_ZEICHEN
else:
currentResiduePair = str.upper(obereZeile[i] + untereZeile[i])
# print(currentResiduePair)
indexOfPair = _qualitaetsListe.index(currentResiduePair)
if indexOfPair in _exakter_match_list:
|
if indexOfPair in _guter_match_list:
qualitaetsZeile += self.GUTER_MATCH_ZEICHEN
if indexOfPair in _kein_guter_match_list:
qualitaetsZeile += self.KEIN_GUTER_MATCH_ZEICHEN
return(qualitaetsZeile)
# bool|ValueError showAlignment(cls, string zeile1, string zeile2)
def showAlignment(self, zeile1, zeile2):
"""Function that processes an existing alignment of dna or proteins
into a console output projection to a quality space determined by
the function getQuality and typically consisting of zeile1 on the
top, zeile2 at the bottom and a quality string in between. The
console output is also separated in two sections. The first
section is giving a feedback to the user about the consistency of
the input alignment. In case of a consistent alignment and user
choice (between dna and protein) a second section is displayed
showing the alignment together with its quality within 60 columns
of a properly (fixed fonts, more than 60 columns) configured
console. In case of a succesful output to the console the function
returns True. In case the consistency of the input is falsified,
the first section is gracefully given back to the user, but
processing of the input is interrupted by a ValueError exception.
"""
if (self.inputCheckpoint(zeile1, zeile2)):
# get the quality
quality_zeile = self.getQuality(zeile1, zeile2)
start_index = 0
cutter_index = self.OUTPUT_WIDTH_IN_COLS
while (start_index < len(quality_zeile)):
print(zeile1[start_index:cutter_index])
print(quality_zeile[start_index:cutter_index])
print(zeile2[start_index:cutter_index])
start_index = cutter_index
targeted_end_index = cutter_index + self.OUTPUT_WIDTH_IN_COLS
if targeted_end_index <= len(quality_zeile):
cutter_index = targeted_end_index
else:
cutter_index = len(quality_zeile)
return True
# {residueIndex : int, residue : char, recognizedAlphabet : string} getValidityOfResiduesInSequence(string seq)
def getValidityOfResiduesInSequence(self, seq):
"""Function returning the consistency of an individual input sequence
as a dictionary containing in the inconsistent case the residue
location and value of the first inconsistency and values
confirming the validity of the input sequence otherwise.
"""
seqList = list(seq)
aSpotted_Index = -1
aSpotted_residue = ""
if self.aligntIsDna:
_alphabet = self.DNA_ALPHABET
else:
_alphabet = self.PROTEIN_ALPHABET
# iterate over the sequence given the prior knowldege of the user
for i in range(len(seqList)):
residue = seqList[i]
if str.upper(residue) not in list(_alphabet):
aSpotted_Index = i
aSpotted_residue = residue
break
rv = {
"residueIndex": aSpotted_Index,
"residue": aSpotted_residue,
"recognizedAlphabet": self.VALID_DNA_OR_PROTEIN
}
if (aSpotted_residue != ""):
if self.aligntIsDna:
rv["recognizedAlphabet"] = self.INVALID_DNA
else:
rv["recognizedAlphabet"] = self.INVALID_PROTEIN
return(rv)
# bool|ValueError inputCheckpoint(string obereZeile, string untereZeile)
def inputCheckpoint(self, obereZeile, untereZeile):
"""Function checking the consistency of an alignment and generating
output of the first section of showAlignment in its behalf. If an
inconsistency is detected information about reasons for | qualitaetsZeile += self.EXAKTER_MATCH_ZEICHEN | conditional_block |
show_alignment.py | _qualitaetsListeProteins function
self.AA_EXAKTER_MATCH = range(0, 19)
self.AA_GUTER_MATCH = range(19, 60)
self.AA_KEIN_GUTER_MATCH = range(60, 529)
self.EXAKTER_MATCH_ZEICHEN = "|"
self.GUTER_MATCH_ZEICHEN = ":"
self.KEIN_GUTER_MATCH_ZEICHEN = "."
self.QUAL_GAP_ZEICHEN = " "
# flags, the values of which are given back to the user
self.VALID_DNA_OR_PROTEIN = "valid dna|protein"
self.INVALID_PROTEIN = "invalid protein"
self.INVALID_DNA = "invalid dna"
# deductive and authoritative flag that gives to the script the prior
# information about
self.aligntIsDna = True
# string[] __qualitaetsListeProteins()
def __qualitaetsListeProteins(self):
"""Private function building and returning a quality list analog to
the quality list for dna nucleotides, but based upon the PAM30
Matrix; associated quaity ranges are defined in AA_EXAKTER_MATCH,
AA_GUTER_MATCH, AA_KEIN_GUTER_MATCH and correspond for
AA_GUTER_MATCH to remaining positve scores after removal of exact
matches and for AA_KEIN_GUTER_MATCH to negative scores,
respectively
"""
rv = []
pam30_sortierbar = {}
for key in pam30.keys():
pam30_sortierbar[str(pam30[key]) + ";" + ''.join(key)] = pam30[key]
if key[0] != key[1]:
pam30_sortierbar[
str(pam30[key]) + ";" + ''.join((key[1], key[0]))
] = pam30[key]
sorted_keys = list(pam30_sortierbar.keys())
sorted_keys.sort(key=lambda k: int(k.split(";")[0]), reverse=True)
# debugging kept for historical reasons
# for key in iter(sorted_keys):
# print(key.split(";")[1] + " has score " + str(pam30_sortierbar[key]))
for key in iter(sorted_keys):
rv.append(key.split(";")[1])
return(rv)
# string getQuality(string obereZeile, string untereZeile)
def getQuality(self, obereZeile, untereZeile):
"""Function that returns in the form of a string a quality of an
alignment consisting of two input sequences of dna or proteins.
The quality depnds on the prior input of the user given by the
aligntIsDna Flag. Quality for dna sequence pairs further depends
upon the list 'qualitaetsListe' and for amino acid sequences upon
the list 'qualitaetsListeProteins'.
"""
qualitaetsZeile = ""
if self.aligntIsDna:
_exakter_match_list = self.DNA_EXAKTER_MATCH
_guter_match_list = self.DNA_GUTER_MATCH
_kein_guter_match_list = self.DNA_KEIN_GUTER_MATCH
_qualitaetsListe = self.qualitaetsListe
else:
_exakter_match_list = self.AA_EXAKTER_MATCH
_guter_match_list = self.AA_GUTER_MATCH
_kein_guter_match_list = self.AA_KEIN_GUTER_MATCH
_qualitaetsListe = self.qualitaetsListeProteins
for i in range(len(obereZeile)):
if (
obereZeile[i] == self.INPUT_GAP_ZEICHEN or
untereZeile[i] == self.INPUT_GAP_ZEICHEN
):
qualitaetsZeile += self.QUAL_GAP_ZEICHEN
else:
currentResiduePair = str.upper(obereZeile[i] + untereZeile[i])
# print(currentResiduePair)
indexOfPair = _qualitaetsListe.index(currentResiduePair)
if indexOfPair in _exakter_match_list:
qualitaetsZeile += self.EXAKTER_MATCH_ZEICHEN
if indexOfPair in _guter_match_list:
qualitaetsZeile += self.GUTER_MATCH_ZEICHEN
if indexOfPair in _kein_guter_match_list:
qualitaetsZeile += self.KEIN_GUTER_MATCH_ZEICHEN
return(qualitaetsZeile)
# bool|ValueError showAlignment(cls, string zeile1, string zeile2)
def showAlignment(self, zeile1, zeile2):
"""Function that processes an existing alignment of dna or proteins
into a console output projection to a quality space determined by
the function getQuality and typically consisting of zeile1 on the
top, zeile2 at the bottom and a quality string in between. The
console output is also separated in two sections. The first
section is giving a feedback to the user about the consistency of
the input alignment. In case of a consistent alignment and user
choice (between dna and protein) a second section is displayed
showing the alignment together with its quality within 60 columns
of a properly (fixed fonts, more than 60 columns) configured
console. In case of a succesful output to the console the function
returns True. In case the consistency of the input is falsified,
the first section is gracefully given back to the user, but
processing of the input is interrupted by a ValueError exception.
"""
if (self.inputCheckpoint(zeile1, zeile2)):
# get the quality
quality_zeile = self.getQuality(zeile1, zeile2)
start_index = 0
cutter_index = self.OUTPUT_WIDTH_IN_COLS
while (start_index < len(quality_zeile)):
print(zeile1[start_index:cutter_index])
print(quality_zeile[start_index:cutter_index])
print(zeile2[start_index:cutter_index])
start_index = cutter_index
targeted_end_index = cutter_index + self.OUTPUT_WIDTH_IN_COLS
if targeted_end_index <= len(quality_zeile):
cutter_index = targeted_end_index
else:
cutter_index = len(quality_zeile)
return True
# {residueIndex : int, residue : char, recognizedAlphabet : string} getValidityOfResiduesInSequence(string seq)
def getValidityOfResiduesInSequence(self, seq):
"""Function returning the consistency of an individual input sequence
as a dictionary containing in the inconsistent case the residue
location and value of the first inconsistency and values
confirming the validity of the input sequence otherwise.
"""
seqList = list(seq)
aSpotted_Index = -1
aSpotted_residue = ""
if self.aligntIsDna:
_alphabet = self.DNA_ALPHABET
else:
_alphabet = self.PROTEIN_ALPHABET
# iterate over the sequence given the prior knowldege of the user
for i in range(len(seqList)):
residue = seqList[i]
if str.upper(residue) not in list(_alphabet):
aSpotted_Index = i
aSpotted_residue = residue
break
rv = {
"residueIndex": aSpotted_Index,
"residue": aSpotted_residue,
"recognizedAlphabet": self.VALID_DNA_OR_PROTEIN
}
if (aSpotted_residue != ""):
if self.aligntIsDna:
rv["recognizedAlphabet"] = self.INVALID_DNA
else:
rv["recognizedAlphabet"] = self.INVALID_PROTEIN
return(rv)
# bool|ValueError inputCheckpoint(string obereZeile, string untereZeile)
def inputCheckpoint(self, obereZeile, untereZeile):
| """Function checking the consistency of an alignment and generating
output of the first section of showAlignment in its behalf. If an
inconsistency is detected information about reasons for stopping
further processing is given back to the user and a ValueError is
raised. In case no inconsistency is found a summary report is also
generated and the function returns True. The function accepts 1)
only equal length for obereZeile, untereZeile 2) only the input
alphabets + INPUT_GAP_ZEICHEN ("_")
"""
rv = True
# 1) only equal length for obereZeile, untereZeile
if (len(obereZeile) != len(untereZeile)):
print("============================================================")
print("input sequences do not have the same length")
print("============================================================")
raise ValueError("Input sequences of different lengths")
# 2) only the input alphabets + INPUT_GAP_ZEICHEN ("_")
validityInObereZeile = self.getValidityOfResiduesInSequence(obereZeile)
validityInUntereZeile = self.getValidityOfResiduesInSequence(untereZeile) | identifier_body | |
show_alignment.py | 0
self.qualitaetsListe = ["AA", "GG", "CC", "TT", "CT", "TC", "AG", "GA",
"CA", "AC", "CG", "GC", "TA", "AT", "TG", "GT"]
self.qualitaetsListeProteins = self.__qualitaetsListeProteins()
# beware: the following variable has coupling with the above
# _qualitaetsListeProteins function
self.PROTEIN_ALPHABET = 'ABCDEFGHIKLMNPQRSTVWXYZ_'
self.DNA_ALPHABET = "ACTG_"
self.INPUT_GAP_ZEICHEN = "_"
self.DNA_EXAKTER_MATCH = range(0, 4)
self.DNA_GUTER_MATCH = range(4, 8)
self.DNA_KEIN_GUTER_MATCH = range(8, 16)
# beware: the following 3 variables have coupling with the above
# _qualitaetsListeProteins function
self.AA_EXAKTER_MATCH = range(0, 19)
self.AA_GUTER_MATCH = range(19, 60)
self.AA_KEIN_GUTER_MATCH = range(60, 529)
self.EXAKTER_MATCH_ZEICHEN = "|"
self.GUTER_MATCH_ZEICHEN = ":"
self.KEIN_GUTER_MATCH_ZEICHEN = "."
self.QUAL_GAP_ZEICHEN = " "
# flags, the values of which are given back to the user
self.VALID_DNA_OR_PROTEIN = "valid dna|protein"
self.INVALID_PROTEIN = "invalid protein"
self.INVALID_DNA = "invalid dna"
# deductive and authoritative flag that gives to the script the prior
# information about
self.aligntIsDna = True
# string[] __qualitaetsListeProteins()
def | (self):
"""Private function building and returning a quality list analog to
the quality list for dna nucleotides, but based upon the PAM30
Matrix; associated quaity ranges are defined in AA_EXAKTER_MATCH,
AA_GUTER_MATCH, AA_KEIN_GUTER_MATCH and correspond for
AA_GUTER_MATCH to remaining positve scores after removal of exact
matches and for AA_KEIN_GUTER_MATCH to negative scores,
respectively
"""
rv = []
pam30_sortierbar = {}
for key in pam30.keys():
pam30_sortierbar[str(pam30[key]) + ";" + ''.join(key)] = pam30[key]
if key[0] != key[1]:
pam30_sortierbar[
str(pam30[key]) + ";" + ''.join((key[1], key[0]))
] = pam30[key]
sorted_keys = list(pam30_sortierbar.keys())
sorted_keys.sort(key=lambda k: int(k.split(";")[0]), reverse=True)
# debugging kept for historical reasons
# for key in iter(sorted_keys):
# print(key.split(";")[1] + " has score " + str(pam30_sortierbar[key]))
for key in iter(sorted_keys):
rv.append(key.split(";")[1])
return(rv)
# string getQuality(string obereZeile, string untereZeile)
def getQuality(self, obereZeile, untereZeile):
"""Function that returns in the form of a string a quality of an
alignment consisting of two input sequences of dna or proteins.
The quality depnds on the prior input of the user given by the
aligntIsDna Flag. Quality for dna sequence pairs further depends
upon the list 'qualitaetsListe' and for amino acid sequences upon
the list 'qualitaetsListeProteins'.
"""
qualitaetsZeile = ""
if self.aligntIsDna:
_exakter_match_list = self.DNA_EXAKTER_MATCH
_guter_match_list = self.DNA_GUTER_MATCH
_kein_guter_match_list = self.DNA_KEIN_GUTER_MATCH
_qualitaetsListe = self.qualitaetsListe
else:
_exakter_match_list = self.AA_EXAKTER_MATCH
_guter_match_list = self.AA_GUTER_MATCH
_kein_guter_match_list = self.AA_KEIN_GUTER_MATCH
_qualitaetsListe = self.qualitaetsListeProteins
for i in range(len(obereZeile)):
if (
obereZeile[i] == self.INPUT_GAP_ZEICHEN or
untereZeile[i] == self.INPUT_GAP_ZEICHEN
):
qualitaetsZeile += self.QUAL_GAP_ZEICHEN
else:
currentResiduePair = str.upper(obereZeile[i] + untereZeile[i])
# print(currentResiduePair)
indexOfPair = _qualitaetsListe.index(currentResiduePair)
if indexOfPair in _exakter_match_list:
qualitaetsZeile += self.EXAKTER_MATCH_ZEICHEN
if indexOfPair in _guter_match_list:
qualitaetsZeile += self.GUTER_MATCH_ZEICHEN
if indexOfPair in _kein_guter_match_list:
qualitaetsZeile += self.KEIN_GUTER_MATCH_ZEICHEN
return(qualitaetsZeile)
# bool|ValueError showAlignment(cls, string zeile1, string zeile2)
def showAlignment(self, zeile1, zeile2):
"""Function that processes an existing alignment of dna or proteins
into a console output projection to a quality space determined by
the function getQuality and typically consisting of zeile1 on the
top, zeile2 at the bottom and a quality string in between. The
console output is also separated in two sections. The first
section is giving a feedback to the user about the consistency of
the input alignment. In case of a consistent alignment and user
choice (between dna and protein) a second section is displayed
showing the alignment together with its quality within 60 columns
of a properly (fixed fonts, more than 60 columns) configured
console. In case of a succesful output to the console the function
returns True. In case the consistency of the input is falsified,
the first section is gracefully given back to the user, but
processing of the input is interrupted by a ValueError exception.
"""
if (self.inputCheckpoint(zeile1, zeile2)):
# get the quality
quality_zeile = self.getQuality(zeile1, zeile2)
start_index = 0
cutter_index = self.OUTPUT_WIDTH_IN_COLS
while (start_index < len(quality_zeile)):
print(zeile1[start_index:cutter_index])
print(quality_zeile[start_index:cutter_index])
print(zeile2[start_index:cutter_index])
start_index = cutter_index
targeted_end_index = cutter_index + self.OUTPUT_WIDTH_IN_COLS
if targeted_end_index <= len(quality_zeile):
cutter_index = targeted_end_index
else:
cutter_index = len(quality_zeile)
return True
# {residueIndex : int, residue : char, recognizedAlphabet : string} getValidityOfResiduesInSequence(string seq)
def getValidityOfResiduesInSequence(self, seq):
"""Function returning the consistency of an individual input sequence
as a dictionary containing in the inconsistent case the residue
location and value of the first inconsistency and values
confirming the validity of the input sequence otherwise.
"""
seqList = list(seq)
aSpotted_Index = -1
aSpotted_residue = ""
if self.aligntIsDna:
_alphabet = self.DNA_ALPHABET
else:
_alphabet = self.PROTEIN_ALPHABET
# iterate over the sequence given the prior knowldege of the user
for i in range(len(seqList)):
residue = seqList[i]
if str.upper(residue) not in list(_alphabet):
aSpotted_Index = i
aSpotted_residue = residue
break
rv = {
"residueIndex": aSpotted_Index,
"residue": aSpotted_residue,
"recognizedAlphabet": self.VALID_DNA_OR_PROTEIN
}
if (aSpotted_residue != ""):
if self.aligntIsDna:
rv["recognizedAlphabet"] = self.INVALID_DNA
else:
rv["recognizedAlphabet"] = self.INVALID_PROTEIN
return(rv)
# bool|ValueError inputCheckpoint(string obereZeile, string untereZeile)
def inputCheckpoint(self, obereZeile, untereZeile):
"""Function checking the consistency of an alignment and generating
output of the first section of showAlignment in its behalf. If an
inconsistency is detected information about reasons for | __qualitaetsListeProteins | identifier_name |
show_alignment.py | # "Skelett" des Programs zum Zeigen eines Alignments mit einer
# Qualitaetszeile Haltet euch bitte an diese Struktur - dort wo das
# "pass" steht muss euer Quelltext kommen. Das pass muss dazu
# geloescht werden.
# Autor: Alex Finck
# Datum der letzten Aenderung: 09.07.2020
#
# usage examples from the command line:
# > python show_alignment.py "AACTG_GTCAT" "AGTCAA_CTGA"
# > python show_alignment.py -iprotein "ACTG_GTCA" "GTCAA_CTG"
from Bio.SubsMat.MatrixInfo import pam30
import argparse
class ShowAlignment:
def __init__(self, aligntIsDna=True):
# Liste die angibt wie gut ein Match ist. Siehe Aufgabenzettel -
self.OUTPUT_WIDTH_IN_COLS = 60
self.qualitaetsListe = ["AA", "GG", "CC", "TT", "CT", "TC", "AG", "GA",
"CA", "AC", "CG", "GC", "TA", "AT", "TG", "GT"]
self.qualitaetsListeProteins = self.__qualitaetsListeProteins()
# beware: the following variable has coupling with the above
# _qualitaetsListeProteins function
self.PROTEIN_ALPHABET = 'ABCDEFGHIKLMNPQRSTVWXYZ_'
self.DNA_ALPHABET = "ACTG_"
self.INPUT_GAP_ZEICHEN = "_"
self.DNA_EXAKTER_MATCH = range(0, 4)
self.DNA_GUTER_MATCH = range(4, 8)
self.DNA_KEIN_GUTER_MATCH = range(8, 16)
# beware: the following 3 variables have coupling with the above
# _qualitaetsListeProteins function
self.AA_EXAKTER_MATCH = range(0, 19)
self.AA_GUTER_MATCH = range(19, 60)
self.AA_KEIN_GUTER_MATCH = range(60, 529)
self.EXAKTER_MATCH_ZEICHEN = "|"
self.GUTER_MATCH_ZEICHEN = ":"
self.KEIN_GUTER_MATCH_ZEICHEN = "."
self.QUAL_GAP_ZEICHEN = " "
# flags, the values of which are given back to the user
self.VALID_DNA_OR_PROTEIN = "valid dna|protein"
self.INVALID_PROTEIN = "invalid protein"
self.INVALID_DNA = "invalid dna"
# deductive and authoritative flag that gives to the script the prior
# information about
self.aligntIsDna = True
# string[] __qualitaetsListeProteins()
def __qualitaetsListeProteins(self):
"""Private function building and returning a quality list analog to
the quality list for dna nucleotides, but based upon the PAM30
Matrix; associated quaity ranges are defined in AA_EXAKTER_MATCH,
AA_GUTER_MATCH, AA_KEIN_GUTER_MATCH and correspond for
AA_GUTER_MATCH to remaining positve scores after removal of exact
matches and for AA_KEIN_GUTER_MATCH to negative scores,
respectively
"""
rv = []
pam30_sortierbar = {}
for key in pam30.keys():
pam30_sortierbar[str(pam30[key]) + ";" + ''.join(key)] = pam30[key]
if key[0] != key[1]:
pam30_sortierbar[
str(pam30[key]) + ";" + ''.join((key[1], key[0]))
] = pam30[key]
sorted_keys = list(pam30_sortierbar.keys())
sorted_keys.sort(key=lambda k: int(k.split(";")[0]), reverse=True)
# debugging kept for historical reasons
# for key in iter(sorted_keys):
# print(key.split(";")[1] + " has score " + str(pam30_sortierbar[key]))
for key in iter(sorted_keys):
rv.append(key.split(";")[1])
return(rv)
# string getQuality(string obereZeile, string untereZeile)
def getQuality(self, obereZeile, untereZeile):
"""Function that returns in the form of a string a quality of an
alignment consisting of two input sequences of dna or proteins.
The quality depnds on the prior input of the user given by the
aligntIsDna Flag. Quality for dna sequence pairs further depends
upon the list 'qualitaetsListe' and for amino acid sequences upon
the list 'qualitaetsListeProteins'.
"""
qualitaetsZeile = ""
if self.aligntIsDna:
_exakter_match_list = self.DNA_EXAKTER_MATCH
_guter_match_list = self.DNA_GUTER_MATCH
_kein_guter_match_list = self.DNA_KEIN_GUTER_MATCH
_qualitaetsListe = self.qualitaetsListe
else:
_exakter_match_list = self.AA_EXAKTER_MATCH
_guter_match_list = self.AA_GUTER_MATCH
_kein_guter_match_list = self.AA_KEIN_GUTER_MATCH
_qualitaetsListe = self.qualitaetsListeProteins
for i in range(len(obereZeile)):
if (
obereZeile[i] == self.INPUT_GAP_ZEICHEN or
untereZeile[i] == self.INPUT_GAP_ZEICHEN
):
qualitaetsZeile += self.QUAL_GAP_ZEICHEN
else:
currentResiduePair = str.upper(obereZeile[i] + untereZeile[i])
# print(currentResiduePair)
indexOfPair = _qualitaetsListe.index(currentResiduePair)
if indexOfPair in _exakter_match_list:
qualitaetsZeile += self.EXAKTER_MATCH_ZEICHEN
if indexOfPair in _guter_match_list:
qualitaetsZeile += self.GUTER_MATCH_ZEICHEN
if indexOfPair in _kein_guter_match_list:
qualitaetsZeile += self.KEIN_GUTER_MATCH_ZEICHEN
return(qualitaetsZeile)
# bool|ValueError showAlignment(cls, string zeile1, string zeile2)
def showAlignment(self, zeile1, zeile2):
"""Function that processes an existing alignment of dna or proteins
into a console output projection to a quality space determined by
the function getQuality and typically consisting of zeile1 on the
top, zeile2 at the bottom and a quality string in between. The
console output is also separated in two sections. The first
section is giving a feedback to the user about the consistency of
the input alignment. In case of a consistent alignment and user
choice (between dna and protein) a second section is displayed
showing the alignment together with its quality within 60 columns
of a properly (fixed fonts, more than 60 columns) configured
console. In case of a succesful output to the console the function
returns True. In case the consistency of the input is falsified,
the first section is gracefully given back to the user, but
processing of the input is interrupted by a ValueError exception.
"""
if (self.inputCheckpoint(zeile1, zeile2)):
# get the quality
quality_zeile = self.getQuality(zeile1, zeile2)
start_index = 0
cutter_index = self.OUTPUT_WIDTH_IN_COLS
while (start_index < len(quality_zeile)):
print(zeile1[start_index:cutter_index])
print(quality_zeile[start_index:cutter_index])
print(zeile2[start_index:cutter_index])
start_index = cutter_index
targeted_end_index = cutter_index + self.OUTPUT_WIDTH_IN_COLS
if targeted_end_index <= len(quality_zeile):
cutter_index = targeted_end_index
else:
cutter_index = len(quality_zeile)
return True
# {residueIndex : int, residue : char, recognizedAlphabet : string} getValidityOfResiduesInSequence(string seq)
def getValidityOfResiduesInSequence(self, seq):
"""Function returning the consistency of an individual input sequence
as a dictionary containing in the inconsistent case the residue
location and value of the first inconsistency and values
confirming the validity of the input sequence otherwise.
"""
seqList = list(seq)
aSpotted_Index = -1
aSpotted_residue = ""
if self.aligntIsDna:
_alphabet = self.DNA_ALPHABET
else:
_alphabet = self.PROTEIN_ALPHABET
# iterate over the sequence given the prior knowldege of the user
for i in range(len(seqList)):
| until the length of the alignment is reached.
""" | random_line_split | |
lib.rs | fn gen_doc_rust(crates_dir: Option<&Path>, manifest_path: Option<&Path>) -> anyhow::Result<()> {
let metadata = &cargo_metadata(manifest_path)?;
let resolve = metadata.resolve.as_ref().expect("should be present");
let resolve_root = resolve
.root
.as_ref()
.with_context(|| "this is a virtual manifest")?;
let extern_crate_names = {
let explicit_names_in_toml = metadata[resolve_root]
.dependencies
.iter()
.flat_map(|d| &d.rename)
.collect::<HashSet<_>>();
let cm::Node { deps, .. } = resolve
.nodes
.iter()
.find(|cm::Node { id, .. }| id == resolve_root)
.unwrap();
deps.iter()
.filter(|cm::NodeDep { dep_kinds, .. }| {
dep_kinds
.iter()
.any(|cm::DepKindInfo { kind, .. }| *kind != cm::DependencyKind::Build)
})
.flat_map(|cm::NodeDep { name, pkg, .. }| {
let extern_crate_name = if explicit_names_in_toml.contains(name) {
name
} else {
&metadata[pkg].lib_or_proc_macro()?.name
};
Some((pkg, extern_crate_name))
})
.collect::<HashMap<_, _>>()
};
let lib = metadata[resolve_root]
.lib_or_proc_macro()
.with_context(|| format!("missing `lib|proc-macro` target in `{}`", resolve_root))?;
let crates_dir = &crates_dir
.map(ToOwned::to_owned)
.unwrap_or_else(|| metadata.workspace_root.join("crates"));
let library_crates = metadata
.workspace_members
.iter()
.flat_map(|ws_member| {
let ws_member = &metadata[ws_member];
let target = ws_member.lib_or_proc_macro()?;
let src_path = dunce::canonicalize(&target.src_path).ok()?;
let path1 = ws_member.manifest_path.strip_prefix(crates_dir).ok()?;
let path2 = src_path.strip_prefix(crates_dir).ok()?;
let common_path = itertools::zip(path1, path2)
.filter(|(s1, s2)| s1 == s2)
.map(|(segment, _)| segment.to_str().expect("this is from a JSON"))
.collect();
Some((common_path, ws_member, target))
})
.collect::<Vec<(Vec<_>, _, _)>>();
let mut tree = Tree::default();
for (path, package, target) in &library_crates {
tree.insert(path, package, target);
}
let generated = &mut indoc! {r"
// This file is automatically generated by `cargo-online-judge-verification-helper-helper`.
//! Re-exports the library crates for rustdoc.
//!
//! This crate itself is not intended to be used directly.
"}
.to_owned();
tree.expand(&|id| &extern_crate_names[id], generated)?;
fs::write(&lib.src_path, apply_rustfmt(generated)?)?;
eprintln!("{:>12} {}", "Wrote", lib.src_path.display());
return Ok(());
enum Tree<'cm> {
Leaf(&'cm cm::PackageId, &'cm cm::Target),
Joint(BTreeMap<&'cm str, Self>),
}
impl<'cm> Tree<'cm> {
fn | (
&mut self,
path: &[&'cm str],
package: &'cm cm::Package,
target: &'cm cm::Target,
) {
match (self, path) {
(Self::Joint(joint), []) => {
joint.insert(&target.name, Self::Leaf(&package.id, target));
}
(Self::Joint(joint), [segment, path @ ..]) => {
joint
.entry(segment)
.or_default()
.insert(path, package, target);
}
_ => panic!(),
}
}
fn expand(
&self,
extern_crate_name: &impl Fn(&cm::PackageId) -> &'cm str,
out: &mut String,
) -> anyhow::Result<()> {
match self {
Self::Leaf(package_id, target) => {
let doc = target.read_crate_level_doc()?;
if !doc.is_empty() {
*out += "#![doc=";
*out += "e!(#doc).to_string();
*out += "]";
}
*out += "pub use ::";
*out += extern_crate_name(package_id);
*out += "::*;"
}
Self::Joint(joint) => {
for (segment, node) in joint {
*out += "pub mod ";
*out += segment;
*out += "{";
node.expand(extern_crate_name, out)?;
*out += "}";
}
}
}
Ok(())
}
}
impl Default for Tree<'_> {
fn default() -> Self {
Self::Joint(BTreeMap::new())
}
}
fn apply_rustfmt(code: &str) -> anyhow::Result<String> {
let rustfmt_exe = PathBuf::from(
env::var_os("CARGO").with_context(|| "missing `$CARGO` environment variable")?,
)
.with_file_name("rustfmt")
.with_extension(env::consts::EXE_EXTENSION);
let tempdir = tempfile::Builder::new()
.prefix("qryxip-competitive-programming-library-xtask-")
.tempdir()?;
let path = tempdir.path().join("lib.rs");
fs::write(&path, code)?;
cmd!(rustfmt_exe, "--edition", "2018", &path).run()?;
let code = fs::read_to_string(path)?;
tempdir.close()?;
Ok(code)
}
}
pub fn gen_doc_oj_verify(
md_dir: Option<&Path>,
manifest_path: Option<&Path>,
) -> anyhow::Result<()> {
let metadata = &cargo_metadata(manifest_path)?;
let library_crates = metadata
.workspace_members
.iter()
.flat_map(|ws_member| {
let ws_member = &metadata[ws_member];
let target = ws_member.lib_or_proc_macro()?;
Some((ws_member, target))
})
.collect::<Vec<_>>();
let md_dir = &md_dir
.map(ToOwned::to_owned)
.unwrap_or_else(|| metadata.workspace_root.join("md"));
fs::create_dir_all(md_dir)
.with_context(|| format!("could not create `{}`", md_dir.display()))?;
for (package, target) in library_crates {
let markdown = format!(
"---\n\
title: \"{} (<code>{}</code>)\"\n\
documentation_of: //{}\n\
---\n\
{}",
package.name,
target.name,
target
.src_path
.strip_prefix(&metadata.workspace_root)
.unwrap_or(&target.src_path)
.display(),
modify_doc_for_oj_verify(&target.read_crate_level_doc()?),
);
let markdown_path = &md_dir.join(&package.name).with_extension("md");
fs::write(markdown_path, markdown)
.with_context(|| format!("could not write `{}`", markdown_path.display()))?;
eprintln!("{:>12} {}", "Wrote", markdown_path.display());
}
return Ok(());
/// Inserts `rust` in code blocks without language specification.
fn modify_doc_for_oj_verify(doc: &str) -> String {
use pulldown_cmark::{CodeBlockKind, Event, Options, Parser, Tag};
let mut doc = match doc {
"" => "\n".to_owned(),
doc => doc
.lines()
.map(|s| s.strip_prefix(' ').unwrap_or(s).to_owned() + "\n")
.join(""),
};
#[allow(clippy::redundant_clone)]
for (_, Range { start, .. }) in Parser::new_ext(&doc.clone(), Options::all())
.into_offset_iter()
.filter(|(event, _)| {
matches!(
event,
Event::Start(Tag::CodeBlock(CodeBlockKind::Fenced(kind))) if kind.is_empty()
)
})
.collect::<Vec<_>>()
.into_iter()
.rev()
{
let mut pos = start;
while doc.as_bytes()[pos] == b'`' {
pos += 1;
}
doc.insert_str(pos, "rust");
}
doc
}
}
fn cargo_metadata(manifest_path: Option<&Path>) -> anyhow::Result<cm::Metadata> {
let mut cmd = cm::MetadataCommand::new();
if let Some(manifest_path) = manifest_path {
cmd.manifest_path(manifest_path);
}
cmd.exec().map_err(|err| match err {
cm::Error::CargoMetadata { stderr } => {
anyhow!("{}", stderr.trim_start_matches("error: ").trim_end())
}
err => anyhow::Error::msg(err),
})
}
trait PackageExt {
fn lib_or_proc_macro(&self) -> Option<&cm::Target>;
}
impl PackageExt for cm::Package {
fn lib_or_proc_macro(&self) -> Option | insert | identifier_name |
lib.rs | fn gen_doc_rust(crates_dir: Option<&Path>, manifest_path: Option<&Path>) -> anyhow::Result<()> {
let metadata = &cargo_metadata(manifest_path)?;
let resolve = metadata.resolve.as_ref().expect("should be present");
let resolve_root = resolve
.root
.as_ref()
.with_context(|| "this is a virtual manifest")?;
let extern_crate_names = {
let explicit_names_in_toml = metadata[resolve_root]
.dependencies
.iter()
.flat_map(|d| &d.rename)
.collect::<HashSet<_>>();
let cm::Node { deps, .. } = resolve
.nodes
.iter()
.find(|cm::Node { id, .. }| id == resolve_root)
.unwrap();
deps.iter()
.filter(|cm::NodeDep { dep_kinds, .. }| {
dep_kinds
.iter()
.any(|cm::DepKindInfo { kind, .. }| *kind != cm::DependencyKind::Build)
})
.flat_map(|cm::NodeDep { name, pkg, .. }| {
let extern_crate_name = if explicit_names_in_toml.contains(name) {
name
} else {
&metadata[pkg].lib_or_proc_macro()?.name
};
Some((pkg, extern_crate_name))
})
.collect::<HashMap<_, _>>()
};
let lib = metadata[resolve_root]
.lib_or_proc_macro()
.with_context(|| format!("missing `lib|proc-macro` target in `{}`", resolve_root))?;
let crates_dir = &crates_dir
.map(ToOwned::to_owned)
.unwrap_or_else(|| metadata.workspace_root.join("crates"));
let library_crates = metadata
.workspace_members
.iter()
.flat_map(|ws_member| {
let ws_member = &metadata[ws_member];
let target = ws_member.lib_or_proc_macro()?;
let src_path = dunce::canonicalize(&target.src_path).ok()?;
let path1 = ws_member.manifest_path.strip_prefix(crates_dir).ok()?;
let path2 = src_path.strip_prefix(crates_dir).ok()?;
let common_path = itertools::zip(path1, path2)
.filter(|(s1, s2)| s1 == s2)
.map(|(segment, _)| segment.to_str().expect("this is from a JSON"))
.collect();
Some((common_path, ws_member, target))
})
.collect::<Vec<(Vec<_>, _, _)>>();
let mut tree = Tree::default();
for (path, package, target) in &library_crates {
tree.insert(path, package, target);
}
let generated = &mut indoc! {r"
// This file is automatically generated by `cargo-online-judge-verification-helper-helper`.
//! Re-exports the library crates for rustdoc.
//!
//! This crate itself is not intended to be used directly.
"}
.to_owned();
tree.expand(&|id| &extern_crate_names[id], generated)?;
fs::write(&lib.src_path, apply_rustfmt(generated)?)?;
eprintln!("{:>12} {}", "Wrote", lib.src_path.display());
return Ok(());
enum Tree<'cm> {
Leaf(&'cm cm::PackageId, &'cm cm::Target),
Joint(BTreeMap<&'cm str, Self>),
}
impl<'cm> Tree<'cm> {
fn insert(
&mut self,
path: &[&'cm str],
package: &'cm cm::Package,
target: &'cm cm::Target,
) {
match (self, path) {
(Self::Joint(joint), []) => {
joint.insert(&target.name, Self::Leaf(&package.id, target));
}
(Self::Joint(joint), [segment, path @ ..]) => {
joint
.entry(segment)
.or_default()
.insert(path, package, target);
}
_ => panic!(),
}
}
fn expand(
&self,
extern_crate_name: &impl Fn(&cm::PackageId) -> &'cm str,
out: &mut String,
) -> anyhow::Result<()> {
match self {
Self::Leaf(package_id, target) => {
let doc = target.read_crate_level_doc()?;
if !doc.is_empty() {
*out += "#![doc=";
*out += "e!(#doc).to_string();
*out += "]";
}
*out += "pub use ::";
*out += extern_crate_name(package_id);
*out += "::*;"
}
Self::Joint(joint) => {
for (segment, node) in joint {
*out += "pub mod ";
*out += segment;
*out += "{";
node.expand(extern_crate_name, out)?;
*out += "}";
}
}
}
Ok(())
}
}
impl Default for Tree<'_> {
fn default() -> Self {
Self::Joint(BTreeMap::new())
}
} | env::var_os("CARGO").with_context(|| "missing `$CARGO` environment variable")?,
)
.with_file_name("rustfmt")
.with_extension(env::consts::EXE_EXTENSION);
let tempdir = tempfile::Builder::new()
.prefix("qryxip-competitive-programming-library-xtask-")
.tempdir()?;
let path = tempdir.path().join("lib.rs");
fs::write(&path, code)?;
cmd!(rustfmt_exe, "--edition", "2018", &path).run()?;
let code = fs::read_to_string(path)?;
tempdir.close()?;
Ok(code)
}
}
pub fn gen_doc_oj_verify(
md_dir: Option<&Path>,
manifest_path: Option<&Path>,
) -> anyhow::Result<()> {
let metadata = &cargo_metadata(manifest_path)?;
let library_crates = metadata
.workspace_members
.iter()
.flat_map(|ws_member| {
let ws_member = &metadata[ws_member];
let target = ws_member.lib_or_proc_macro()?;
Some((ws_member, target))
})
.collect::<Vec<_>>();
let md_dir = &md_dir
.map(ToOwned::to_owned)
.unwrap_or_else(|| metadata.workspace_root.join("md"));
fs::create_dir_all(md_dir)
.with_context(|| format!("could not create `{}`", md_dir.display()))?;
for (package, target) in library_crates {
let markdown = format!(
"---\n\
title: \"{} (<code>{}</code>)\"\n\
documentation_of: //{}\n\
---\n\
{}",
package.name,
target.name,
target
.src_path
.strip_prefix(&metadata.workspace_root)
.unwrap_or(&target.src_path)
.display(),
modify_doc_for_oj_verify(&target.read_crate_level_doc()?),
);
let markdown_path = &md_dir.join(&package.name).with_extension("md");
fs::write(markdown_path, markdown)
.with_context(|| format!("could not write `{}`", markdown_path.display()))?;
eprintln!("{:>12} {}", "Wrote", markdown_path.display());
}
return Ok(());
/// Inserts `rust` in code blocks without language specification.
fn modify_doc_for_oj_verify(doc: &str) -> String {
use pulldown_cmark::{CodeBlockKind, Event, Options, Parser, Tag};
let mut doc = match doc {
"" => "\n".to_owned(),
doc => doc
.lines()
.map(|s| s.strip_prefix(' ').unwrap_or(s).to_owned() + "\n")
.join(""),
};
#[allow(clippy::redundant_clone)]
for (_, Range { start, .. }) in Parser::new_ext(&doc.clone(), Options::all())
.into_offset_iter()
.filter(|(event, _)| {
matches!(
event,
Event::Start(Tag::CodeBlock(CodeBlockKind::Fenced(kind))) if kind.is_empty()
)
})
.collect::<Vec<_>>()
.into_iter()
.rev()
{
let mut pos = start;
while doc.as_bytes()[pos] == b'`' {
pos += 1;
}
doc.insert_str(pos, "rust");
}
doc
}
}
fn cargo_metadata(manifest_path: Option<&Path>) -> anyhow::Result<cm::Metadata> {
let mut cmd = cm::MetadataCommand::new();
if let Some(manifest_path) = manifest_path {
cmd.manifest_path(manifest_path);
}
cmd.exec().map_err(|err| match err {
cm::Error::CargoMetadata { stderr } => {
anyhow!("{}", stderr.trim_start_matches("error: ").trim_end())
}
err => anyhow::Error::msg(err),
})
}
trait PackageExt {
fn lib_or_proc_macro(&self) -> Option<&cm::Target>;
}
impl PackageExt for cm::Package {
fn lib_or_proc_macro(&self) -> Option |
fn apply_rustfmt(code: &str) -> anyhow::Result<String> {
let rustfmt_exe = PathBuf::from( | random_line_split |
lib.rs | fn gen_doc_rust(crates_dir: Option<&Path>, manifest_path: Option<&Path>) -> anyhow::Result<()> {
let metadata = &cargo_metadata(manifest_path)?;
let resolve = metadata.resolve.as_ref().expect("should be present");
let resolve_root = resolve
.root
.as_ref()
.with_context(|| "this is a virtual manifest")?;
let extern_crate_names = {
let explicit_names_in_toml = metadata[resolve_root]
.dependencies
.iter()
.flat_map(|d| &d.rename)
.collect::<HashSet<_>>();
let cm::Node { deps, .. } = resolve
.nodes
.iter()
.find(|cm::Node { id, .. }| id == resolve_root)
.unwrap();
deps.iter()
.filter(|cm::NodeDep { dep_kinds, .. }| {
dep_kinds
.iter()
.any(|cm::DepKindInfo { kind, .. }| *kind != cm::DependencyKind::Build)
})
.flat_map(|cm::NodeDep { name, pkg, .. }| {
let extern_crate_name = if explicit_names_in_toml.contains(name) {
name
} else {
&metadata[pkg].lib_or_proc_macro()?.name
};
Some((pkg, extern_crate_name))
})
.collect::<HashMap<_, _>>()
};
let lib = metadata[resolve_root]
.lib_or_proc_macro()
.with_context(|| format!("missing `lib|proc-macro` target in `{}`", resolve_root))?;
let crates_dir = &crates_dir
.map(ToOwned::to_owned)
.unwrap_or_else(|| metadata.workspace_root.join("crates"));
let library_crates = metadata
.workspace_members
.iter()
.flat_map(|ws_member| {
let ws_member = &metadata[ws_member];
let target = ws_member.lib_or_proc_macro()?;
let src_path = dunce::canonicalize(&target.src_path).ok()?;
let path1 = ws_member.manifest_path.strip_prefix(crates_dir).ok()?;
let path2 = src_path.strip_prefix(crates_dir).ok()?;
let common_path = itertools::zip(path1, path2)
.filter(|(s1, s2)| s1 == s2)
.map(|(segment, _)| segment.to_str().expect("this is from a JSON"))
.collect();
Some((common_path, ws_member, target))
})
.collect::<Vec<(Vec<_>, _, _)>>();
let mut tree = Tree::default();
for (path, package, target) in &library_crates {
tree.insert(path, package, target);
}
let generated = &mut indoc! {r"
// This file is automatically generated by `cargo-online-judge-verification-helper-helper`.
//! Re-exports the library crates for rustdoc.
//!
//! This crate itself is not intended to be used directly.
"}
.to_owned();
tree.expand(&|id| &extern_crate_names[id], generated)?;
fs::write(&lib.src_path, apply_rustfmt(generated)?)?;
eprintln!("{:>12} {}", "Wrote", lib.src_path.display());
return Ok(());
enum Tree<'cm> {
Leaf(&'cm cm::PackageId, &'cm cm::Target),
Joint(BTreeMap<&'cm str, Self>),
}
impl<'cm> Tree<'cm> {
fn insert(
&mut self,
path: &[&'cm str],
package: &'cm cm::Package,
target: &'cm cm::Target,
) {
match (self, path) {
(Self::Joint(joint), []) => |
(Self::Joint(joint), [segment, path @ ..]) => {
joint
.entry(segment)
.or_default()
.insert(path, package, target);
}
_ => panic!(),
}
}
fn expand(
&self,
extern_crate_name: &impl Fn(&cm::PackageId) -> &'cm str,
out: &mut String,
) -> anyhow::Result<()> {
match self {
Self::Leaf(package_id, target) => {
let doc = target.read_crate_level_doc()?;
if !doc.is_empty() {
*out += "#![doc=";
*out += "e!(#doc).to_string();
*out += "]";
}
*out += "pub use ::";
*out += extern_crate_name(package_id);
*out += "::*;"
}
Self::Joint(joint) => {
for (segment, node) in joint {
*out += "pub mod ";
*out += segment;
*out += "{";
node.expand(extern_crate_name, out)?;
*out += "}";
}
}
}
Ok(())
}
}
impl Default for Tree<'_> {
fn default() -> Self {
Self::Joint(BTreeMap::new())
}
}
fn apply_rustfmt(code: &str) -> anyhow::Result<String> {
let rustfmt_exe = PathBuf::from(
env::var_os("CARGO").with_context(|| "missing `$CARGO` environment variable")?,
)
.with_file_name("rustfmt")
.with_extension(env::consts::EXE_EXTENSION);
let tempdir = tempfile::Builder::new()
.prefix("qryxip-competitive-programming-library-xtask-")
.tempdir()?;
let path = tempdir.path().join("lib.rs");
fs::write(&path, code)?;
cmd!(rustfmt_exe, "--edition", "2018", &path).run()?;
let code = fs::read_to_string(path)?;
tempdir.close()?;
Ok(code)
}
}
pub fn gen_doc_oj_verify(
md_dir: Option<&Path>,
manifest_path: Option<&Path>,
) -> anyhow::Result<()> {
let metadata = &cargo_metadata(manifest_path)?;
let library_crates = metadata
.workspace_members
.iter()
.flat_map(|ws_member| {
let ws_member = &metadata[ws_member];
let target = ws_member.lib_or_proc_macro()?;
Some((ws_member, target))
})
.collect::<Vec<_>>();
let md_dir = &md_dir
.map(ToOwned::to_owned)
.unwrap_or_else(|| metadata.workspace_root.join("md"));
fs::create_dir_all(md_dir)
.with_context(|| format!("could not create `{}`", md_dir.display()))?;
for (package, target) in library_crates {
let markdown = format!(
"---\n\
title: \"{} (<code>{}</code>)\"\n\
documentation_of: //{}\n\
---\n\
{}",
package.name,
target.name,
target
.src_path
.strip_prefix(&metadata.workspace_root)
.unwrap_or(&target.src_path)
.display(),
modify_doc_for_oj_verify(&target.read_crate_level_doc()?),
);
let markdown_path = &md_dir.join(&package.name).with_extension("md");
fs::write(markdown_path, markdown)
.with_context(|| format!("could not write `{}`", markdown_path.display()))?;
eprintln!("{:>12} {}", "Wrote", markdown_path.display());
}
return Ok(());
/// Inserts `rust` in code blocks without language specification.
fn modify_doc_for_oj_verify(doc: &str) -> String {
use pulldown_cmark::{CodeBlockKind, Event, Options, Parser, Tag};
let mut doc = match doc {
"" => "\n".to_owned(),
doc => doc
.lines()
.map(|s| s.strip_prefix(' ').unwrap_or(s).to_owned() + "\n")
.join(""),
};
#[allow(clippy::redundant_clone)]
for (_, Range { start, .. }) in Parser::new_ext(&doc.clone(), Options::all())
.into_offset_iter()
.filter(|(event, _)| {
matches!(
event,
Event::Start(Tag::CodeBlock(CodeBlockKind::Fenced(kind))) if kind.is_empty()
)
})
.collect::<Vec<_>>()
.into_iter()
.rev()
{
let mut pos = start;
while doc.as_bytes()[pos] == b'`' {
pos += 1;
}
doc.insert_str(pos, "rust");
}
doc
}
}
fn cargo_metadata(manifest_path: Option<&Path>) -> anyhow::Result<cm::Metadata> {
let mut cmd = cm::MetadataCommand::new();
if let Some(manifest_path) = manifest_path {
cmd.manifest_path(manifest_path);
}
cmd.exec().map_err(|err| match err {
cm::Error::CargoMetadata { stderr } => {
anyhow!("{}", stderr.trim_start_matches("error: ").trim_end())
}
err => anyhow::Error::msg(err),
})
}
trait PackageExt {
fn lib_or_proc_macro(&self) -> Option<&cm::Target>;
}
impl PackageExt for cm::Package {
fn lib_or_proc_macro(&self) -> | {
joint.insert(&target.name, Self::Leaf(&package.id, target));
} | conditional_block |
lib.rs | fn gen_doc_rust(crates_dir: Option<&Path>, manifest_path: Option<&Path>) -> anyhow::Result<()> {
let metadata = &cargo_metadata(manifest_path)?;
let resolve = metadata.resolve.as_ref().expect("should be present");
let resolve_root = resolve
.root
.as_ref()
.with_context(|| "this is a virtual manifest")?;
let extern_crate_names = {
let explicit_names_in_toml = metadata[resolve_root]
.dependencies
.iter()
.flat_map(|d| &d.rename)
.collect::<HashSet<_>>();
let cm::Node { deps, .. } = resolve
.nodes
.iter()
.find(|cm::Node { id, .. }| id == resolve_root)
.unwrap();
deps.iter()
.filter(|cm::NodeDep { dep_kinds, .. }| {
dep_kinds
.iter()
.any(|cm::DepKindInfo { kind, .. }| *kind != cm::DependencyKind::Build)
})
.flat_map(|cm::NodeDep { name, pkg, .. }| {
let extern_crate_name = if explicit_names_in_toml.contains(name) {
name
} else {
&metadata[pkg].lib_or_proc_macro()?.name
};
Some((pkg, extern_crate_name))
})
.collect::<HashMap<_, _>>()
};
let lib = metadata[resolve_root]
.lib_or_proc_macro()
.with_context(|| format!("missing `lib|proc-macro` target in `{}`", resolve_root))?;
let crates_dir = &crates_dir
.map(ToOwned::to_owned)
.unwrap_or_else(|| metadata.workspace_root.join("crates"));
let library_crates = metadata
.workspace_members
.iter()
.flat_map(|ws_member| {
let ws_member = &metadata[ws_member];
let target = ws_member.lib_or_proc_macro()?;
let src_path = dunce::canonicalize(&target.src_path).ok()?;
let path1 = ws_member.manifest_path.strip_prefix(crates_dir).ok()?;
let path2 = src_path.strip_prefix(crates_dir).ok()?;
let common_path = itertools::zip(path1, path2)
.filter(|(s1, s2)| s1 == s2)
.map(|(segment, _)| segment.to_str().expect("this is from a JSON"))
.collect();
Some((common_path, ws_member, target))
})
.collect::<Vec<(Vec<_>, _, _)>>();
let mut tree = Tree::default();
for (path, package, target) in &library_crates {
tree.insert(path, package, target);
}
let generated = &mut indoc! {r"
// This file is automatically generated by `cargo-online-judge-verification-helper-helper`.
//! Re-exports the library crates for rustdoc.
//!
//! This crate itself is not intended to be used directly.
"}
.to_owned();
tree.expand(&|id| &extern_crate_names[id], generated)?;
fs::write(&lib.src_path, apply_rustfmt(generated)?)?;
eprintln!("{:>12} {}", "Wrote", lib.src_path.display());
return Ok(());
enum Tree<'cm> {
Leaf(&'cm cm::PackageId, &'cm cm::Target),
Joint(BTreeMap<&'cm str, Self>),
}
impl<'cm> Tree<'cm> {
fn insert(
&mut self,
path: &[&'cm str],
package: &'cm cm::Package,
target: &'cm cm::Target,
) {
match (self, path) {
(Self::Joint(joint), []) => {
joint.insert(&target.name, Self::Leaf(&package.id, target));
}
(Self::Joint(joint), [segment, path @ ..]) => {
joint
.entry(segment)
.or_default()
.insert(path, package, target);
}
_ => panic!(),
}
}
fn expand(
&self,
extern_crate_name: &impl Fn(&cm::PackageId) -> &'cm str,
out: &mut String,
) -> anyhow::Result<()> {
match self {
Self::Leaf(package_id, target) => {
let doc = target.read_crate_level_doc()?;
if !doc.is_empty() {
*out += "#![doc=";
*out += "e!(#doc).to_string();
*out += "]";
}
*out += "pub use ::";
*out += extern_crate_name(package_id);
*out += "::*;"
}
Self::Joint(joint) => {
for (segment, node) in joint {
*out += "pub mod ";
*out += segment;
*out += "{";
node.expand(extern_crate_name, out)?;
*out += "}";
}
}
}
Ok(())
}
}
impl Default for Tree<'_> {
fn default() -> Self {
Self::Joint(BTreeMap::new())
}
}
fn apply_rustfmt(code: &str) -> anyhow::Result<String> {
let rustfmt_exe = PathBuf::from(
env::var_os("CARGO").with_context(|| "missing `$CARGO` environment variable")?,
)
.with_file_name("rustfmt")
.with_extension(env::consts::EXE_EXTENSION);
let tempdir = tempfile::Builder::new()
.prefix("qryxip-competitive-programming-library-xtask-")
.tempdir()?;
let path = tempdir.path().join("lib.rs");
fs::write(&path, code)?;
cmd!(rustfmt_exe, "--edition", "2018", &path).run()?;
let code = fs::read_to_string(path)?;
tempdir.close()?;
Ok(code)
}
}
pub fn gen_doc_oj_verify(
md_dir: Option<&Path>,
manifest_path: Option<&Path>,
) -> anyhow::Result<()> | for (package, target) in library_crates {
let markdown = format!(
"---\n\
title: \"{} (<code>{}</code>)\"\n\
documentation_of: //{}\n\
---\n\
{}",
package.name,
target.name,
target
.src_path
.strip_prefix(&metadata.workspace_root)
.unwrap_or(&target.src_path)
.display(),
modify_doc_for_oj_verify(&target.read_crate_level_doc()?),
);
let markdown_path = &md_dir.join(&package.name).with_extension("md");
fs::write(markdown_path, markdown)
.with_context(|| format!("could not write `{}`", markdown_path.display()))?;
eprintln!("{:>12} {}", "Wrote", markdown_path.display());
}
return Ok(());
/// Inserts `rust` in code blocks without language specification.
fn modify_doc_for_oj_verify(doc: &str) -> String {
use pulldown_cmark::{CodeBlockKind, Event, Options, Parser, Tag};
let mut doc = match doc {
"" => "\n".to_owned(),
doc => doc
.lines()
.map(|s| s.strip_prefix(' ').unwrap_or(s).to_owned() + "\n")
.join(""),
};
#[allow(clippy::redundant_clone)]
for (_, Range { start, .. }) in Parser::new_ext(&doc.clone(), Options::all())
.into_offset_iter()
.filter(|(event, _)| {
matches!(
event,
Event::Start(Tag::CodeBlock(CodeBlockKind::Fenced(kind))) if kind.is_empty()
)
})
.collect::<Vec<_>>()
.into_iter()
.rev()
{
let mut pos = start;
while doc.as_bytes()[pos] == b'`' {
pos += 1;
}
doc.insert_str(pos, "rust");
}
doc
}
}
fn cargo_metadata(manifest_path: Option<&Path>) -> anyhow::Result<cm::Metadata> {
let mut cmd = cm::MetadataCommand::new();
if let Some(manifest_path) = manifest_path {
cmd.manifest_path(manifest_path);
}
cmd.exec().map_err(|err| match err {
cm::Error::CargoMetadata { stderr } => {
anyhow!("{}", stderr.trim_start_matches("error: ").trim_end())
}
err => anyhow::Error::msg(err),
})
}
trait PackageExt {
fn lib_or_proc_macro(&self) -> Option<&cm::Target>;
}
impl PackageExt for cm::Package {
fn lib_or_proc_macro(&self) -> Option | {
let metadata = &cargo_metadata(manifest_path)?;
let library_crates = metadata
.workspace_members
.iter()
.flat_map(|ws_member| {
let ws_member = &metadata[ws_member];
let target = ws_member.lib_or_proc_macro()?;
Some((ws_member, target))
})
.collect::<Vec<_>>();
let md_dir = &md_dir
.map(ToOwned::to_owned)
.unwrap_or_else(|| metadata.workspace_root.join("md"));
fs::create_dir_all(md_dir)
.with_context(|| format!("could not create `{}`", md_dir.display()))?;
| identifier_body |
Travel.py | the day, the day of the week, and month of the year. In addition, the traffic is usually heavier in Manhattan (downtown of the city) in comparing to the other point of the city. Therefore, if the starting or ending point of the travel is close to the Manhattan we expect higher traffic comparing to the other neighborhoods. In visualization section, I provide enough evidence from the data set to support the aforementioned claims.
#
# According to this observation the following features are computted by using the raw data and added to the dataframe.
#
# * Distance between starting and ending computted by vincenty formula
# * The time of the day of travel (in sec far from the midnight)
# * The day of the week (Monday, Tuesday, etc). For this categorical data, six dummy variables are added to datafram
# * The month of the travel to capture seasnolity effect.
# * The sequare of distance
# * The velocity is used as the predication variable.
#
# In[156]:
def distance(row):
source = (row['start_lat'], row['start_lng'])
dest = ( row['end_lat'], row['end_lng'])
return vincenty(source,dest).miles
Manhattan = (40.7831, -73.9712)
def pickup_to_MH(row):
'''find the distance between pick up point and Manhattan center'''
source = (row['start_lat'], row['start_lng'])
return vincenty(source,Manhattan).miles
def dropoff_to_MH(row):
'''find the distance between dropoff point and Manhattan center'''
dest = ( row['end_lat'], row['end_lng'])
return vincenty(dest,Manhattan).miles
def day_of_week(ep):
return datetime.fromtimestamp(ep).strftime("%A")
def month(ep):
return datetime.fromtimestamp(ep).month
def time_of_day(ep):
ref = datetime(2015, 1, 1, 0, 0, 0)
sec = (datetime.fromtimestamp(ep)- ref).seconds
return min(sec, 86400- sec)
def year(ep):
return datetime.fromtimestamp(ep).year
def add_features(df_train_s):
# Add day of the week and the dummy variable
|
# Now, we can easily add all of the above features to both traing and test data set. Due to time limtation and calculation power I only used 10% of the traing data.
# In[24]:
np.random.seed(42)
df_train_s = df_train.sample(frac=0.01, replace=False)
df_train_s = add_features(df_train_s)
df_train_s['velocity'] = np.array(df_train_s['distance']/(df_train_s['duration']/3600))
# In[25]:
df_train_s.head()
# In[170]:
# adding the feature to test set.
df_test = add_features(df_test)
# ## Removing Outlires
# The following functions are used to compute these features. Considering the speed limit and the fact the usual trafic in New York, it is reseanable to assume that always the speed show not exceed 90 mph. Therefore, I remove the points with more than this number as the outlires. Also, I removed the data with less than .5 mph. Specificlly, there exists many samples with zero distance between starting and ending point which might happen becouse GPS problem.
# In[41]:
df_train_s = df_train_s[df_train_s['velocity']<90]
df_train_s = df_train_s[df_train_s['velocity']>.5]
# ## Data Visulazation
#
# First we look at the starting and ending point of the trips which happens in New York.
#
#
#
# In[30]:
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(12, 6))
ax = df_train_s.plot.scatter( 'start_lat','start_lng',
ax = axes[0],
title='Start point of travel')
ax.set(xlabel="latitude", ylabel='longitude')
ax = df_train_s.plot.scatter('end_lng','end_lat',
ax = axes[1],
title='Destination of the travel')
ax.set(xlabel="latitude", ylabel='longitude')
plt.show()
# Here are some statitcs about the volacity, distance of each trip and its duration. Also, we looked at the density function of the volacity. A log-normal or Gamma distribution are approprate candiatdes for this distribution.
# In[42]:
df_train_s[['distance', 'duration','velocity']].describe()
# In[43]:
df_train_s['velocity'].hist(bins=1000,normed=True)
# ### Corrolation matrix
# In[44]:
corr = df_train_s.corr()
# generate a mask for the lower triangle
mask = np.zeros_like(corr, dtype=np.bool)
mask[np.triu_indices_from(mask)] = True
# set up the matplotlib figure
f, ax = plt.subplots(figsize=(18, 18))
# generate a custom diverging colormap
cmap = sns.diverging_palette(220, 10, as_cmap=True)
# draw the heatmap with the mask and correct aspect ratio
sns.heatmap(corr, mask=mask, cmap=cmap, vmax=.3,
square=True,
linewidths=.5, cbar_kws={"shrink": .5}, ax=ax)
plt.show()
# In[53]:
df_train_s.plot.scatter( 'distance','velocity')
# In[48]:
### Seanility and time Effect on Velocity
gr= df_train_s[['velocity','month']].groupby(by='month')
gr.mean().plot.bar(yerr=gr.std())
# ## Data preprocessing
#
# Let's split our data to train and test set in fraction of $\frac{4}{1}$ to facilate comparing the results.
# This test set is differenet from the given test set.
# In[105]:
cl = list(set(df_train_s.keys())-{'velocity','duration','day'})
X = np.array(df_train_s[cl])
X1 = np.insert(X, 0, 1, axis=1)
y = np.array(df_train_s['velocity'])
X_train, X_test, y_train, y_test = train_test_split(X1, y, test_size=0.2, random_state=42)
dist_train = X_train[:,1]
dist_test = X_test[:,1]
# In[106]:
list(enumerate(cl))
dist_train.mean()
# ## Linear Model
# In[204]:
model_sk = LinearRegression()
model_sk.fit(X_train, y_train)
plt.figure(figsize=(12, 8))
plt.bar(np.arange(model_sk.coef_.shape[0]) - 0.4, model_sk.coef_)
plt.xticks(np.arange(model_sk.coef_.shape[0]), cl, rotation='vertical')
plt.xlim([-1, model_sk.coef_.shape[0]])
plt.title("Linear model coefficients")
plt.show()
# The folling chart also provide better understading. Excepet X12 (dummy for sunday) all the other variables are significant; the p-value is zero and null-hypothesis is rejected.
# In[205]:
linear_model = sm.OLS(y_train, X_train)
linear_results = linear_model.fit()
print(linear_results.summary())
# ## Generalized Linear Model
# I tried GLM with gamma fammaly.
# In[206]:
gamma_model = sm.GLM( y_train, X_train,family=sm.families.Gamma())
gamma_results = gamma_model.fit()
print(gamma_results.summary())
# ## Deep Neural Network (DNN)
#
# Here, I am useing a DNN as a prediction model. I am using the Keras package to train the network. Network includes 3 layers. Also, between each two layer a dropout layer is add. RELU and softmax are used as the activation functions. Here, I define the model.
#
# I normilized the data the input data to imporve the performance.
# In[195]:
DNN_model = Sequential()
DNN_model.add(Dense(100,input_dim=X_train.shape[1],init='uniform',activation='relu'))
| DD = df_train_s['start_timestamp'].map(day_of_week)
df_train_s['day'] = DD
DD = pd.get_dummies( DD,prefix='day', drop_first=True)
df_train_s = pd.concat([df_train_s, DD],axis =1 )
# Month, time of the dat, df_train_s
df_train_s['month'] = df_train_s['start_timestamp'].map(month)
df_train_s['time_of_day'] = df_train_s['start_timestamp'].map(time_of_day)
# distance between start and end of the trip
df_train_s['distance'] = df_train_s.apply(lambda x :distance(x), axis=1 )
df_train_s['distance2'] = df_train_s['distance']**2
# distance between start, end, and center of Manhatan
df_train_s['pickup_MH'] = df_train_s.apply(pickup_to_MH, axis=1 )
df_train_s['dropoff_MH'] = df_train_s.apply(dropoff_to_MH, axis=1 )
return df_train_s | identifier_body |
Travel.py | day, the day of the week, and month of the year. In addition, the traffic is usually heavier in Manhattan (downtown of the city) in comparing to the other point of the city. Therefore, if the starting or ending point of the travel is close to the Manhattan we expect higher traffic comparing to the other neighborhoods. In visualization section, I provide enough evidence from the data set to support the aforementioned claims.
#
# According to this observation the following features are computted by using the raw data and added to the dataframe.
#
# * Distance between starting and ending computted by vincenty formula
# * The time of the day of travel (in sec far from the midnight)
# * The day of the week (Monday, Tuesday, etc). For this categorical data, six dummy variables are added to datafram
# * The month of the travel to capture seasnolity effect.
# * The sequare of distance
# * The velocity is used as the predication variable.
#
# In[156]:
def distance(row):
source = (row['start_lat'], row['start_lng'])
dest = ( row['end_lat'], row['end_lng'])
return vincenty(source,dest).miles
Manhattan = (40.7831, -73.9712)
def pickup_to_MH(row):
'''find the distance between pick up point and Manhattan center'''
source = (row['start_lat'], row['start_lng'])
return vincenty(source,Manhattan).miles
def dropoff_to_MH(row):
'''find the distance between dropoff point and Manhattan center'''
dest = ( row['end_lat'], row['end_lng'])
return vincenty(dest,Manhattan).miles
def day_of_week(ep):
return datetime.fromtimestamp(ep).strftime("%A")
def month(ep):
return datetime.fromtimestamp(ep).month
def time_of_day(ep):
ref = datetime(2015, 1, 1, 0, 0, 0)
sec = (datetime.fromtimestamp(ep)- ref).seconds
return min(sec, 86400- sec)
def year(ep):
return datetime.fromtimestamp(ep).year
def add_features(df_train_s):
# Add day of the week and the dummy variable
DD = df_train_s['start_timestamp'].map(day_of_week)
df_train_s['day'] = DD
DD = pd.get_dummies( DD,prefix='day', drop_first=True)
df_train_s = pd.concat([df_train_s, DD],axis =1 )
# Month, time of the dat, df_train_s
df_train_s['month'] = df_train_s['start_timestamp'].map(month)
df_train_s['time_of_day'] = df_train_s['start_timestamp'].map(time_of_day)
# distance between start and end of the trip
df_train_s['distance'] = df_train_s.apply(lambda x :distance(x), axis=1 )
df_train_s['distance2'] = df_train_s['distance']**2
# distance between start, end, and center of Manhatan
df_train_s['pickup_MH'] = df_train_s.apply(pickup_to_MH, axis=1 )
df_train_s['dropoff_MH'] = df_train_s.apply(dropoff_to_MH, axis=1 )
return df_train_s
# Now, we can easily add all of the above features to both traing and test data set. Due to time limtation and calculation power I only used 10% of the traing data.
# In[24]:
np.random.seed(42)
df_train_s = df_train.sample(frac=0.01, replace=False)
df_train_s = add_features(df_train_s)
df_train_s['velocity'] = np.array(df_train_s['distance']/(df_train_s['duration']/3600))
# In[25]:
df_train_s.head()
# In[170]:
# adding the feature to test set.
df_test = add_features(df_test)
# ## Removing Outlires
# The following functions are used to compute these features. Considering the speed limit and the fact the usual trafic in New York, it is reseanable to assume that always the speed show not exceed 90 mph. Therefore, I remove the points with more than this number as the outlires. Also, I removed the data with less than .5 mph. Specificlly, there exists many samples with zero distance between starting and ending point which might happen becouse GPS problem.
# In[41]:
df_train_s = df_train_s[df_train_s['velocity']<90]
df_train_s = df_train_s[df_train_s['velocity']>.5]
# ## Data Visulazation
#
# First we look at the starting and ending point of the trips which happens in New York.
#
#
#
# In[30]:
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(12, 6))
ax = df_train_s.plot.scatter( 'start_lat','start_lng',
ax = axes[0],
title='Start point of travel')
ax.set(xlabel="latitude", ylabel='longitude')
ax = df_train_s.plot.scatter('end_lng','end_lat',
ax = axes[1],
title='Destination of the travel')
ax.set(xlabel="latitude", ylabel='longitude')
plt.show()
# Here are some statitcs about the volacity, distance of each trip and its duration. Also, we looked at the density function of the volacity. A log-normal or Gamma distribution are approprate candiatdes for this distribution.
# In[42]:
df_train_s[['distance', 'duration','velocity']].describe()
# In[43]:
df_train_s['velocity'].hist(bins=1000,normed=True)
# ### Corrolation matrix
# In[44]:
corr = df_train_s.corr()
# generate a mask for the lower triangle
mask = np.zeros_like(corr, dtype=np.bool)
mask[np.triu_indices_from(mask)] = True
# set up the matplotlib figure
f, ax = plt.subplots(figsize=(18, 18))
# generate a custom diverging colormap
cmap = sns.diverging_palette(220, 10, as_cmap=True)
# draw the heatmap with the mask and correct aspect ratio
sns.heatmap(corr, mask=mask, cmap=cmap, vmax=.3,
square=True,
linewidths=.5, cbar_kws={"shrink": .5}, ax=ax)
plt.show()
# In[53]:
df_train_s.plot.scatter( 'distance','velocity')
# In[48]:
### Seanility and time Effect on Velocity
gr= df_train_s[['velocity','month']].groupby(by='month')
gr.mean().plot.bar(yerr=gr.std())
# ## Data preprocessing
#
# Let's split our data to train and test set in fraction of $\frac{4}{1}$ to facilate comparing the results.
# This test set is differenet from the given test set.
# In[105]:
cl = list(set(df_train_s.keys())-{'velocity','duration','day'})
X = np.array(df_train_s[cl])
X1 = np.insert(X, 0, 1, axis=1)
y = np.array(df_train_s['velocity'])
X_train, X_test, y_train, y_test = train_test_split(X1, y, test_size=0.2, random_state=42)
dist_train = X_train[:,1]
dist_test = X_test[:,1]
# In[106]:
list(enumerate(cl))
dist_train.mean()
|
# In[204]:
model_sk = LinearRegression()
model_sk.fit(X_train, y_train)
plt.figure(figsize=(12, 8))
plt.bar(np.arange(model_sk.coef_.shape[0]) - 0.4, model_sk.coef_)
plt.xticks(np.arange(model_sk.coef_.shape[0]), cl, rotation='vertical')
plt.xlim([-1, model_sk.coef_.shape[0]])
plt.title("Linear model coefficients")
plt.show()
# The folling chart also provide better understading. Excepet X12 (dummy for sunday) all the other variables are significant; the p-value is zero and null-hypothesis is rejected.
# In[205]:
linear_model = sm.OLS(y_train, X_train)
linear_results = linear_model.fit()
print(linear_results.summary())
# ## Generalized Linear Model
# I tried GLM with gamma fammaly.
# In[206]:
gamma_model = sm.GLM( y_train, X_train,family=sm.families.Gamma())
gamma_results = gamma_model.fit()
print(gamma_results.summary())
# ## Deep Neural Network (DNN)
#
# Here, I am useing a DNN as a prediction model. I am using the Keras package to train the network. Network includes 3 layers. Also, between each two layer a dropout layer is add. RELU and softmax are used as the activation functions. Here, I define the model.
#
# I normilized the data the input data to imporve the performance.
# In[195]:
DNN_model = Sequential()
DNN_model.add(Dense(100,input_dim=X_train.shape[1],init='uniform',activation='relu |
# ## Linear Model | random_line_split |
Travel.py |
df_train_s['pickup_MH'] = df_train_s.apply(pickup_to_MH, axis=1 )
df_train_s['dropoff_MH'] = df_train_s.apply(dropoff_to_MH, axis=1 )
return df_train_s
# Now, we can easily add all of the above features to both traing and test data set. Due to time limtation and calculation power I only used 10% of the traing data.
# In[24]:
np.random.seed(42)
df_train_s = df_train.sample(frac=0.01, replace=False)
df_train_s = add_features(df_train_s)
df_train_s['velocity'] = np.array(df_train_s['distance']/(df_train_s['duration']/3600))
# In[25]:
df_train_s.head()
# In[170]:
# adding the feature to test set.
df_test = add_features(df_test)
# ## Removing Outlires
# The following functions are used to compute these features. Considering the speed limit and the fact the usual trafic in New York, it is reseanable to assume that always the speed show not exceed 90 mph. Therefore, I remove the points with more than this number as the outlires. Also, I removed the data with less than .5 mph. Specificlly, there exists many samples with zero distance between starting and ending point which might happen becouse GPS problem.
# In[41]:
df_train_s = df_train_s[df_train_s['velocity']<90]
df_train_s = df_train_s[df_train_s['velocity']>.5]
# ## Data Visulazation
#
# First we look at the starting and ending point of the trips which happens in New York.
#
#
#
# In[30]:
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(12, 6))
ax = df_train_s.plot.scatter( 'start_lat','start_lng',
ax = axes[0],
title='Start point of travel')
ax.set(xlabel="latitude", ylabel='longitude')
ax = df_train_s.plot.scatter('end_lng','end_lat',
ax = axes[1],
title='Destination of the travel')
ax.set(xlabel="latitude", ylabel='longitude')
plt.show()
# Here are some statitcs about the volacity, distance of each trip and its duration. Also, we looked at the density function of the volacity. A log-normal or Gamma distribution are approprate candiatdes for this distribution.
# In[42]:
df_train_s[['distance', 'duration','velocity']].describe()
# In[43]:
df_train_s['velocity'].hist(bins=1000,normed=True)
# ### Corrolation matrix
# In[44]:
corr = df_train_s.corr()
# generate a mask for the lower triangle
mask = np.zeros_like(corr, dtype=np.bool)
mask[np.triu_indices_from(mask)] = True
# set up the matplotlib figure
f, ax = plt.subplots(figsize=(18, 18))
# generate a custom diverging colormap
cmap = sns.diverging_palette(220, 10, as_cmap=True)
# draw the heatmap with the mask and correct aspect ratio
sns.heatmap(corr, mask=mask, cmap=cmap, vmax=.3,
square=True,
linewidths=.5, cbar_kws={"shrink": .5}, ax=ax)
plt.show()
# In[53]:
df_train_s.plot.scatter( 'distance','velocity')
# In[48]:
### Seanility and time Effect on Velocity
gr= df_train_s[['velocity','month']].groupby(by='month')
gr.mean().plot.bar(yerr=gr.std())
# ## Data preprocessing
#
# Let's split our data to train and test set in fraction of $\frac{4}{1}$ to facilate comparing the results.
# This test set is differenet from the given test set.
# In[105]:
cl = list(set(df_train_s.keys())-{'velocity','duration','day'})
X = np.array(df_train_s[cl])
X1 = np.insert(X, 0, 1, axis=1)
y = np.array(df_train_s['velocity'])
X_train, X_test, y_train, y_test = train_test_split(X1, y, test_size=0.2, random_state=42)
dist_train = X_train[:,1]
dist_test = X_test[:,1]
# In[106]:
list(enumerate(cl))
dist_train.mean()
# ## Linear Model
# In[204]:
model_sk = LinearRegression()
model_sk.fit(X_train, y_train)
plt.figure(figsize=(12, 8))
plt.bar(np.arange(model_sk.coef_.shape[0]) - 0.4, model_sk.coef_)
plt.xticks(np.arange(model_sk.coef_.shape[0]), cl, rotation='vertical')
plt.xlim([-1, model_sk.coef_.shape[0]])
plt.title("Linear model coefficients")
plt.show()
# The folling chart also provide better understading. Excepet X12 (dummy for sunday) all the other variables are significant; the p-value is zero and null-hypothesis is rejected.
# In[205]:
linear_model = sm.OLS(y_train, X_train)
linear_results = linear_model.fit()
print(linear_results.summary())
# ## Generalized Linear Model
# I tried GLM with gamma fammaly.
# In[206]:
gamma_model = sm.GLM( y_train, X_train,family=sm.families.Gamma())
gamma_results = gamma_model.fit()
print(gamma_results.summary())
# ## Deep Neural Network (DNN)
#
# Here, I am useing a DNN as a prediction model. I am using the Keras package to train the network. Network includes 3 layers. Also, between each two layer a dropout layer is add. RELU and softmax are used as the activation functions. Here, I define the model.
#
# I normilized the data the input data to imporve the performance.
# In[195]:
DNN_model = Sequential()
DNN_model.add(Dense(100,input_dim=X_train.shape[1],init='uniform',activation='relu'))
DNN_model.add(Dropout(0.5))
DNN_model.add(Dense(50,init='uniform',activation='softmax'))
DNN_model.add(Dropout(0.5))
DNN_model.add(Dense(100,init='uniform',activation='relu'))
DNN_model.add(Dropout(0.5))
DNN_model.add(Dense(1,init='uniform',activation='relu'))
DNN_model.summary()
# ### Fitting the DNN
# In[196]:
mn = X1.mean(axis=0)
#model.compile(loss='mean_absolute_error',optimizer='adam',metrics='[accuracy]')
DNN_model.compile(loss='mean_absolute_error',optimizer='adam')
history = DNN_model.fit(X_train/mn,y_train,
validation_data=(X_test/mn, y_test),
epochs =100,
batch_size=100,
verbose=2)
# In[197]:
plt.figure(figsize=(10, 8))
plt.title("Dense model training", fontsize=12)
plt.plot(history.history["loss"], label="Train")
plt.plot(history.history["val_loss"], label="Test")
plt.grid("on")
plt.xlabel("Epoch", fontsize=12)
plt.ylabel("loss", fontsize=12)
plt.legend(loc="upper right")
# ## Evalution
#
# In this part, I compare the propsed models and choose the best one. I compare the results based on mean absolute
# error of predicted versus actual durations, and also mean absolute percentage error which is the percantge of the error. Note that here we compare based on duration as asked in the question and not the velocity.
#
# In[207]:
preds_test, preds_train = {}, {}
#Linear Model
preds_test['linear'] = linear_results.predict(X_test)
preds_train['linear'] = linear_results.predict(X_train)
#GLM (Gamma Model)
preds_test['GLM'] = gamma_results.predict(X_test)
preds_train['GLM'] = gamma_results.predict(X_train)
#Deep Learning
preds_test['DL'] = np.squeeze(DNN_model.predict(X_test/mn))
preds_train['DL'] = np.squeeze(DNN_model.predict(X_train/mn))
# The functions are used for evalution
# In[84]:
def mean_absolute_error(dist,y_true, y_pred ):
"""
Args:
dist(ndarray) : distance between pick up and drop off
y_true(ndarray) : true velocity
y_pred(ndarray) : the prediction value of velocity
"""
err = np.abs(dist/y_true - dist/y_pred)
err = err[np.isfinite(err)]
return np.mean(err) *3600
def mean_absolute_percentage_error(dist,y_true, y_pred ):
"""
Args:
dist(ndarray) : distance between pick up and drop off
y_true(ndarray) : true velocity
y_pred(ndarray) : the prediction value of velocity
"""
err = np.abs(y_true/y_pred - 1)
err = err[np.isfinite(err)]
return np.mean(err)*100
def | evalute | identifier_name | |
Travel.py | easily add all of the above features to both traing and test data set. Due to time limtation and calculation power I only used 10% of the traing data.
# In[24]:
np.random.seed(42)
df_train_s = df_train.sample(frac=0.01, replace=False)
df_train_s = add_features(df_train_s)
df_train_s['velocity'] = np.array(df_train_s['distance']/(df_train_s['duration']/3600))
# In[25]:
df_train_s.head()
# In[170]:
# adding the feature to test set.
df_test = add_features(df_test)
# ## Removing Outlires
# The following functions are used to compute these features. Considering the speed limit and the fact the usual trafic in New York, it is reseanable to assume that always the speed show not exceed 90 mph. Therefore, I remove the points with more than this number as the outlires. Also, I removed the data with less than .5 mph. Specificlly, there exists many samples with zero distance between starting and ending point which might happen becouse GPS problem.
# In[41]:
df_train_s = df_train_s[df_train_s['velocity']<90]
df_train_s = df_train_s[df_train_s['velocity']>.5]
# ## Data Visulazation
#
# First we look at the starting and ending point of the trips which happens in New York.
#
#
#
# In[30]:
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(12, 6))
ax = df_train_s.plot.scatter( 'start_lat','start_lng',
ax = axes[0],
title='Start point of travel')
ax.set(xlabel="latitude", ylabel='longitude')
ax = df_train_s.plot.scatter('end_lng','end_lat',
ax = axes[1],
title='Destination of the travel')
ax.set(xlabel="latitude", ylabel='longitude')
plt.show()
# Here are some statitcs about the volacity, distance of each trip and its duration. Also, we looked at the density function of the volacity. A log-normal or Gamma distribution are approprate candiatdes for this distribution.
# In[42]:
df_train_s[['distance', 'duration','velocity']].describe()
# In[43]:
df_train_s['velocity'].hist(bins=1000,normed=True)
# ### Corrolation matrix
# In[44]:
corr = df_train_s.corr()
# generate a mask for the lower triangle
mask = np.zeros_like(corr, dtype=np.bool)
mask[np.triu_indices_from(mask)] = True
# set up the matplotlib figure
f, ax = plt.subplots(figsize=(18, 18))
# generate a custom diverging colormap
cmap = sns.diverging_palette(220, 10, as_cmap=True)
# draw the heatmap with the mask and correct aspect ratio
sns.heatmap(corr, mask=mask, cmap=cmap, vmax=.3,
square=True,
linewidths=.5, cbar_kws={"shrink": .5}, ax=ax)
plt.show()
# In[53]:
df_train_s.plot.scatter( 'distance','velocity')
# In[48]:
### Seanility and time Effect on Velocity
gr= df_train_s[['velocity','month']].groupby(by='month')
gr.mean().plot.bar(yerr=gr.std())
# ## Data preprocessing
#
# Let's split our data to train and test set in fraction of $\frac{4}{1}$ to facilate comparing the results.
# This test set is differenet from the given test set.
# In[105]:
cl = list(set(df_train_s.keys())-{'velocity','duration','day'})
X = np.array(df_train_s[cl])
X1 = np.insert(X, 0, 1, axis=1)
y = np.array(df_train_s['velocity'])
X_train, X_test, y_train, y_test = train_test_split(X1, y, test_size=0.2, random_state=42)
dist_train = X_train[:,1]
dist_test = X_test[:,1]
# In[106]:
list(enumerate(cl))
dist_train.mean()
# ## Linear Model
# In[204]:
model_sk = LinearRegression()
model_sk.fit(X_train, y_train)
plt.figure(figsize=(12, 8))
plt.bar(np.arange(model_sk.coef_.shape[0]) - 0.4, model_sk.coef_)
plt.xticks(np.arange(model_sk.coef_.shape[0]), cl, rotation='vertical')
plt.xlim([-1, model_sk.coef_.shape[0]])
plt.title("Linear model coefficients")
plt.show()
# The folling chart also provide better understading. Excepet X12 (dummy for sunday) all the other variables are significant; the p-value is zero and null-hypothesis is rejected.
# In[205]:
linear_model = sm.OLS(y_train, X_train)
linear_results = linear_model.fit()
print(linear_results.summary())
# ## Generalized Linear Model
# I tried GLM with gamma fammaly.
# In[206]:
gamma_model = sm.GLM( y_train, X_train,family=sm.families.Gamma())
gamma_results = gamma_model.fit()
print(gamma_results.summary())
# ## Deep Neural Network (DNN)
#
# Here, I am useing a DNN as a prediction model. I am using the Keras package to train the network. Network includes 3 layers. Also, between each two layer a dropout layer is add. RELU and softmax are used as the activation functions. Here, I define the model.
#
# I normilized the data the input data to imporve the performance.
# In[195]:
DNN_model = Sequential()
DNN_model.add(Dense(100,input_dim=X_train.shape[1],init='uniform',activation='relu'))
DNN_model.add(Dropout(0.5))
DNN_model.add(Dense(50,init='uniform',activation='softmax'))
DNN_model.add(Dropout(0.5))
DNN_model.add(Dense(100,init='uniform',activation='relu'))
DNN_model.add(Dropout(0.5))
DNN_model.add(Dense(1,init='uniform',activation='relu'))
DNN_model.summary()
# ### Fitting the DNN
# In[196]:
mn = X1.mean(axis=0)
#model.compile(loss='mean_absolute_error',optimizer='adam',metrics='[accuracy]')
DNN_model.compile(loss='mean_absolute_error',optimizer='adam')
history = DNN_model.fit(X_train/mn,y_train,
validation_data=(X_test/mn, y_test),
epochs =100,
batch_size=100,
verbose=2)
# In[197]:
plt.figure(figsize=(10, 8))
plt.title("Dense model training", fontsize=12)
plt.plot(history.history["loss"], label="Train")
plt.plot(history.history["val_loss"], label="Test")
plt.grid("on")
plt.xlabel("Epoch", fontsize=12)
plt.ylabel("loss", fontsize=12)
plt.legend(loc="upper right")
# ## Evalution
#
# In this part, I compare the propsed models and choose the best one. I compare the results based on mean absolute
# error of predicted versus actual durations, and also mean absolute percentage error which is the percantge of the error. Note that here we compare based on duration as asked in the question and not the velocity.
#
# In[207]:
preds_test, preds_train = {}, {}
#Linear Model
preds_test['linear'] = linear_results.predict(X_test)
preds_train['linear'] = linear_results.predict(X_train)
#GLM (Gamma Model)
preds_test['GLM'] = gamma_results.predict(X_test)
preds_train['GLM'] = gamma_results.predict(X_train)
#Deep Learning
preds_test['DL'] = np.squeeze(DNN_model.predict(X_test/mn))
preds_train['DL'] = np.squeeze(DNN_model.predict(X_train/mn))
# The functions are used for evalution
# In[84]:
def mean_absolute_error(dist,y_true, y_pred ):
"""
Args:
dist(ndarray) : distance between pick up and drop off
y_true(ndarray) : true velocity
y_pred(ndarray) : the prediction value of velocity
"""
err = np.abs(dist/y_true - dist/y_pred)
err = err[np.isfinite(err)]
return np.mean(err) *3600
def mean_absolute_percentage_error(dist,y_true, y_pred ):
"""
Args:
dist(ndarray) : distance between pick up and drop off
y_true(ndarray) : true velocity
y_pred(ndarray) : the prediction value of velocity
"""
err = np.abs(y_true/y_pred - 1)
err = err[np.isfinite(err)]
return np.mean(err)*100
def evalute(dist,y_true,prediction):
MAE, MAPE= {}, {}
for kys, y_pred in prediction.items():
| MAE[kys] = mean_absolute_error(dist,y_true, y_pred )
MAPE[kys] = mean_absolute_percentage_error(dist,y_true, y_pred ) | conditional_block | |
scripts.js |
// Keyboard "key down" handler: the arrow keys raise the paddle movement
// flags and the space bar launches the level. All game input is ignored
// until asset loading has completed.
function keyDownHandler(e)
{
    switch (e.key) {
        case "Left":
        case "ArrowLeft":
            // Steer left only once loading is done.
            if (loadingCompleted) {
                keyboardMoveLeft = true;
            }
            break;
        case "Right":
        case "ArrowRight":
            // Steer right only once loading is done.
            if (loadingCompleted) {
                keyboardMoveRight = true;
            }
            break;
        case " ":
            // Space launches the level, or warns while still loading.
            if (loadingCompleted) {
                startLevel();
            } else {
                alert("Please wait until loading complete");
            }
            break;
    }
}
// Keyboard "key up" handler: releasing an arrow key clears the matching
// paddle movement flag so the paddle stops moving in that direction.
function keyUpHandler(e)
{
    var released = e.key;
    if (released === "Left" || released === "ArrowLeft") {
        // Stop moving left.
        keyboardMoveLeft = false;
    } else if (released === "Right" || released === "ArrowRight") {
        // Stop moving right.
        keyboardMoveRight = false;
    }
}
function addToScore(points)
{
console.log("score added");
score+=points;
if(score > highScore){
highScore = score;
}
updateStatusLine();
}
function updateStatusLine()
{
scoreText.text = "Score: "+score + " / Lives: "+lives+" / High Score: "+highScore;
}
function createScoreText()
{
scoreText = new createjs.Text("Score: 0", "16px Arial", "#000000");
addToScore(0);
scoreText.x = stage.canvas.width/2 - 150;
scoreText.y = stage.canvas.height - 36;
stage.addChild(scoreText);
}
function updateTimerLine() //for display timer on the page
{
timerText.text = "Timer: "+ timerLength ;
}
function createTimerText(){
timerText = new createjs.Text("Timer: 90", "16px Arial", "#000000");
timerText.x = stage.canvas.width/2 - 100;
timerText.y = stage.canvas.height - 15;
stage.addChild(timerText);
}
function loseLife()
{
console.log("Lost a life");
stage.removeChild(ball);
ball = null;
stage.removeChild(ball2);
ball2 = null;
createjs.Sound.play("soundDeath");
clearTimeout(timer);
timerLength = levelArray[currentGameLevel-1].timelimit;
updateTimerLine();
lives--;
updateStatusLine();
createBall();
ball.xSpeed = 0;
ball.ySpeed = 0;
ball.x = paddle.x;
ball.y = paddle.y - PADDLE_HEIGHT/2 - BALL_RADIUS;
gameStarted = false; //switch to game pause state again
//reset high score for every life lost
localStorage.highScore = score;
//remove all the power ups
for(var i=0;i<appleArray.length;i++){
var apple = appleArray[i];
//remove the apple
stage.removeChild(apple);
}
appleArray = [];
for(var i=0;i<bananaArray.length;i++){
var banana = bananaArray[i];
//remove the banana
stage.removeChild(banana);
}
bananaArray = [];
//handle when the life is 0, reset the score and restart game
if(lives==0)
{
if(highScore<score)
{
highScore = score;
localStorage.highScore = score;
}
lives = 3; //reset the lives
score = 0; //reset the score
createBrickGrid(); //reset bricks
}
updateStatusLine();
}
function tick(event) //custom tick function
{
// stage.update(); //update the stage manually
//move paddle based on left and right key
if(keyboardMoveLeft)
{
console.log("Keyboard- Left");
paddle.x-=5;
}
if(keyboardMoveRight)
{
console.log("Keyboard- Right");
paddle.x+=5;
}
// one fix to make sure paddle not moving through the walls of stage
if(paddle.x+PADDLE_WIDTH/2>stage.canvas.width)
{
paddle.x = stage.canvas.width - PADDLE_WIDTH/2;
}
if(paddle.x-PADDLE_WIDTH/2<0)
{
paddle.x = PADDLE_WIDTH/2;
}
//make sure ball is in the middle surface of paddle and no action taken when its paused in every tick
if(!gameStarted)
{
ball.x = paddle.x;
ball.y = paddle.y - PADDLE_HEIGHT/2 - BALL_RADIUS;
stage.update();
return;
}
if(ball != null){
if(ball.up) // y axis value starts 0 at the top, so have ball's x and y value removed by 1 in each tick.
{
ball.y -= ball.ySpeed;
}
else
{
ball.y += ball.ySpeed; //if its going down ,move to the bottom of the screen
}
if(ball.right)//if its going right,move to the right.
{
ball.x += ball.xSpeed;
}
else
{
ball.x -= ball.xSpeed;
}
}
if(ball2 != null){
if(ball2.up) // y axis value starts 0 at the top, so have ball's x and y value removed by 1 in each tick.
{
ball2.y -= ball2.ySpeed;
}
else
{
ball2.y += ball2.ySpeed; //if its going down ,move to the bottom of the screen
}
if(ball2.right)//if its going right,move to the right.
{
ball2.x += ball2.xSpeed;
}
else
{
ball2.x -= ball2.xSpeed;
}
}
//move the power up down
for(var i=0;i<appleArray.length;i++){
var apple = appleArray[i];
apple.y++;
}
for(var i=0;i<bananaArray.length;i++){
var banana = bananaArray[i];
banana.y++;
}
//check if each brick in the array collides with the ball
for(var i=0;i<bricks.length;i++)
{
if(ball != null && checkCollision(ball,bricks[i]))
{
addToScore(100);
createjs.Sound.play("soundBreak");//add sound when hit happens
console.log("Brick hit / New Score: "+score);
var brickColor = bricks[i].name;
//Destroy the brick only if this is blue brick, otherwise lower the brick color by 1
if(brickColor == "blue"){
destroyBrick(bricks[i]); //if there is collision, destroy the brick
bricks.splice(i,1); //remove the brick from array
i--; //minus array element index to get the following brick to move to the disappeared bricks position
}else{
if(brickColor == "black"){
brickColor = "orange";
//drop apple power up only when the paddle is at original length
if(paddle.scaleX == 1){
dropApple(bricks[i]);
}
}
else if(brickColor == "orange"){
brickColor = "yellow";
//drop banana power up only when the second ball does not exist
if(ball2 == null){
dropBanana(bricks[i]);
}
}
else if(brickColor == "yellow"){
brickColor = "blue";
}
bricks[i].name = brickColor;
bricks[i].graphics.beginFill(brickColor);
bricks[i].graphics.drawRect(0, 0, BRICKS_WIDTH, BRICKS_HEIGHT);
bricks[i].graphics.endFill(); //complete the drawing of the shape
}
}
}
for(var i=0;i<bricks.length;i++)
{
if(ball2!=null && checkCollision(ball2,bricks[i]))
{
addToScore(100);
createjs.Sound.play("soundBreak");
console.log("Brick hit / New Score: "+score);
var brickColor = bricks[i].name;
//Destroy the brick only if this is blue brick, otherwise lower the brick color by 1
if(brickColor == "blue"){
destroyBrick(bricks[i]); //if there is collision, destroy the brick
bricks.splice(i,1); //remove the brick from array
i--; //minus array element index to get the following brick to move to the disappeared bricks position
}else{
if(brickColor == "black"){
brickColor = "orange";
//drop apple power up only when the paddle is at original length
if(paddle.scaleX == 1){ //when the paddle length is not increased yet
dropApple(bricks[i]);
}
}
else if(brickColor == "orange"){
brickColor = "yellow";
//drop banana power up only when the second ball does not exist
if(ball2 == null){ //when there is no second ball
dropBanana(bricks[i]);
}
| {
timerLength--;
console.log(timerLength);
updateTimerLine();
if(timerLength<1){
clearTimeout(timer);
loseLife();
}
} | identifier_body | |
scripts.js |
}
//hit right arrow
if(e.key =="Right" || e.key =="ArrowRight"){
if(loadingCompleted){
keyboardMoveRight = true;
}
}
if(e.key ==" "){
if(loadingCompleted){
startLevel();
}else{
alert("Please wait until loading complete");
}
}
}
function keyUpHandler(e)
{
//release left arrow
if(e.key =="Left" || e.key =="ArrowLeft"){
keyboardMoveLeft = false;
}
//release right arrow
if(e.key =="Right" || e.key =="ArrowRight"){
keyboardMoveRight = false;
}
}
function addToScore(points)
{
console.log("score added");
score+=points;
if(score > highScore){
highScore = score;
}
updateStatusLine();
}
function updateStatusLine()
{
scoreText.text = "Score: "+score + " / Lives: "+lives+" / High Score: "+highScore;
}
function createScoreText()
{
scoreText = new createjs.Text("Score: 0", "16px Arial", "#000000");
addToScore(0);
scoreText.x = stage.canvas.width/2 - 150;
scoreText.y = stage.canvas.height - 36;
stage.addChild(scoreText);
}
function updateTimerLine() //for display timer on the page
{
timerText.text = "Timer: "+ timerLength ;
}
function createTimerText(){
timerText = new createjs.Text("Timer: 90", "16px Arial", "#000000");
timerText.x = stage.canvas.width/2 - 100;
timerText.y = stage.canvas.height - 15;
stage.addChild(timerText);
}
function loseLife()
{
console.log("Lost a life");
stage.removeChild(ball);
ball = null;
stage.removeChild(ball2);
ball2 = null;
createjs.Sound.play("soundDeath");
clearTimeout(timer);
timerLength = levelArray[currentGameLevel-1].timelimit;
updateTimerLine();
lives--;
updateStatusLine();
createBall();
ball.xSpeed = 0;
ball.ySpeed = 0;
ball.x = paddle.x;
ball.y = paddle.y - PADDLE_HEIGHT/2 - BALL_RADIUS;
gameStarted = false; //switch to game pause state again
//reset high score for every life lost
localStorage.highScore = score;
//remove all the power ups
for(var i=0;i<appleArray.length;i++){
var apple = appleArray[i];
//remove the apple
stage.removeChild(apple);
}
appleArray = [];
for(var i=0;i<bananaArray.length;i++){
var banana = bananaArray[i];
//remove the banana
stage.removeChild(banana);
}
bananaArray = [];
//handle when the life is 0, reset the score and restart game
if(lives==0)
{
if(highScore<score)
{
highScore = score;
localStorage.highScore = score;
}
lives = 3; //reset the lives
score = 0; //reset the score
createBrickGrid(); //reset bricks
}
updateStatusLine();
}
function tick(event) //custom tick function
{
// stage.update(); //update the stage manually
//move paddle based on left and right key
if(keyboardMoveLeft)
{
console.log("Keyboard- Left");
paddle.x-=5;
}
if(keyboardMoveRight)
{
console.log("Keyboard- Right");
paddle.x+=5;
}
// one fix to make sure paddle not moving through the walls of stage
if(paddle.x+PADDLE_WIDTH/2>stage.canvas.width)
{
paddle.x = stage.canvas.width - PADDLE_WIDTH/2;
}
if(paddle.x-PADDLE_WIDTH/2<0)
{
paddle.x = PADDLE_WIDTH/2;
}
//make sure ball is in the middle surface of paddle and no action taken when its paused in every tick
if(!gameStarted)
{
ball.x = paddle.x;
ball.y = paddle.y - PADDLE_HEIGHT/2 - BALL_RADIUS;
stage.update();
return;
}
if(ball != null){
if(ball.up) // y axis value starts 0 at the top, so have ball's x and y value removed by 1 in each tick.
{
ball.y -= ball.ySpeed;
}
else
{
ball.y += ball.ySpeed; //if its going down ,move to the bottom of the screen
}
if(ball.right)//if its going right,move to the right.
{
ball.x += ball.xSpeed;
}
else
{
ball.x -= ball.xSpeed;
}
}
if(ball2 != null){
if(ball2.up) // y axis value starts 0 at the top, so have ball's x and y value removed by 1 in each tick.
{
ball2.y -= ball2.ySpeed;
}
else
{
ball2.y += ball2.ySpeed; //if its going down ,move to the bottom of the screen
}
if(ball2.right)//if its going right,move to the right.
{
ball2.x += ball2.xSpeed;
}
else
{
ball2.x -= ball2.xSpeed;
}
}
//move the power up down
for(var i=0;i<appleArray.length;i++){
var apple = appleArray[i];
apple.y++;
}
for(var i=0;i<bananaArray.length;i++){
var banana = bananaArray[i];
banana.y++;
}
//check if each brick in the array collides with the ball
for(var i=0;i<bricks.length;i++)
{
if(ball != null && checkCollision(ball,bricks[i]))
{
addToScore(100);
createjs.Sound.play("soundBreak");//add sound when hit happens
console.log("Brick hit / New Score: "+score);
var brickColor = bricks[i].name;
//Destroy the brick only if this is blue brick, otherwise lower the brick color by 1
if(brickColor == "blue"){
destroyBrick(bricks[i]); //if there is collision, destroy the brick
bricks.splice(i,1); //remove the brick from array
i--; //minus array element index to get the following brick to move to the disappeared bricks position
}else{
if(brickColor == "black"){
brickColor = "orange";
//drop apple power up only when the paddle is at original length
if(paddle.scaleX == 1){
dropApple(bricks[i]);
}
}
else if(brickColor == "orange"){
brickColor = "yellow";
//drop banana power up only when the second ball does not exist
if(ball2 == null){
dropBanana(bricks[i]);
}
}
else if(brickColor == "yellow"){
brickColor = "blue";
}
bricks[i].name = brickColor;
bricks[i].graphics.beginFill(brickColor);
bricks[i].graphics.drawRect(0, 0, BRICKS_WIDTH, BRICKS_HEIGHT);
bricks[i].graphics.endFill(); //complete the drawing of the shape
}
}
}
for(var i=0;i<bricks.length;i++)
{
if(ball2!=null && checkCollision(ball2,bricks[i]))
{
addToScore(100);
createjs.Sound.play("soundBreak");
console.log("Brick hit / New Score: "+score);
var brickColor = bricks[i].name;
//Destroy the brick only if this is blue brick, otherwise lower the brick color by 1
if(brickColor == "blue"){
destroyBrick(bricks[i]); //if there is collision, destroy the brick
bricks.splice(i,1); //remove the brick from array
i--; //minus array element index to get the following brick to move to the disappeared bricks position
}else{
if(brickColor == "black"){
brickColor = "orange";
//drop apple power up only when the paddle is at original length
if(paddle.scaleX == 1){ //when the paddle length is not increased yet
dropApple(bricks[i]);
}
}
else if(brickColor == "orange"){
brickColor = "yellow";
//drop banana power up only when the second ball does not exist
if(ball2 == null){ //when there is no second ball
dropBanana(bricks[i]);
}
}
else if(brickColor == "yellow"){
brickColor = "blue";
}
bricks[i].name = brickColor;
bricks[i].graphics.beginFill(brickColor);
bricks[i].graphics.drawRect(0, 0, BRICKS_WIDTH | {
keyboardMoveLeft = true;
} | conditional_block | |
scripts.js | Green").drawCircle(0,0, BALL_RADIUS); //circle radius is 8px
ball2.x = paddle.x;
ball2.y = paddle.y - PADDLE_HEIGHT/2 - BALL_RADIUS;
ball2.xSpeed = 3;
ball2.ySpeed = 3;
ball2.up = true;
ball2.right = true;
stage.addChild(ball2);
}
stage.update();
}
}
stage.update();
}
function checkCollisionForPowerUps(powerUpElement,hitElement)
{
//for the hit element ;get bounds to get the rectangle of a general element
var leftBorder = (hitElement.x - hitElement.getBounds().width/2); //get the left border
var rightBorder = (hitElement.x + hitElement.getBounds().width/2);
var topBorder = (hitElement.y - hitElement.getBounds().height/2);
var bottomBorder = (hitElement.y + hitElement.getBounds().height/2);
//current left,right top and bottom border of powerUpElement
var powerUpLeftBorder = powerUpElement.x - 20;
var powerUpRightBorder = powerUpElement.x + 20;
//var powerUpTopBorder = powerUpElement.y - 20;
var powerUpBottomBorder = powerUpElement.y + 50;
// if the statement is true, the power up is inside of rectangle of the hit element
if((powerUpLeftBorder<=rightBorder) && (powerUpRightBorder >= leftBorder) && (powerUpBottomBorder <= bottomBorder) && (powerUpBottomBorder >= topBorder))
{
return true;
}
return false;
}
function checkCollision(ballElement,hitElement)
{
//for the hit element ;get bounds to get the rectangle of a general element
var leftBorder = (hitElement.x - hitElement.getBounds().width/2); //get the left border
var rightBorder = (hitElement.x + hitElement.getBounds().width/2);
var topBorder = (hitElement.y - hitElement.getBounds().height/2);
var bottomBorder = (hitElement.y + hitElement.getBounds().height/2);
var previousBallLeftBorder = ballElement.lastX - BALL_RADIUS;
var previousBallRightBorder = ballElement.lastX + BALL_RADIUS;
var previousBallTopBorder = ballElement.lastY - BALL_RADIUS;
var previousBallBottomBorder = ballElement.lastY + BALL_RADIUS;
//current left,right top and bottom border of ball
var ballLeftBorder = ballElement.x - BALL_RADIUS;
var ballRightBorder = ballElement.x + BALL_RADIUS;
var ballTopBorder = ballElement.y - BALL_RADIUS;
var ballBottomBorder = ballElement.y + BALL_RADIUS;
// if the statement is true, the ball is inside of rectangle of the hit element
if((ballLeftBorder<=rightBorder) && (ballRightBorder >= leftBorder) && (ballTopBorder <= bottomBorder) && (ballBottomBorder >= topBorder))
{
if((ballTopBorder <= bottomBorder)&&(previousBallTopBorder > bottomBorder))
{
//the if statement above ensures that ball Hit from the bottom
ballElement.up = false;
ballElement.y = bottomBorder + BALL_RADIUS; //to make sure ball not entering the inside
}
if((ballBottomBorder >= topBorder)&&(previousBallBottomBorder<topBorder))
{
//Hit from the top
ballElement.up = true;
ballElement.y = topBorder - BALL_RADIUS;
}
if((ballLeftBorder<=rightBorder)&&(previousBallLeftBorder>rightBorder))
{
//Hit from the right
ballElement.right = true;
ballElement.x = rightBorder + BALL_RADIUS;
}
if((ballRightBorder >= leftBorder)&&(previousBallRightBorder < leftBorder))
{
//Hit from the left
ballElement.right = false;
ballElement.x = leftBorder - BALL_RADIUS;
}
//update the lastx and lasty
ballElement.lastX = ballElement.x;
ballElement.lastY = ballElement.y;
return true;
}
return false;
}
function dropApple(hitElement){
//create new apple based on the location of the brick
var apple = new createjs.Sprite(spriteSheet, "apple");
apple.x = hitElement.x;
apple.y = hitElement.y + 20;
stage.addChild(apple);
appleArray.push(apple);
}
function dropBanana(hitElement){
//create new banana base on the location of the brick
var banana = new createjs.Sprite(spriteSheet, "banana");
banana.x = hitElement.x ;
banana.y = hitElement.y + 20;
stage.addChild(banana);
bananaArray.push(banana);
}
function newBallXSpeedAfterCollision(ballElement,hitElement)
{
var startPoint = hitElement.x - hitElement.getBounds().width/2;
var midPoint = hitElement.x;
var endPoint = hitElement.x + hitElement.getBounds().width/2;
if(ballElement.x<midPoint) //once we hit left part
{
ballElement.right = false;
ballElement.xSpeed = FULL_X_SPEED - ((ballElement.x - startPoint)/(midPoint-startPoint)) * FULL_X_SPEED
}
else //once we hit the right part
{
ballElement.xSpeed = FULL_X_SPEED - ((endPoint - ballElement.x)/(endPoint-midPoint)) * FULL_X_SPEED
ballElement.right = true;
}
}
function createBrickGrid()
{
removeAllBricks();
var currentLevelConfig = levelArray[currentGameLevel-1];
for(var i = 0;i<14;i++) //i value is in charge of x value, means column
for(var j = 0;j<currentLevelConfig.rowCount;j++) //j value in charge of y value, means row
{
//If line break is set, and current row index is on even number, skip this row
if(currentLevelConfig.rowbreak == true && j%2 == 0){
continue;
}
//Only draw the columns of bricks as per the level configuration
if( (i>=currentLevelConfig.leftSpaceStartIndex && i<=currentLevelConfig.leftSpaceEndIndex) ||
(i>=currentLevelConfig.rightSpaceStartIndex && i<=currentLevelConfig.rightSpaceEndIndex)
){
var randomColor = getBrickColor(); //
//10 is the space between each brick
createBrick(i*(BRICKS_WIDTH+10)+40,j*(BRICKS_HEIGHT+5)+20, randomColor);
}
}
}
function getBrickColor(){
//Use math random to decide the color of this brick
var randomNumber = Math.random();
//if 0.6 - 1 return blue, 0.4-0.6 return yellow, 0.2-0.4 return orange, 0-0.2 return black
if(randomNumber > 0.6){
return "blue"; //level 1 brick
}else if(randomNumber > 0.4){
return "yellow"; //level 2 brick
}else if(randomNumber > 0.2){
return "orange"; //level 3 brick
}else{
return "black"; //level 4 brick
}
}
//Create single brick
function createBrick(x,y,color)
{
var brick = new createjs.Shape();
brick.graphics.beginFill(color);
brick.graphics.drawRect(0, 0, BRICKS_WIDTH, BRICKS_HEIGHT);
brick.graphics.endFill(); //complete the drawing of the shape
//Set the name with color, so that we can use the name to determine the processing of brick hit
brick.name = color;
//change the brick registration point to let it shrink from center instead of top left corner
brick.regX = BRICKS_WIDTH/2;
brick.regY = BRICKS_HEIGHT/2;
//move the brick to see the entire brick
brick.x = x;
brick.y = y;
brick.setBounds(brick.regX,brick.regY,BRICKS_WIDTH,BRICKS_HEIGHT);
stage.addChild(brick); //add created object to the stage
bricks.push(brick); //push each brick to bricks array
}
function removeAllBricks(){
//destroy all bricks
for(var i=0;i<bricks.length;i++){
destroyBrickInstantly(bricks[i]);
bricks.splice(i,1);
i--;
}
}
function createBall()
{
ball = new createjs.Shape();
ball.graphics.beginFill("Red").drawCircle(0,0, BALL_RADIUS); //circle radius is 8px
//move the ball to the middle of the paddle for initial position
ball.x = paddle.x;
ball.y = paddle.y- PADDLE_HEIGHT/2 - BALL_RADIUS; //make sure deduct half of paddle height and ball radius
stage.addChild(ball);
ball.up = true;
ball.right = true; //determine whether ball goes up or down
ball.xSpeed = 0; // initial state
ball.ySpeed = 0; //initial state
//save the previous location of ball
ball.lastX = 0;
ball.lastY = 0;
}
function | destroyBrick | identifier_name | |
scripts.js | arrow
if(e.key =="Right" || e.key =="ArrowRight"){
keyboardMoveRight = false;
}
}
function addToScore(points)
{
console.log("score added");
score+=points;
if(score > highScore){
highScore = score;
}
updateStatusLine();
}
function updateStatusLine()
{
scoreText.text = "Score: "+score + " / Lives: "+lives+" / High Score: "+highScore;
}
function createScoreText()
{
scoreText = new createjs.Text("Score: 0", "16px Arial", "#000000");
addToScore(0);
scoreText.x = stage.canvas.width/2 - 150;
scoreText.y = stage.canvas.height - 36;
stage.addChild(scoreText);
}
function updateTimerLine() //for display timer on the page
{
timerText.text = "Timer: "+ timerLength ;
}
function createTimerText(){
timerText = new createjs.Text("Timer: 90", "16px Arial", "#000000");
timerText.x = stage.canvas.width/2 - 100;
timerText.y = stage.canvas.height - 15;
stage.addChild(timerText);
}
function loseLife()
{
console.log("Lost a life");
stage.removeChild(ball);
ball = null;
stage.removeChild(ball2);
ball2 = null;
createjs.Sound.play("soundDeath");
clearTimeout(timer);
timerLength = levelArray[currentGameLevel-1].timelimit;
updateTimerLine();
lives--;
updateStatusLine();
createBall();
ball.xSpeed = 0;
ball.ySpeed = 0;
ball.x = paddle.x;
ball.y = paddle.y - PADDLE_HEIGHT/2 - BALL_RADIUS;
gameStarted = false; //switch to game pause state again
//reset high score for every life lost
localStorage.highScore = score;
//remove all the power ups
for(var i=0;i<appleArray.length;i++){
var apple = appleArray[i];
//remove the apple
stage.removeChild(apple);
}
appleArray = [];
for(var i=0;i<bananaArray.length;i++){
var banana = bananaArray[i];
//remove the banana
stage.removeChild(banana);
}
bananaArray = [];
//handle when the life is 0, reset the score and restart game
if(lives==0)
{
if(highScore<score)
{
highScore = score;
localStorage.highScore = score;
}
lives = 3; //reset the lives
score = 0; //reset the score
createBrickGrid(); //reset bricks
}
| // stage.update(); //update the stage manually
//move paddle based on left and right key
if(keyboardMoveLeft)
{
console.log("Keyboard- Left");
paddle.x-=5;
}
if(keyboardMoveRight)
{
console.log("Keyboard- Right");
paddle.x+=5;
}
// one fix to make sure paddle not moving through the walls of stage
if(paddle.x+PADDLE_WIDTH/2>stage.canvas.width)
{
paddle.x = stage.canvas.width - PADDLE_WIDTH/2;
}
if(paddle.x-PADDLE_WIDTH/2<0)
{
paddle.x = PADDLE_WIDTH/2;
}
//make sure ball is in the middle surface of paddle and no action taken when its paused in every tick
if(!gameStarted)
{
ball.x = paddle.x;
ball.y = paddle.y - PADDLE_HEIGHT/2 - BALL_RADIUS;
stage.update();
return;
}
if(ball != null){
if(ball.up) // y axis value starts 0 at the top, so have ball's x and y value removed by 1 in each tick.
{
ball.y -= ball.ySpeed;
}
else
{
ball.y += ball.ySpeed; //if its going down ,move to the bottom of the screen
}
if(ball.right)//if its going right,move to the right.
{
ball.x += ball.xSpeed;
}
else
{
ball.x -= ball.xSpeed;
}
}
if(ball2 != null){
if(ball2.up) // y axis value starts 0 at the top, so have ball's x and y value removed by 1 in each tick.
{
ball2.y -= ball2.ySpeed;
}
else
{
ball2.y += ball2.ySpeed; //if its going down ,move to the bottom of the screen
}
if(ball2.right)//if its going right,move to the right.
{
ball2.x += ball2.xSpeed;
}
else
{
ball2.x -= ball2.xSpeed;
}
}
//move the power up down
for(var i=0;i<appleArray.length;i++){
var apple = appleArray[i];
apple.y++;
}
for(var i=0;i<bananaArray.length;i++){
var banana = bananaArray[i];
banana.y++;
}
//check if each brick in the array collides with the ball
for(var i=0;i<bricks.length;i++)
{
if(ball != null && checkCollision(ball,bricks[i]))
{
addToScore(100);
createjs.Sound.play("soundBreak");//add sound when hit happens
console.log("Brick hit / New Score: "+score);
var brickColor = bricks[i].name;
//Destroy the brick only if this is blue brick, otherwise lower the brick color by 1
if(brickColor == "blue"){
destroyBrick(bricks[i]); //if there is collision, destroy the brick
bricks.splice(i,1); //remove the brick from array
i--; //minus array element index to get the following brick to move to the disappeared bricks position
}else{
if(brickColor == "black"){
brickColor = "orange";
//drop apple power up only when the paddle is at original length
if(paddle.scaleX == 1){
dropApple(bricks[i]);
}
}
else if(brickColor == "orange"){
brickColor = "yellow";
//drop banana power up only when the second ball does not exist
if(ball2 == null){
dropBanana(bricks[i]);
}
}
else if(brickColor == "yellow"){
brickColor = "blue";
}
bricks[i].name = brickColor;
bricks[i].graphics.beginFill(brickColor);
bricks[i].graphics.drawRect(0, 0, BRICKS_WIDTH, BRICKS_HEIGHT);
bricks[i].graphics.endFill(); //complete the drawing of the shape
}
}
}
for(var i=0;i<bricks.length;i++)
{
if(ball2!=null && checkCollision(ball2,bricks[i]))
{
addToScore(100);
createjs.Sound.play("soundBreak");
console.log("Brick hit / New Score: "+score);
var brickColor = bricks[i].name;
//Destroy the brick only if this is blue brick, otherwise lower the brick color by 1
if(brickColor == "blue"){
destroyBrick(bricks[i]); //if there is collision, destroy the brick
bricks.splice(i,1); //remove the brick from array
i--; //minus array element index to get the following brick to move to the disappeared bricks position
}else{
if(brickColor == "black"){
brickColor = "orange";
//drop apple power up only when the paddle is at original length
if(paddle.scaleX == 1){ //when the paddle length is not increased yet
dropApple(bricks[i]);
}
}
else if(brickColor == "orange"){
brickColor = "yellow";
//drop banana power up only when the second ball does not exist
if(ball2 == null){ //when there is no second ball
dropBanana(bricks[i]);
}
}
else if(brickColor == "yellow"){
brickColor = "blue";
}
bricks[i].name = brickColor;
bricks[i].graphics.beginFill(brickColor);
bricks[i].graphics.drawRect(0, 0, BRICKS_WIDTH, BRICKS_HEIGHT);
bricks[i].graphics.endFill(); //complete the drawing of the shape
}
}
}
//If no bricks left, you win
if(bricks.length == 0){
stage.update();
createjs.Sound.play("soundDeath");
clearTimeout(timer);
gameStarted = false;
alert("You won!");
window.location.href = "win.html";
}
//if the red ball exists
if(ball != null){
if(checkCollision(ball,paddle))
{
createjs.Sound.play("soundShot");
newBallX | updateStatusLine();
}
function tick(event) //custom tick function
{ | random_line_split |
spp-final.py | g_dir + '\\cache\\spp' + pres_wd + '\\' + dir_wd)
for file_name in files_lst:
print (os.getcwd() + '\\' + file_name)
self.ftp_handle.retrbinary("RETR " + file_name, open(file_name, 'wb').write)
self.files_cached.append(os.getcwd() + '\\' + file_name)
os.chdir(self.prog_dir + '\\cache\\spp' + pres_wd)
self.ftp_handle.cwd('..')
else:
files_lst = [x for x in self.ftp_handle.nlst() if '.' in x and x.split('-')[1] == 'OR' and datetime.datetime.strptime(x.split('-')[3][:8], "%Y%m%d") >= self.start_dt and datetime.datetime.strptime(x.split('-')[3][:8], "%Y%m%d") <= self.end_dt]
if len(files_lst) > 0:
if os.path.isdir(self.prog_dir + '\\cache\\spp' + pres_wd + '\\' + dir_wd) == False:
os.makedirs(self.prog_dir + '\\cache\\spp' + pres_wd + '\\' + dir_wd)
os.chdir(self.prog_dir + '\\cache\\spp' + pres_wd + '\\' + dir_wd)
for file_name in files_lst:
print (os.getcwd() + '\\' + file_name)
self.ftp_handle.retrbinary("RETR " + file_name, open(file_name, 'wb').write)
self.files_cached.append(os.getcwd() + '\\' + file_name)
for each_dir in dir_lst:
self.fetch_files(self.ftp_handle.pwd().replace('/', '\\'), each_dir)
self.ftp_handle.cwd('..')
except Exception as e:
print (str(e))
def __str__(self):
try:
self.ftp_handle.quit()
os.chdir(self.prog_dir + '\\cache\\spp')
fwrite = open(self.path_name[1:-1].replace('\\', '-') + '.txt', 'w')
fwrite.write('File(s) cached are as follows:\n')
for file_name in self.files_cached:
fwrite.write(file_name + '\n')
fwrite.close()
os.chdir(self.prog_dir)
return ("\nFile(s) cached: " + ', '.join(self.files_cached) + '\n')
except Exception as e:
print (str(e))
def etl_file_data(cache_file):
try:
fread = open(cache_file, 'r')
flines = [x.rstrip('\n') for x in fread.readlines() if x.endswith('.csv\n')]
fread.close()
cnx = MySQLdb.connect(user = 'not-published', passwd = 'not-published', host = 'not-published', db = 'not-published')
cursor = cnx.cursor()
cursor.execute("SELECT market_id FROM market_meta USE INDEX (PRIMARY) WHERE market_name = 'SPP'")
mkt_id = cursor.fetchone()[0]
i = 1
for fname in flines:
print ('Current file: ' + fname + '\t' + 'Percent complete: ' + str(round((float(i)*100)/len(flines), 2)) + ' %')
fread = open(fname, 'r')
frows = csv.reader(fread, delimiter = ',')
next(frows, None)
offer_base_rs = []
ins_perf = True
for row in frows:
if len(row) > 0 and row[2].strip() != '' and row[3].strip() != '' and row[4].strip() != '':
if ins_perf == True:
cursor.execute("SELECT offer_id, identifier_1, identifier_2 FROM offer_base USE INDEX (IDX_OFFER_BASE_MARKET_ID) WHERE market_id = %s", (mkt_id,))
offer_base_rs = list(cursor.fetchall())
if len(offer_base_rs) > 0:
off_check = [x for (x, y, z) in offer_base_rs if (row[2], '0') == (y, z)]
if len(off_check) > 0:
off_id = off_check[0]
ins_perf = False
else:
cursor.execute("INSERT INTO offer_base (identifier_1, identifier_2, region_name, market_id) VALUES (%s, %s, %s, %s)", (row[2], '0', "SPP", mkt_id))
ins_perf = True
cursor.execute("SELECT offer_id FROM offer_base USE INDEX (IDX_OFFER_BASE_ID1_ID2) WHERE identifier_1 = %s AND identifier_2 = %s", (row[2], '0'))
off_id = cursor.fetchone()[0]
else:
cursor.execute("INSERT INTO offer_base (identifier_1, identifier_2, region_name, market_id) VALUES (%s, %s, %s, %s)", (row[2], '0', "SPP", mkt_id))
ins_perf = True
cursor.execute("SELECT offer_id FROM offer_base USE INDEX (IDX_OFFER_BASE_ID1_ID2) WHERE identifier_1 = %s AND identifier_2 = %s", (row[2], '0'))
off_id = cursor.fetchone()[0]
else:
off_check = [x for (x, y, z) in offer_base_rs if (row[2], '0') == (y, z)]
if len(off_check) > 0:
|
else:
cursor.execute("INSERT INTO offer_base (identifier_1, identifier_2, region_name, market_id) VALUES (%s, %s, %s, %s)", (row[2], '0', "SPP", mkt_id))
ins_perf = True
cursor.execute("SELECT offer_id FROM offer_base USE INDEX (IDX_OFFER_BASE_ID1_ID2) WHERE identifier_1 = %s AND identifier_2 = %s", (row[2], '0'))
off_id = cursor.fetchone()[0]
if fname.split('\\')[-1].split('-')[0].lower() == 'da':
mrun_id = 'DAM'
elif fname.split('\\')[-1].split('-')[0].lower() == 'rtbm':
mrun_id = 'RTBM'
intv_start = (datetime.datetime.strptime(row[0], "%m/%d/%Y %H:%M:%S") - datetime.timedelta(hours = 1, minutes = 0)).strftime("%Y-%m-%d %H:%M:%S")
intv_end = (datetime.datetime.strptime(row[0], "%m/%d/%Y %H:%M:%S")).strftime("%Y-%m-%d %H:%M:%S")
intv_dt = intv_start[:10]
hr, iv = int(intv_start[11:13]), 0
intv_id = str(off_id) + '-' + mrun_id + '-' + intv_start[2:4] + intv_start[5:7] + intv_start[8:10] + intv_start[11:13] + intv_start[14:16]
cursor.execute("SELECT interval_id FROM interval_meta USE INDEX (PRIMARY) WHERE interval_id = %s", (intv_id,))
intvid_rs = cursor.fetchone()
if intvid_rs == None:
cursor.execute("INSERT INTO interval_meta (interval_id, offer_id, market_id, mkt_run_id, interval_dt, interval_start, interval_end, opr_hour, opr_interval) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s)", (intv_id, off_id, mkt_id, mrun_id, intv_dt, intv_start, intv_end, hr, iv))
cursor.execute("SELECT interval_id FROM spp_results USE INDEX (IDX_SPP_RESULTS_INTERVAL_ID) WHERE interval_id = %s", (intv_id,))
spp_rs = cursor.fetchone()
if spp_rs == None:
spp_rs = []
else:
spp_rs = list(spp_rs)
xml_item_map = {'Capability Offer Reg-Down': 'coreg_down', 'Capability Offer Reg-Up': 'coreg_up', 'Mileage Factor Reg-Down': 'mfreg_down', 'Mileage Factor Reg-Up': 'mfreg_up', 'Mileage Offer Reg-Down': 'moreg_down', 'Mileage Offer Reg-Up': 'moreg_up', 'SPIN': 'spin_price', 'SUPP': 'supp_price'}
if row[3].strip() in xml_item_map.keys():
if len(spp_rs) > 0:
qry = "UPDATE spp_results SET " + xml_item_map[row[3].strip()] + " = %s WHERE interval_id = %s"
cursor.execute(qry, (float(row[4].strip()), intv_id))
else:
qry = "INSERT INTO spp_results (interval_id, " + xml_item_map[row[3].strip()] + ") VALUES (%s, %s)"
cursor.execute(qry, (intv_id, float(row[4])))
else:
print (row[3].strip() + " is a new ASProduct for the interval with interval_id: " + intv_id)
cnx | off_id = off_check[0]
ins_perf = False | conditional_block |
spp-final.py | (self, server, path, start_dt, end_dt, prog_dir):
self.files_cached = []
try:
self.ftp_handle = ftplib.FTP(server)
self.ftp_handle.login()
self.path_name = path
self.start_dt = datetime.datetime.strptime(start_dt, "%m-%d-%Y")
self.end_dt = datetime.datetime.strptime(end_dt, "%m-%d-%Y")
self.prog_dir = prog_dir
except Exception as e:
print (str(e))
def fetch_files(self, pres_wd, dir_wd):
try:
try:
self.ftp_handle.voidcmd("NOOP")
except Exception as e:
print (str(e))
self.ftp_handle = ftplib.FTP("pubftp.spp.org")
self.ftp_handle.login()
self.ftp_handle.cwd(pres_wd.replace('\\', '/') + '/' + dir_wd)
dir_lst = [x for x in self.ftp_handle.nlst() if '.' not in x]
if dir_lst == []:
files_lst = [x for x in self.ftp_handle.nlst() if '.' in x and x.split('-')[1] == 'OR' and datetime.datetime.strptime(x.split('-')[3][:8], "%Y%m%d") >= self.start_dt and datetime.datetime.strptime(x.split('-')[3][:8], "%Y%m%d") <= self.end_dt]
if len(files_lst) > 0:
if os.path.isdir(self.prog_dir + '\\cache\\spp' + pres_wd + '\\' + dir_wd) == False:
os.makedirs(self.prog_dir + '\\cache\\spp' + pres_wd + '\\' + dir_wd)
os.chdir(self.prog_dir + '\\cache\\spp' + pres_wd + '\\' + dir_wd)
for file_name in files_lst:
print (os.getcwd() + '\\' + file_name)
self.ftp_handle.retrbinary("RETR " + file_name, open(file_name, 'wb').write)
self.files_cached.append(os.getcwd() + '\\' + file_name)
os.chdir(self.prog_dir + '\\cache\\spp' + pres_wd)
self.ftp_handle.cwd('..')
else:
files_lst = [x for x in self.ftp_handle.nlst() if '.' in x and x.split('-')[1] == 'OR' and datetime.datetime.strptime(x.split('-')[3][:8], "%Y%m%d") >= self.start_dt and datetime.datetime.strptime(x.split('-')[3][:8], "%Y%m%d") <= self.end_dt]
if len(files_lst) > 0:
if os.path.isdir(self.prog_dir + '\\cache\\spp' + pres_wd + '\\' + dir_wd) == False:
os.makedirs(self.prog_dir + '\\cache\\spp' + pres_wd + '\\' + dir_wd)
os.chdir(self.prog_dir + '\\cache\\spp' + pres_wd + '\\' + dir_wd)
for file_name in files_lst:
print (os.getcwd() + '\\' + file_name)
self.ftp_handle.retrbinary("RETR " + file_name, open(file_name, 'wb').write)
self.files_cached.append(os.getcwd() + '\\' + file_name)
for each_dir in dir_lst:
self.fetch_files(self.ftp_handle.pwd().replace('/', '\\'), each_dir)
self.ftp_handle.cwd('..')
except Exception as e:
print (str(e))
def __str__(self):
try:
self.ftp_handle.quit()
os.chdir(self.prog_dir + '\\cache\\spp')
fwrite = open(self.path_name[1:-1].replace('\\', '-') + '.txt', 'w')
fwrite.write('File(s) cached are as follows:\n')
for file_name in self.files_cached:
fwrite.write(file_name + '\n')
fwrite.close()
os.chdir(self.prog_dir)
return ("\nFile(s) cached: " + ', '.join(self.files_cached) + '\n')
except Exception as e:
print (str(e))
def etl_file_data(cache_file):
try:
fread = open(cache_file, 'r')
flines = [x.rstrip('\n') for x in fread.readlines() if x.endswith('.csv\n')]
fread.close()
cnx = MySQLdb.connect(user = 'not-published', passwd = 'not-published', host = 'not-published', db = 'not-published')
cursor = cnx.cursor()
cursor.execute("SELECT market_id FROM market_meta USE INDEX (PRIMARY) WHERE market_name = 'SPP'")
mkt_id = cursor.fetchone()[0]
i = 1
for fname in flines:
print ('Current file: ' + fname + '\t' + 'Percent complete: ' + str(round((float(i)*100)/len(flines), 2)) + ' %')
fread = open(fname, 'r')
frows = csv.reader(fread, delimiter = ',')
next(frows, None)
offer_base_rs = []
ins_perf = True
for row in frows:
if len(row) > 0 and row[2].strip() != '' and row[3].strip() != '' and row[4].strip() != '':
if ins_perf == True:
cursor.execute("SELECT offer_id, identifier_1, identifier_2 FROM offer_base USE INDEX (IDX_OFFER_BASE_MARKET_ID) WHERE market_id = %s", (mkt_id,))
offer_base_rs = list(cursor.fetchall())
if len(offer_base_rs) > 0:
off_check = [x for (x, y, z) in offer_base_rs if (row[2], '0') == (y, z)]
if len(off_check) > 0:
off_id = off_check[0]
ins_perf = False
else:
cursor.execute("INSERT INTO offer_base (identifier_1, identifier_2, region_name, market_id) VALUES (%s, %s, %s, %s)", (row[2], '0', "SPP", mkt_id))
ins_perf = True
cursor.execute("SELECT offer_id FROM offer_base USE INDEX (IDX_OFFER_BASE_ID1_ID2) WHERE identifier_1 = %s AND identifier_2 = %s", (row[2], '0'))
off_id = cursor.fetchone()[0]
else:
cursor.execute("INSERT INTO offer_base (identifier_1, identifier_2, region_name, market_id) VALUES (%s, %s, %s, %s)", (row[2], '0', "SPP", mkt_id))
ins_perf = True
cursor.execute("SELECT offer_id FROM offer_base USE INDEX (IDX_OFFER_BASE_ID1_ID2) WHERE identifier_1 = %s AND identifier_2 = %s", (row[2], '0'))
off_id = cursor.fetchone()[0]
else:
off_check = [x for (x, y, z) in offer_base_rs if (row[2], '0') == (y, z)]
if len(off_check) > 0:
off_id = off_check[0]
ins_perf = False
else:
cursor.execute("INSERT INTO offer_base (identifier_1, identifier_2, region_name, market_id) VALUES (%s, %s, %s, %s)", (row[2], '0', "SPP", mkt_id))
ins_perf = True
cursor.execute("SELECT offer_id FROM offer_base USE INDEX (IDX_OFFER_BASE_ID1_ID2) WHERE identifier_1 = %s AND identifier_2 = %s", (row[2], '0'))
off_id = cursor.fetchone()[0]
if fname.split('\\')[-1].split('-')[0].lower() == 'da':
mrun_id = 'DAM'
elif fname.split('\\')[-1].split('-')[0].lower() == 'rtbm':
mrun_id = 'RTBM'
intv_start = (datetime.datetime.strptime(row[0], "%m/%d/%Y %H:%M:%S") - datetime.timedelta(hours = 1, minutes = 0)).strftime("%Y-%m-%d %H:%M:%S")
intv_end = (datetime.datetime.strptime(row[0], "%m/%d/%Y %H:%M:%S")).strftime("%Y-%m-%d %H:%M:%S")
intv_dt = intv_start[:10]
hr, iv = int(intv_start[11:13]), 0
intv_id = str(off_id) + '-' + mrun_id + '-' + intv_start[2:4] + intv_start[5:7] + intv_start[8:10] + intv_start[11:13] + intv_start[14:16]
cursor.execute("SELECT interval_id FROM interval_meta USE INDEX (PRIMARY) WHERE interval_id = %s", (intv_id,))
intvid_rs = cursor.fetchone()
if intvid_rs == None:
cursor.execute("INSERT INTO interval_meta (interval_id, offer_id, market_id, mkt_run_id, interval_dt, interval_start, interval_end, opr_hour, opr_interval) VALUES (%s, %s, %s, %s, %s, %s, | __init__ | identifier_name | |
spp-final.py | g_dir + '\\cache\\spp' + pres_wd + '\\' + dir_wd)
for file_name in files_lst:
print (os.getcwd() + '\\' + file_name)
self.ftp_handle.retrbinary("RETR " + file_name, open(file_name, 'wb').write)
self.files_cached.append(os.getcwd() + '\\' + file_name)
os.chdir(self.prog_dir + '\\cache\\spp' + pres_wd)
self.ftp_handle.cwd('..')
else:
files_lst = [x for x in self.ftp_handle.nlst() if '.' in x and x.split('-')[1] == 'OR' and datetime.datetime.strptime(x.split('-')[3][:8], "%Y%m%d") >= self.start_dt and datetime.datetime.strptime(x.split('-')[3][:8], "%Y%m%d") <= self.end_dt]
if len(files_lst) > 0:
if os.path.isdir(self.prog_dir + '\\cache\\spp' + pres_wd + '\\' + dir_wd) == False:
os.makedirs(self.prog_dir + '\\cache\\spp' + pres_wd + '\\' + dir_wd)
os.chdir(self.prog_dir + '\\cache\\spp' + pres_wd + '\\' + dir_wd)
for file_name in files_lst:
print (os.getcwd() + '\\' + file_name)
self.ftp_handle.retrbinary("RETR " + file_name, open(file_name, 'wb').write)
self.files_cached.append(os.getcwd() + '\\' + file_name)
for each_dir in dir_lst:
self.fetch_files(self.ftp_handle.pwd().replace('/', '\\'), each_dir)
self.ftp_handle.cwd('..')
except Exception as e:
print (str(e))
def __str__(self):
|
def etl_file_data(cache_file):
try:
fread = open(cache_file, 'r')
flines = [x.rstrip('\n') for x in fread.readlines() if x.endswith('.csv\n')]
fread.close()
cnx = MySQLdb.connect(user = 'not-published', passwd = 'not-published', host = 'not-published', db = 'not-published')
cursor = cnx.cursor()
cursor.execute("SELECT market_id FROM market_meta USE INDEX (PRIMARY) WHERE market_name = 'SPP'")
mkt_id = cursor.fetchone()[0]
i = 1
for fname in flines:
print ('Current file: ' + fname + '\t' + 'Percent complete: ' + str(round((float(i)*100)/len(flines), 2)) + ' %')
fread = open(fname, 'r')
frows = csv.reader(fread, delimiter = ',')
next(frows, None)
offer_base_rs = []
ins_perf = True
for row in frows:
if len(row) > 0 and row[2].strip() != '' and row[3].strip() != '' and row[4].strip() != '':
if ins_perf == True:
cursor.execute("SELECT offer_id, identifier_1, identifier_2 FROM offer_base USE INDEX (IDX_OFFER_BASE_MARKET_ID) WHERE market_id = %s", (mkt_id,))
offer_base_rs = list(cursor.fetchall())
if len(offer_base_rs) > 0:
off_check = [x for (x, y, z) in offer_base_rs if (row[2], '0') == (y, z)]
if len(off_check) > 0:
off_id = off_check[0]
ins_perf = False
else:
cursor.execute("INSERT INTO offer_base (identifier_1, identifier_2, region_name, market_id) VALUES (%s, %s, %s, %s)", (row[2], '0', "SPP", mkt_id))
ins_perf = True
cursor.execute("SELECT offer_id FROM offer_base USE INDEX (IDX_OFFER_BASE_ID1_ID2) WHERE identifier_1 = %s AND identifier_2 = %s", (row[2], '0'))
off_id = cursor.fetchone()[0]
else:
cursor.execute("INSERT INTO offer_base (identifier_1, identifier_2, region_name, market_id) VALUES (%s, %s, %s, %s)", (row[2], '0', "SPP", mkt_id))
ins_perf = True
cursor.execute("SELECT offer_id FROM offer_base USE INDEX (IDX_OFFER_BASE_ID1_ID2) WHERE identifier_1 = %s AND identifier_2 = %s", (row[2], '0'))
off_id = cursor.fetchone()[0]
else:
off_check = [x for (x, y, z) in offer_base_rs if (row[2], '0') == (y, z)]
if len(off_check) > 0:
off_id = off_check[0]
ins_perf = False
else:
cursor.execute("INSERT INTO offer_base (identifier_1, identifier_2, region_name, market_id) VALUES (%s, %s, %s, %s)", (row[2], '0', "SPP", mkt_id))
ins_perf = True
cursor.execute("SELECT offer_id FROM offer_base USE INDEX (IDX_OFFER_BASE_ID1_ID2) WHERE identifier_1 = %s AND identifier_2 = %s", (row[2], '0'))
off_id = cursor.fetchone()[0]
if fname.split('\\')[-1].split('-')[0].lower() == 'da':
mrun_id = 'DAM'
elif fname.split('\\')[-1].split('-')[0].lower() == 'rtbm':
mrun_id = 'RTBM'
intv_start = (datetime.datetime.strptime(row[0], "%m/%d/%Y %H:%M:%S") - datetime.timedelta(hours = 1, minutes = 0)).strftime("%Y-%m-%d %H:%M:%S")
intv_end = (datetime.datetime.strptime(row[0], "%m/%d/%Y %H:%M:%S")).strftime("%Y-%m-%d %H:%M:%S")
intv_dt = intv_start[:10]
hr, iv = int(intv_start[11:13]), 0
intv_id = str(off_id) + '-' + mrun_id + '-' + intv_start[2:4] + intv_start[5:7] + intv_start[8:10] + intv_start[11:13] + intv_start[14:16]
cursor.execute("SELECT interval_id FROM interval_meta USE INDEX (PRIMARY) WHERE interval_id = %s", (intv_id,))
intvid_rs = cursor.fetchone()
if intvid_rs == None:
cursor.execute("INSERT INTO interval_meta (interval_id, offer_id, market_id, mkt_run_id, interval_dt, interval_start, interval_end, opr_hour, opr_interval) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s)", (intv_id, off_id, mkt_id, mrun_id, intv_dt, intv_start, intv_end, hr, iv))
cursor.execute("SELECT interval_id FROM spp_results USE INDEX (IDX_SPP_RESULTS_INTERVAL_ID) WHERE interval_id = %s", (intv_id,))
spp_rs = cursor.fetchone()
if spp_rs == None:
spp_rs = []
else:
spp_rs = list(spp_rs)
xml_item_map = {'Capability Offer Reg-Down': 'coreg_down', 'Capability Offer Reg-Up': 'coreg_up', 'Mileage Factor Reg-Down': 'mfreg_down', 'Mileage Factor Reg-Up': 'mfreg_up', 'Mileage Offer Reg-Down': 'moreg_down', 'Mileage Offer Reg-Up': 'moreg_up', 'SPIN': 'spin_price', 'SUPP': 'supp_price'}
if row[3].strip() in xml_item_map.keys():
if len(spp_rs) > 0:
qry = "UPDATE spp_results SET " + xml_item_map[row[3].strip()] + " = %s WHERE interval_id = %s"
cursor.execute(qry, (float(row[4].strip()), intv_id))
else:
qry = "INSERT INTO spp_results (interval_id, " + xml_item_map[row[3].strip()] + ") VALUES (%s, %s)"
cursor.execute(qry, (intv_id, float(row[4])))
else:
print (row[3].strip() + " is a new ASProduct for the interval with interval_id: " + intv_id)
c | try:
self.ftp_handle.quit()
os.chdir(self.prog_dir + '\\cache\\spp')
fwrite = open(self.path_name[1:-1].replace('\\', '-') + '.txt', 'w')
fwrite.write('File(s) cached are as follows:\n')
for file_name in self.files_cached:
fwrite.write(file_name + '\n')
fwrite.close()
os.chdir(self.prog_dir)
return ("\nFile(s) cached: " + ', '.join(self.files_cached) + '\n')
except Exception as e:
print (str(e)) | identifier_body |
spp-final.py | _dir + '\\cache\\spp' + pres_wd + '\\' + dir_wd)
for file_name in files_lst:
print (os.getcwd() + '\\' + file_name)
self.ftp_handle.retrbinary("RETR " + file_name, open(file_name, 'wb').write)
self.files_cached.append(os.getcwd() + '\\' + file_name)
os.chdir(self.prog_dir + '\\cache\\spp' + pres_wd)
self.ftp_handle.cwd('..')
else:
files_lst = [x for x in self.ftp_handle.nlst() if '.' in x and x.split('-')[1] == 'OR' and datetime.datetime.strptime(x.split('-')[3][:8], "%Y%m%d") >= self.start_dt and datetime.datetime.strptime(x.split('-')[3][:8], "%Y%m%d") <= self.end_dt]
if len(files_lst) > 0:
if os.path.isdir(self.prog_dir + '\\cache\\spp' + pres_wd + '\\' + dir_wd) == False:
os.makedirs(self.prog_dir + '\\cache\\spp' + pres_wd + '\\' + dir_wd)
os.chdir(self.prog_dir + '\\cache\\spp' + pres_wd + '\\' + dir_wd)
for file_name in files_lst:
print (os.getcwd() + '\\' + file_name)
self.ftp_handle.retrbinary("RETR " + file_name, open(file_name, 'wb').write)
self.files_cached.append(os.getcwd() + '\\' + file_name)
for each_dir in dir_lst:
self.fetch_files(self.ftp_handle.pwd().replace('/', '\\'), each_dir)
self.ftp_handle.cwd('..')
except Exception as e:
print (str(e))
def __str__(self):
try:
self.ftp_handle.quit()
os.chdir(self.prog_dir + '\\cache\\spp')
fwrite = open(self.path_name[1:-1].replace('\\', '-') + '.txt', 'w')
fwrite.write('File(s) cached are as follows:\n')
for file_name in self.files_cached:
fwrite.write(file_name + '\n')
fwrite.close()
os.chdir(self.prog_dir)
return ("\nFile(s) cached: " + ', '.join(self.files_cached) + '\n')
except Exception as e:
print (str(e))
def etl_file_data(cache_file):
try:
fread = open(cache_file, 'r')
flines = [x.rstrip('\n') for x in fread.readlines() if x.endswith('.csv\n')]
fread.close()
cnx = MySQLdb.connect(user = 'not-published', passwd = 'not-published', host = 'not-published', db = 'not-published')
cursor = cnx.cursor()
cursor.execute("SELECT market_id FROM market_meta USE INDEX (PRIMARY) WHERE market_name = 'SPP'")
mkt_id = cursor.fetchone()[0]
i = 1
for fname in flines:
print ('Current file: ' + fname + '\t' + 'Percent complete: ' + str(round((float(i)*100)/len(flines), 2)) + ' %')
fread = open(fname, 'r')
frows = csv.reader(fread, delimiter = ',')
| ins_perf = True
for row in frows:
if len(row) > 0 and row[2].strip() != '' and row[3].strip() != '' and row[4].strip() != '':
if ins_perf == True:
cursor.execute("SELECT offer_id, identifier_1, identifier_2 FROM offer_base USE INDEX (IDX_OFFER_BASE_MARKET_ID) WHERE market_id = %s", (mkt_id,))
offer_base_rs = list(cursor.fetchall())
if len(offer_base_rs) > 0:
off_check = [x for (x, y, z) in offer_base_rs if (row[2], '0') == (y, z)]
if len(off_check) > 0:
off_id = off_check[0]
ins_perf = False
else:
cursor.execute("INSERT INTO offer_base (identifier_1, identifier_2, region_name, market_id) VALUES (%s, %s, %s, %s)", (row[2], '0', "SPP", mkt_id))
ins_perf = True
cursor.execute("SELECT offer_id FROM offer_base USE INDEX (IDX_OFFER_BASE_ID1_ID2) WHERE identifier_1 = %s AND identifier_2 = %s", (row[2], '0'))
off_id = cursor.fetchone()[0]
else:
cursor.execute("INSERT INTO offer_base (identifier_1, identifier_2, region_name, market_id) VALUES (%s, %s, %s, %s)", (row[2], '0', "SPP", mkt_id))
ins_perf = True
cursor.execute("SELECT offer_id FROM offer_base USE INDEX (IDX_OFFER_BASE_ID1_ID2) WHERE identifier_1 = %s AND identifier_2 = %s", (row[2], '0'))
off_id = cursor.fetchone()[0]
else:
off_check = [x for (x, y, z) in offer_base_rs if (row[2], '0') == (y, z)]
if len(off_check) > 0:
off_id = off_check[0]
ins_perf = False
else:
cursor.execute("INSERT INTO offer_base (identifier_1, identifier_2, region_name, market_id) VALUES (%s, %s, %s, %s)", (row[2], '0', "SPP", mkt_id))
ins_perf = True
cursor.execute("SELECT offer_id FROM offer_base USE INDEX (IDX_OFFER_BASE_ID1_ID2) WHERE identifier_1 = %s AND identifier_2 = %s", (row[2], '0'))
off_id = cursor.fetchone()[0]
if fname.split('\\')[-1].split('-')[0].lower() == 'da':
mrun_id = 'DAM'
elif fname.split('\\')[-1].split('-')[0].lower() == 'rtbm':
mrun_id = 'RTBM'
intv_start = (datetime.datetime.strptime(row[0], "%m/%d/%Y %H:%M:%S") - datetime.timedelta(hours = 1, minutes = 0)).strftime("%Y-%m-%d %H:%M:%S")
intv_end = (datetime.datetime.strptime(row[0], "%m/%d/%Y %H:%M:%S")).strftime("%Y-%m-%d %H:%M:%S")
intv_dt = intv_start[:10]
hr, iv = int(intv_start[11:13]), 0
intv_id = str(off_id) + '-' + mrun_id + '-' + intv_start[2:4] + intv_start[5:7] + intv_start[8:10] + intv_start[11:13] + intv_start[14:16]
cursor.execute("SELECT interval_id FROM interval_meta USE INDEX (PRIMARY) WHERE interval_id = %s", (intv_id,))
intvid_rs = cursor.fetchone()
if intvid_rs == None:
cursor.execute("INSERT INTO interval_meta (interval_id, offer_id, market_id, mkt_run_id, interval_dt, interval_start, interval_end, opr_hour, opr_interval) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s)", (intv_id, off_id, mkt_id, mrun_id, intv_dt, intv_start, intv_end, hr, iv))
cursor.execute("SELECT interval_id FROM spp_results USE INDEX (IDX_SPP_RESULTS_INTERVAL_ID) WHERE interval_id = %s", (intv_id,))
spp_rs = cursor.fetchone()
if spp_rs == None:
spp_rs = []
else:
spp_rs = list(spp_rs)
xml_item_map = {'Capability Offer Reg-Down': 'coreg_down', 'Capability Offer Reg-Up': 'coreg_up', 'Mileage Factor Reg-Down': 'mfreg_down', 'Mileage Factor Reg-Up': 'mfreg_up', 'Mileage Offer Reg-Down': 'moreg_down', 'Mileage Offer Reg-Up': 'moreg_up', 'SPIN': 'spin_price', 'SUPP': 'supp_price'}
if row[3].strip() in xml_item_map.keys():
if len(spp_rs) > 0:
qry = "UPDATE spp_results SET " + xml_item_map[row[3].strip()] + " = %s WHERE interval_id = %s"
cursor.execute(qry, (float(row[4].strip()), intv_id))
else:
qry = "INSERT INTO spp_results (interval_id, " + xml_item_map[row[3].strip()] + ") VALUES (%s, %s)"
cursor.execute(qry, (intv_id, float(row[4])))
else:
print (row[3].strip() + " is a new ASProduct for the interval with interval_id: " + intv_id)
| next(frows, None)
offer_base_rs = []
| random_line_split |
TypeScriptHelpers.ts | names for late-bound symbols derived from `unique symbol` declarations
// which have the form of "__@<variableName>@<symbolId>", i.e. "__@someSymbol@12345".
private static readonly _uniqueSymbolNameRegExp: RegExp = /^__@.*@\d+$/;
/**
* This traverses any symbol aliases to find the original place where an item was defined.
* For example, suppose a class is defined as "export default class MyClass { }"
* but exported from the package's index.ts like this:
*
* export { default as _MyClass } from './MyClass';
*
* In this example, calling followAliases() on the _MyClass symbol will return the
* original definition of MyClass, traversing any intermediary places where the
* symbol was imported and re-exported.
*/
public static followAliases(symbol: ts.Symbol, typeChecker: ts.TypeChecker): ts.Symbol {
let current: ts.Symbol = symbol;
for (;;) {
if (!(current.flags & ts.SymbolFlags.Alias)) {
break;
}
const currentAlias: ts.Symbol = typeChecker.getAliasedSymbol(current);
if (!currentAlias || currentAlias === current) {
break;
}
current = currentAlias;
}
return current;
}
/**
* Returns true if TypeScriptHelpers.followAliases() would return something different
* from the input `symbol`.
*/
public static isFollowableAlias(symbol: ts.Symbol, typeChecker: ts.TypeChecker): boolean {
if (!(symbol.flags & ts.SymbolFlags.Alias)) {
return false;
}
const alias: ts.Symbol = typeChecker.getAliasedSymbol(symbol);
if (!alias || alias === symbol) {
return false;
}
return true;
}
/**
* Certain virtual symbols do not have any declarations. For example, `ts.TypeChecker.getExportsOfModule()` can
* sometimes return a "prototype" symbol for an object, even though there is no corresponding declaration in the
* source code. API Extractor generally ignores such symbols.
*/
public static tryGetADeclaration(symbol: ts.Symbol): ts.Declaration | undefined {
if (symbol.declarations && symbol.declarations.length > 0) {
return symbol.declarations[0];
}
return undefined;
}
/**
* Returns true if the specified symbol is an ambient declaration.
*/
public static isAmbient(symbol: ts.Symbol, typeChecker: ts.TypeChecker): boolean {
const followedSymbol: ts.Symbol = TypeScriptHelpers.followAliases(symbol, typeChecker);
if (followedSymbol.declarations && followedSymbol.declarations.length > 0) {
const firstDeclaration: ts.Declaration = followedSymbol.declarations[0];
// Test 1: Are we inside the sinister "declare global {" construct?
const highestModuleDeclaration: ts.ModuleDeclaration | undefined = TypeScriptHelpers.findHighestParent(
firstDeclaration,
ts.SyntaxKind.ModuleDeclaration
);
if (highestModuleDeclaration) {
if (highestModuleDeclaration.name.getText().trim() === 'global') {
return true;
}
}
// Test 2: Otherwise, the main heuristic for ambient declarations is by looking at the
// ts.SyntaxKind.SourceFile node to see whether it has a symbol or not (i.e. whether it
// is acting as a module or not).
const sourceFile: ts.SourceFile = firstDeclaration.getSourceFile();
if (typeChecker.getSymbolAtLocation(sourceFile)) {
return false;
}
}
return true;
}
/**
* Same semantics as tryGetSymbolForDeclaration(), but throws an exception if the symbol
* cannot be found.
*/
public static getSymbolForDeclaration(declaration: ts.Declaration, checker: ts.TypeChecker): ts.Symbol {
const symbol: ts.Symbol | undefined = TypeScriptInternals.tryGetSymbolForDeclaration(
declaration,
checker
);
if (!symbol) {
throw new InternalError(
'Unable to determine semantic information for declaration:\n' +
SourceFileLocationFormatter.formatDeclaration(declaration)
);
}
return symbol;
}
// Return name of the module, which could be like "./SomeLocalFile' or like 'external-package/entry/point'
public static getModuleSpecifier(
nodeWithModuleSpecifier: ts.ImportDeclaration | ts.ExportDeclaration | ts.ImportTypeNode
): string | undefined {
if (nodeWithModuleSpecifier.kind === ts.SyntaxKind.ImportType) {
// As specified internally in typescript:/src/compiler/types.ts#ValidImportTypeNode
if (
nodeWithModuleSpecifier.argument.kind !== ts.SyntaxKind.LiteralType ||
(nodeWithModuleSpecifier.argument as ts.LiteralTypeNode).literal.kind !== ts.SyntaxKind.StringLiteral
) {
throw new InternalError(
`Invalid ImportTypeNode: ${nodeWithModuleSpecifier.getText()}\n` +
SourceFileLocationFormatter.formatDeclaration(nodeWithModuleSpecifier)
);
}
const literalTypeNode: ts.LiteralTypeNode = nodeWithModuleSpecifier.argument as ts.LiteralTypeNode;
const stringLiteral: ts.StringLiteral = literalTypeNode.literal as ts.StringLiteral;
return stringLiteral.text.trim();
}
// Node is a declaration
if (
nodeWithModuleSpecifier.moduleSpecifier &&
ts.isStringLiteralLike(nodeWithModuleSpecifier.moduleSpecifier)
) {
return TypeScriptInternals.getTextOfIdentifierOrLiteral(nodeWithModuleSpecifier.moduleSpecifier);
}
return undefined;
}
/**
* Returns an ancestor of "node", such that the ancestor, any intermediary nodes,
* and the starting node match a list of expected kinds. Undefined is returned
* if there aren't enough ancestors, or if the kinds are incorrect.
*
* For example, suppose child "C" has parents A --> B --> C.
*
* Calling _matchAncestor(C, [ExportSpecifier, NamedExports, ExportDeclaration])
* would return A only if A is of kind ExportSpecifier, B is of kind NamedExports,
* and C is of kind ExportDeclaration.
*
* Calling _matchAncestor(C, [ExportDeclaration]) would return C.
*/
public static matchAncestor<T extends ts.Node>(
node: ts.Node,
kindsToMatch: ts.SyntaxKind[]
): T | undefined {
// (slice(0) clones an array)
const reversedParentKinds: ts.SyntaxKind[] = kindsToMatch.slice(0).reverse();
let current: ts.Node | undefined = undefined;
for (const parentKind of reversedParentKinds) {
if (!current) {
// The first time through, start with node
current = node;
} else {
// Then walk the parents
current = current.parent;
}
// If we ran out of items, or if the kind doesn't match, then fail
if (!current || current.kind !== parentKind) {
return undefined;
}
}
// If we matched everything, then return the node that matched the last parentKinds item
return current as T;
}
/**
* Does a depth-first search of the children of the specified node. Returns the first child
* with the specified kind, or undefined if there is no match.
*/
public static | <T extends ts.Node>(
node: ts.Node,
kindToMatch: ts.SyntaxKind
): T | undefined {
for (const child of node.getChildren()) {
if (child.kind === kindToMatch) {
return child as T;
}
const recursiveMatch: T | undefined = TypeScriptHelpers.findFirstChildNode(child, kindToMatch);
if (recursiveMatch) {
return recursiveMatch;
}
}
return undefined;
}
/**
* Returns the first parent node with the specified SyntaxKind, or undefined if there is no match.
*/
public static findFirstParent<T extends ts.Node>(node: ts.Node, kindToMatch: ts.SyntaxKind): T | undefined {
let current: ts.Node | undefined = node.parent;
while (current) {
if (current.kind === kindToMatch) {
return current as T;
}
current = current.parent;
}
return undefined;
}
/**
* Returns the highest parent node with the specified SyntaxKind, or undefined if there is no match.
* @remarks
* Whereas findFirstParent() returns the first match, findHighestParent() returns the last match.
*/
public static findHighestParent<T extends ts.Node>(
node: ts.Node,
kindToMatch: ts.SyntaxKind
): T | undefined {
let current: ts.Node | undefined = node;
let highest: T | undefined = undefined;
for (;;) {
current = TypeScriptHelpers.findFirstParent<T>(current, kindToMatch);
if (!current) {
break;
}
highest = current as T;
}
return highest;
}
/**
* Decodes the names that the compiler generates for a built-in ECMAScript symbol.
*
* @remarks
* TypeScript binds well-known ECMAScript symbols like `[Symbol.iterator]` as `__@iterator`.
* If `name` is of this form, then `tryGetWellKnownSymbolName()` converts it back into e.g. `[Symbol.iterator]`.
* If the string does not start with `__@` then `undefined` is returned.
*/
public static tryDecodeWellKnownSymbolName(name: ts.__String): string | undefined {
const | findFirstChildNode | identifier_name |
TypeScriptHelpers.ts | names for late-bound symbols derived from `unique symbol` declarations
// which have the form of "__@<variableName>@<symbolId>", i.e. "__@someSymbol@12345".
private static readonly _uniqueSymbolNameRegExp: RegExp = /^__@.*@\d+$/;
/**
* This traverses any symbol aliases to find the original place where an item was defined.
* For example, suppose a class is defined as "export default class MyClass { }"
* but exported from the package's index.ts like this:
*
* export { default as _MyClass } from './MyClass';
*
* In this example, calling followAliases() on the _MyClass symbol will return the
* original definition of MyClass, traversing any intermediary places where the
* symbol was imported and re-exported.
*/
public static followAliases(symbol: ts.Symbol, typeChecker: ts.TypeChecker): ts.Symbol {
let current: ts.Symbol = symbol;
for (;;) {
if (!(current.flags & ts.SymbolFlags.Alias)) {
break;
}
const currentAlias: ts.Symbol = typeChecker.getAliasedSymbol(current);
if (!currentAlias || currentAlias === current) {
break;
}
current = currentAlias;
}
return current;
}
/**
* Returns true if TypeScriptHelpers.followAliases() would return something different
* from the input `symbol`.
*/
public static isFollowableAlias(symbol: ts.Symbol, typeChecker: ts.TypeChecker): boolean {
if (!(symbol.flags & ts.SymbolFlags.Alias)) {
return false;
}
const alias: ts.Symbol = typeChecker.getAliasedSymbol(symbol);
if (!alias || alias === symbol) {
return false;
}
return true;
}
/**
* Certain virtual symbols do not have any declarations. For example, `ts.TypeChecker.getExportsOfModule()` can
* sometimes return a "prototype" symbol for an object, even though there is no corresponding declaration in the
* source code. API Extractor generally ignores such symbols.
*/
public static tryGetADeclaration(symbol: ts.Symbol): ts.Declaration | undefined {
if (symbol.declarations && symbol.declarations.length > 0) {
return symbol.declarations[0];
}
return undefined;
}
/**
* Returns true if the specified symbol is an ambient declaration.
*/
public static isAmbient(symbol: ts.Symbol, typeChecker: ts.TypeChecker): boolean {
const followedSymbol: ts.Symbol = TypeScriptHelpers.followAliases(symbol, typeChecker);
if (followedSymbol.declarations && followedSymbol.declarations.length > 0) {
const firstDeclaration: ts.Declaration = followedSymbol.declarations[0];
// Test 1: Are we inside the sinister "declare global {" construct?
const highestModuleDeclaration: ts.ModuleDeclaration | undefined = TypeScriptHelpers.findHighestParent(
firstDeclaration,
ts.SyntaxKind.ModuleDeclaration
);
if (highestModuleDeclaration) {
if (highestModuleDeclaration.name.getText().trim() === 'global') {
return true;
}
}
// Test 2: Otherwise, the main heuristic for ambient declarations is by looking at the
// ts.SyntaxKind.SourceFile node to see whether it has a symbol or not (i.e. whether it
// is acting as a module or not).
const sourceFile: ts.SourceFile = firstDeclaration.getSourceFile();
if (typeChecker.getSymbolAtLocation(sourceFile)) {
return false;
}
}
return true;
}
/**
* Same semantics as tryGetSymbolForDeclaration(), but throws an exception if the symbol
* cannot be found.
*/
public static getSymbolForDeclaration(declaration: ts.Declaration, checker: ts.TypeChecker): ts.Symbol {
const symbol: ts.Symbol | undefined = TypeScriptInternals.tryGetSymbolForDeclaration(
declaration,
checker
);
if (!symbol) {
throw new InternalError(
'Unable to determine semantic information for declaration:\n' +
SourceFileLocationFormatter.formatDeclaration(declaration)
);
}
return symbol;
}
// Return name of the module, which could be like "./SomeLocalFile' or like 'external-package/entry/point'
public static getModuleSpecifier(
nodeWithModuleSpecifier: ts.ImportDeclaration | ts.ExportDeclaration | ts.ImportTypeNode
): string | undefined {
if (nodeWithModuleSpecifier.kind === ts.SyntaxKind.ImportType) {
// As specified internally in typescript:/src/compiler/types.ts#ValidImportTypeNode
if (
nodeWithModuleSpecifier.argument.kind !== ts.SyntaxKind.LiteralType ||
(nodeWithModuleSpecifier.argument as ts.LiteralTypeNode).literal.kind !== ts.SyntaxKind.StringLiteral
) {
throw new InternalError(
`Invalid ImportTypeNode: ${nodeWithModuleSpecifier.getText()}\n` +
SourceFileLocationFormatter.formatDeclaration(nodeWithModuleSpecifier)
);
}
const literalTypeNode: ts.LiteralTypeNode = nodeWithModuleSpecifier.argument as ts.LiteralTypeNode;
const stringLiteral: ts.StringLiteral = literalTypeNode.literal as ts.StringLiteral;
return stringLiteral.text.trim();
}
// Node is a declaration
if (
nodeWithModuleSpecifier.moduleSpecifier &&
ts.isStringLiteralLike(nodeWithModuleSpecifier.moduleSpecifier)
) {
return TypeScriptInternals.getTextOfIdentifierOrLiteral(nodeWithModuleSpecifier.moduleSpecifier);
}
return undefined;
}
/**
* Returns an ancestor of "node", such that the ancestor, any intermediary nodes,
* and the starting node match a list of expected kinds. Undefined is returned
* if there aren't enough ancestors, or if the kinds are incorrect.
*
* For example, suppose child "C" has parents A --> B --> C.
*
* Calling _matchAncestor(C, [ExportSpecifier, NamedExports, ExportDeclaration])
* would return A only if A is of kind ExportSpecifier, B is of kind NamedExports,
* and C is of kind ExportDeclaration.
*
* Calling _matchAncestor(C, [ExportDeclaration]) would return C.
*/
public static matchAncestor<T extends ts.Node>(
node: ts.Node,
kindsToMatch: ts.SyntaxKind[]
): T | undefined |
// If we matched everything, then return the node that matched the last parentKinds item
return current as T;
}
/**
* Does a depth-first search of the children of the specified node. Returns the first child
* with the specified kind, or undefined if there is no match.
*/
public static findFirstChildNode<T extends ts.Node>(
node: ts.Node,
kindToMatch: ts.SyntaxKind
): T | undefined {
for (const child of node.getChildren()) {
if (child.kind === kindToMatch) {
return child as T;
}
const recursiveMatch: T | undefined = TypeScriptHelpers.findFirstChildNode(child, kindToMatch);
if (recursiveMatch) {
return recursiveMatch;
}
}
return undefined;
}
/**
* Returns the first parent node with the specified SyntaxKind, or undefined if there is no match.
*/
public static findFirstParent<T extends ts.Node>(node: ts.Node, kindToMatch: ts.SyntaxKind): T | undefined {
let current: ts.Node | undefined = node.parent;
while (current) {
if (current.kind === kindToMatch) {
return current as T;
}
current = current.parent;
}
return undefined;
}
/**
* Returns the highest parent node with the specified SyntaxKind, or undefined if there is no match.
* @remarks
* Whereas findFirstParent() returns the first match, findHighestParent() returns the last match.
*/
public static findHighestParent<T extends ts.Node>(
node: ts.Node,
kindToMatch: ts.SyntaxKind
): T | undefined {
let current: ts.Node | undefined = node;
let highest: T | undefined = undefined;
for (;;) {
current = TypeScriptHelpers.findFirstParent<T>(current, kindToMatch);
if (!current) {
break;
}
highest = current as T;
}
return highest;
}
/**
* Decodes the names that the compiler generates for a built-in ECMAScript symbol.
*
* @remarks
* TypeScript binds well-known ECMAScript symbols like `[Symbol.iterator]` as `__@iterator`.
* If `name` is of this form, then `tryGetWellKnownSymbolName()` converts it back into e.g. `[Symbol.iterator]`.
* If the string does not start with `__@` then `undefined` is returned.
*/
public static tryDecodeWellKnownSymbolName(name: ts.__String): string | undefined {
| {
// (slice(0) clones an array)
const reversedParentKinds: ts.SyntaxKind[] = kindsToMatch.slice(0).reverse();
let current: ts.Node | undefined = undefined;
for (const parentKind of reversedParentKinds) {
if (!current) {
// The first time through, start with node
current = node;
} else {
// Then walk the parents
current = current.parent;
}
// If we ran out of items, or if the kind doesn't match, then fail
if (!current || current.kind !== parentKind) {
return undefined;
}
} | identifier_body |
TypeScriptHelpers.ts | names for late-bound symbols derived from `unique symbol` declarations
// which have the form of "__@<variableName>@<symbolId>", i.e. "__@someSymbol@12345".
private static readonly _uniqueSymbolNameRegExp: RegExp = /^__@.*@\d+$/;
/**
* This traverses any symbol aliases to find the original place where an item was defined.
* For example, suppose a class is defined as "export default class MyClass { }"
* but exported from the package's index.ts like this:
*
* export { default as _MyClass } from './MyClass';
*
* In this example, calling followAliases() on the _MyClass symbol will return the
* original definition of MyClass, traversing any intermediary places where the
* symbol was imported and re-exported.
*/
public static followAliases(symbol: ts.Symbol, typeChecker: ts.TypeChecker): ts.Symbol {
let current: ts.Symbol = symbol;
for (;;) {
if (!(current.flags & ts.SymbolFlags.Alias)) {
break;
}
const currentAlias: ts.Symbol = typeChecker.getAliasedSymbol(current);
if (!currentAlias || currentAlias === current) {
break;
}
current = currentAlias;
}
return current;
}
/**
* Returns true if TypeScriptHelpers.followAliases() would return something different
* from the input `symbol`.
*/
public static isFollowableAlias(symbol: ts.Symbol, typeChecker: ts.TypeChecker): boolean {
if (!(symbol.flags & ts.SymbolFlags.Alias)) {
return false;
}
const alias: ts.Symbol = typeChecker.getAliasedSymbol(symbol);
if (!alias || alias === symbol) {
return false;
}
return true;
}
/**
* Certain virtual symbols do not have any declarations. For example, `ts.TypeChecker.getExportsOfModule()` can
* sometimes return a "prototype" symbol for an object, even though there is no corresponding declaration in the
* source code. API Extractor generally ignores such symbols.
*/
public static tryGetADeclaration(symbol: ts.Symbol): ts.Declaration | undefined {
if (symbol.declarations && symbol.declarations.length > 0) {
return symbol.declarations[0];
}
return undefined;
}
/**
* Returns true if the specified symbol is an ambient declaration.
*/
public static isAmbient(symbol: ts.Symbol, typeChecker: ts.TypeChecker): boolean {
const followedSymbol: ts.Symbol = TypeScriptHelpers.followAliases(symbol, typeChecker);
if (followedSymbol.declarations && followedSymbol.declarations.length > 0) {
const firstDeclaration: ts.Declaration = followedSymbol.declarations[0];
// Test 1: Are we inside the sinister "declare global {" construct?
const highestModuleDeclaration: ts.ModuleDeclaration | undefined = TypeScriptHelpers.findHighestParent(
firstDeclaration,
ts.SyntaxKind.ModuleDeclaration
);
if (highestModuleDeclaration) {
if (highestModuleDeclaration.name.getText().trim() === 'global') {
return true;
}
}
// Test 2: Otherwise, the main heuristic for ambient declarations is by looking at the
// ts.SyntaxKind.SourceFile node to see whether it has a symbol or not (i.e. whether it
// is acting as a module or not).
const sourceFile: ts.SourceFile = firstDeclaration.getSourceFile();
if (typeChecker.getSymbolAtLocation(sourceFile)) {
return false;
}
}
return true;
}
/**
* Same semantics as tryGetSymbolForDeclaration(), but throws an exception if the symbol
* cannot be found.
*/
public static getSymbolForDeclaration(declaration: ts.Declaration, checker: ts.TypeChecker): ts.Symbol { | const symbol: ts.Symbol | undefined = TypeScriptInternals.tryGetSymbolForDeclaration(
declaration,
checker
);
if (!symbol) {
throw new InternalError(
'Unable to determine semantic information for declaration:\n' +
SourceFileLocationFormatter.formatDeclaration(declaration)
);
}
return symbol;
}
// Return name of the module, which could be like "./SomeLocalFile' or like 'external-package/entry/point'
public static getModuleSpecifier(
nodeWithModuleSpecifier: ts.ImportDeclaration | ts.ExportDeclaration | ts.ImportTypeNode
): string | undefined {
if (nodeWithModuleSpecifier.kind === ts.SyntaxKind.ImportType) {
// As specified internally in typescript:/src/compiler/types.ts#ValidImportTypeNode
if (
nodeWithModuleSpecifier.argument.kind !== ts.SyntaxKind.LiteralType ||
(nodeWithModuleSpecifier.argument as ts.LiteralTypeNode).literal.kind !== ts.SyntaxKind.StringLiteral
) {
throw new InternalError(
`Invalid ImportTypeNode: ${nodeWithModuleSpecifier.getText()}\n` +
SourceFileLocationFormatter.formatDeclaration(nodeWithModuleSpecifier)
);
}
const literalTypeNode: ts.LiteralTypeNode = nodeWithModuleSpecifier.argument as ts.LiteralTypeNode;
const stringLiteral: ts.StringLiteral = literalTypeNode.literal as ts.StringLiteral;
return stringLiteral.text.trim();
}
// Node is a declaration
if (
nodeWithModuleSpecifier.moduleSpecifier &&
ts.isStringLiteralLike(nodeWithModuleSpecifier.moduleSpecifier)
) {
return TypeScriptInternals.getTextOfIdentifierOrLiteral(nodeWithModuleSpecifier.moduleSpecifier);
}
return undefined;
}
/**
* Returns an ancestor of "node", such that the ancestor, any intermediary nodes,
* and the starting node match a list of expected kinds. Undefined is returned
* if there aren't enough ancestors, or if the kinds are incorrect.
*
* For example, suppose child "C" has parents A --> B --> C.
*
* Calling _matchAncestor(C, [ExportSpecifier, NamedExports, ExportDeclaration])
* would return A only if A is of kind ExportSpecifier, B is of kind NamedExports,
* and C is of kind ExportDeclaration.
*
* Calling _matchAncestor(C, [ExportDeclaration]) would return C.
*/
public static matchAncestor<T extends ts.Node>(
node: ts.Node,
kindsToMatch: ts.SyntaxKind[]
): T | undefined {
// (slice(0) clones an array)
const reversedParentKinds: ts.SyntaxKind[] = kindsToMatch.slice(0).reverse();
let current: ts.Node | undefined = undefined;
for (const parentKind of reversedParentKinds) {
if (!current) {
// The first time through, start with node
current = node;
} else {
// Then walk the parents
current = current.parent;
}
// If we ran out of items, or if the kind doesn't match, then fail
if (!current || current.kind !== parentKind) {
return undefined;
}
}
// If we matched everything, then return the node that matched the last parentKinds item
return current as T;
}
/**
* Does a depth-first search of the children of the specified node. Returns the first child
* with the specified kind, or undefined if there is no match.
*/
public static findFirstChildNode<T extends ts.Node>(
node: ts.Node,
kindToMatch: ts.SyntaxKind
): T | undefined {
for (const child of node.getChildren()) {
if (child.kind === kindToMatch) {
return child as T;
}
const recursiveMatch: T | undefined = TypeScriptHelpers.findFirstChildNode(child, kindToMatch);
if (recursiveMatch) {
return recursiveMatch;
}
}
return undefined;
}
/**
* Returns the first parent node with the specified SyntaxKind, or undefined if there is no match.
*/
public static findFirstParent<T extends ts.Node>(node: ts.Node, kindToMatch: ts.SyntaxKind): T | undefined {
let current: ts.Node | undefined = node.parent;
while (current) {
if (current.kind === kindToMatch) {
return current as T;
}
current = current.parent;
}
return undefined;
}
/**
* Returns the highest parent node with the specified SyntaxKind, or undefined if there is no match.
* @remarks
* Whereas findFirstParent() returns the first match, findHighestParent() returns the last match.
*/
public static findHighestParent<T extends ts.Node>(
node: ts.Node,
kindToMatch: ts.SyntaxKind
): T | undefined {
let current: ts.Node | undefined = node;
let highest: T | undefined = undefined;
for (;;) {
current = TypeScriptHelpers.findFirstParent<T>(current, kindToMatch);
if (!current) {
break;
}
highest = current as T;
}
return highest;
}
/**
* Decodes the names that the compiler generates for a built-in ECMAScript symbol.
*
* @remarks
* TypeScript binds well-known ECMAScript symbols like `[Symbol.iterator]` as `__@iterator`.
* If `name` is of this form, then `tryGetWellKnownSymbolName()` converts it back into e.g. `[Symbol.iterator]`.
* If the string does not start with `__@` then `undefined` is returned.
*/
public static tryDecodeWellKnownSymbolName(name: ts.__String): string | undefined {
const match | random_line_split | |
TypeScriptHelpers.ts | names for late-bound symbols derived from `unique symbol` declarations
// which have the form of "__@<variableName>@<symbolId>", i.e. "__@someSymbol@12345".
private static readonly _uniqueSymbolNameRegExp: RegExp = /^__@.*@\d+$/;
/**
* This traverses any symbol aliases to find the original place where an item was defined.
* For example, suppose a class is defined as "export default class MyClass { }"
* but exported from the package's index.ts like this:
*
* export { default as _MyClass } from './MyClass';
*
* In this example, calling followAliases() on the _MyClass symbol will return the
* original definition of MyClass, traversing any intermediary places where the
* symbol was imported and re-exported.
*/
public static followAliases(symbol: ts.Symbol, typeChecker: ts.TypeChecker): ts.Symbol {
let current: ts.Symbol = symbol;
for (;;) {
if (!(current.flags & ts.SymbolFlags.Alias)) {
break;
}
const currentAlias: ts.Symbol = typeChecker.getAliasedSymbol(current);
if (!currentAlias || currentAlias === current) {
break;
}
current = currentAlias;
}
return current;
}
/**
* Returns true if TypeScriptHelpers.followAliases() would return something different
* from the input `symbol`.
*/
public static isFollowableAlias(symbol: ts.Symbol, typeChecker: ts.TypeChecker): boolean {
if (!(symbol.flags & ts.SymbolFlags.Alias)) {
return false;
}
const alias: ts.Symbol = typeChecker.getAliasedSymbol(symbol);
if (!alias || alias === symbol) {
return false;
}
return true;
}
/**
* Certain virtual symbols do not have any declarations. For example, `ts.TypeChecker.getExportsOfModule()` can
* sometimes return a "prototype" symbol for an object, even though there is no corresponding declaration in the
* source code. API Extractor generally ignores such symbols.
*/
public static tryGetADeclaration(symbol: ts.Symbol): ts.Declaration | undefined {
if (symbol.declarations && symbol.declarations.length > 0) {
return symbol.declarations[0];
}
return undefined;
}
/**
* Returns true if the specified symbol is an ambient declaration.
*/
public static isAmbient(symbol: ts.Symbol, typeChecker: ts.TypeChecker): boolean {
const followedSymbol: ts.Symbol = TypeScriptHelpers.followAliases(symbol, typeChecker);
if (followedSymbol.declarations && followedSymbol.declarations.length > 0) {
const firstDeclaration: ts.Declaration = followedSymbol.declarations[0];
// Test 1: Are we inside the sinister "declare global {" construct?
const highestModuleDeclaration: ts.ModuleDeclaration | undefined = TypeScriptHelpers.findHighestParent(
firstDeclaration,
ts.SyntaxKind.ModuleDeclaration
);
if (highestModuleDeclaration) {
if (highestModuleDeclaration.name.getText().trim() === 'global') {
return true;
}
}
// Test 2: Otherwise, the main heuristic for ambient declarations is by looking at the
// ts.SyntaxKind.SourceFile node to see whether it has a symbol or not (i.e. whether it
// is acting as a module or not).
const sourceFile: ts.SourceFile = firstDeclaration.getSourceFile();
if (typeChecker.getSymbolAtLocation(sourceFile)) {
return false;
}
}
return true;
}
/**
* Same semantics as tryGetSymbolForDeclaration(), but throws an exception if the symbol
* cannot be found.
*/
public static getSymbolForDeclaration(declaration: ts.Declaration, checker: ts.TypeChecker): ts.Symbol {
const symbol: ts.Symbol | undefined = TypeScriptInternals.tryGetSymbolForDeclaration(
declaration,
checker
);
if (!symbol) {
throw new InternalError(
'Unable to determine semantic information for declaration:\n' +
SourceFileLocationFormatter.formatDeclaration(declaration)
);
}
return symbol;
}
// Return name of the module, which could be like "./SomeLocalFile' or like 'external-package/entry/point'
public static getModuleSpecifier(
nodeWithModuleSpecifier: ts.ImportDeclaration | ts.ExportDeclaration | ts.ImportTypeNode
): string | undefined {
if (nodeWithModuleSpecifier.kind === ts.SyntaxKind.ImportType) {
// As specified internally in typescript:/src/compiler/types.ts#ValidImportTypeNode
if (
nodeWithModuleSpecifier.argument.kind !== ts.SyntaxKind.LiteralType ||
(nodeWithModuleSpecifier.argument as ts.LiteralTypeNode).literal.kind !== ts.SyntaxKind.StringLiteral
) {
throw new InternalError(
`Invalid ImportTypeNode: ${nodeWithModuleSpecifier.getText()}\n` +
SourceFileLocationFormatter.formatDeclaration(nodeWithModuleSpecifier)
);
}
const literalTypeNode: ts.LiteralTypeNode = nodeWithModuleSpecifier.argument as ts.LiteralTypeNode;
const stringLiteral: ts.StringLiteral = literalTypeNode.literal as ts.StringLiteral;
return stringLiteral.text.trim();
}
// Node is a declaration
if (
nodeWithModuleSpecifier.moduleSpecifier &&
ts.isStringLiteralLike(nodeWithModuleSpecifier.moduleSpecifier)
) {
return TypeScriptInternals.getTextOfIdentifierOrLiteral(nodeWithModuleSpecifier.moduleSpecifier);
}
return undefined;
}
/**
* Returns an ancestor of "node", such that the ancestor, any intermediary nodes,
* and the starting node match a list of expected kinds. Undefined is returned
* if there aren't enough ancestors, or if the kinds are incorrect.
*
* For example, suppose child "C" has parents A --> B --> C.
*
* Calling _matchAncestor(C, [ExportSpecifier, NamedExports, ExportDeclaration])
* would return A only if A is of kind ExportSpecifier, B is of kind NamedExports,
* and C is of kind ExportDeclaration.
*
* Calling _matchAncestor(C, [ExportDeclaration]) would return C.
*/
public static matchAncestor<T extends ts.Node>(
node: ts.Node,
kindsToMatch: ts.SyntaxKind[]
): T | undefined {
// (slice(0) clones an array)
const reversedParentKinds: ts.SyntaxKind[] = kindsToMatch.slice(0).reverse();
let current: ts.Node | undefined = undefined;
for (const parentKind of reversedParentKinds) {
if (!current) {
// The first time through, start with node
current = node;
} else {
// Then walk the parents
current = current.parent;
}
// If we ran out of items, or if the kind doesn't match, then fail
if (!current || current.kind !== parentKind) {
return undefined;
}
}
// If we matched everything, then return the node that matched the last parentKinds item
return current as T;
}
/**
* Does a depth-first search of the children of the specified node. Returns the first child
* with the specified kind, or undefined if there is no match.
*/
public static findFirstChildNode<T extends ts.Node>(
node: ts.Node,
kindToMatch: ts.SyntaxKind
): T | undefined {
for (const child of node.getChildren()) {
if (child.kind === kindToMatch) {
return child as T;
}
const recursiveMatch: T | undefined = TypeScriptHelpers.findFirstChildNode(child, kindToMatch);
if (recursiveMatch) {
return recursiveMatch;
}
}
return undefined;
}
/**
* Returns the first parent node with the specified SyntaxKind, or undefined if there is no match.
*/
public static findFirstParent<T extends ts.Node>(node: ts.Node, kindToMatch: ts.SyntaxKind): T | undefined {
let current: ts.Node | undefined = node.parent;
while (current) |
return undefined;
}
/**
* Returns the highest parent node with the specified SyntaxKind, or undefined if there is no match.
* @remarks
* Whereas findFirstParent() returns the first match, findHighestParent() returns the last match.
*/
public static findHighestParent<T extends ts.Node>(
node: ts.Node,
kindToMatch: ts.SyntaxKind
): T | undefined {
let current: ts.Node | undefined = node;
let highest: T | undefined = undefined;
for (;;) {
current = TypeScriptHelpers.findFirstParent<T>(current, kindToMatch);
if (!current) {
break;
}
highest = current as T;
}
return highest;
}
/**
* Decodes the names that the compiler generates for a built-in ECMAScript symbol.
*
* @remarks
* TypeScript binds well-known ECMAScript symbols like `[Symbol.iterator]` as `__@iterator`.
* If `name` is of this form, then `tryGetWellKnownSymbolName()` converts it back into e.g. `[Symbol.iterator]`.
* If the string does not start with `__@` then `undefined` is returned.
*/
public static tryDecodeWellKnownSymbolName(name: ts.__String): string | undefined {
| {
if (current.kind === kindToMatch) {
return current as T;
}
current = current.parent;
} | conditional_block |
index.js | map.set(data[i].Word, []);
}
// Get the array of the word and push the date.
map.get(data[i].Word).push(data[i].Date);
}
});
drawScatter(myList);
// Set the dimensions and margins of the graph
var margin = {top: 10, right: 30, bottom: 30, left: 60},
width = 1100 - margin.left - margin.right,
height = 700 - margin.top - margin.bottom;
// Append the svg object to the body of the page
var wid = width + margin.left + margin.right;
var hei = height + margin.top + margin.bottom;
var svg = d3.select("#dataviz")
.append("svg")
.attr('preserveAspectRatio', 'xMinYMin meet')
.attr('viewBox', "0 0 " + wid + " " + hei)
//.attr("width", width + margin.left + margin.right)
//.attr("height", height + margin.top + margin.bottom)
.append("g")
.attr("transform",
"translate(" + margin.left + "," + margin.top + ")");
// First suggestion
d3.select("#link1")
.on("click", function(d) {
d3.event.preventDefault();
d3.csv(wordData).then(function(data) {
var americaResults = [];
data.forEach(function(d) {
d.Date = parseTime(d.Date);
if (d.Word === 'makeamericagreatagain') {
americaResults.push(d.Date);
}
});
d3.selectAll("g > *").remove();
drawScatter(americaResults);
})
});
// Second suggestion
d3.select("#link2")
.on("click", function(d) {
d3.event.preventDefault();
d3.csv(wordData).then(function(data) {
var clintonResults = [];
data.forEach(function(d) {
d.Date = parseTime(d.Date);
if (d.Word === 'clinton') {
clintonResults.push(d.Date);
}
});
d3.selectAll("g > *").remove();
drawScatter(clintonResults);
})
});
// Third suggestion
d3.select("#link3")
.on("click", function(d) {
d3.event.preventDefault();
d3.csv(wordData).then(function(data) {
var americaResults = [];
data.forEach(function(d) {
d.Date = parseTime(d.Date);
if (d.Word === 'republican') {
americaResults.push(d.Date);
}
});
d3.selectAll("g > *").remove();
drawScatter(americaResults);
})
});
//search callback
d3.select("#form")
.on("submit", function(d) {
d3.event.preventDefault();
var input = document.getElementById("input").value;
var tokens = input.trim().split(" ");
var searchResults = [];
let valid = true;
let regex = /[^A-Za-z_]/;
for (let i = 0; i < tokens.length; i++) {
tokens[i] = tokens[i].toLowerCase().trim().replace(regex, "");
if (!map.has(tokens[i])) {
valid = false;
}
}
if (valid) {
let arr = map.get(tokens[0]);
for (let i = 0; i < arr.length; i++) {
// So that we store a copy rather than the references themselves
searchResults.push(arr[i]);
}
for (let i = 1; i < tokens.length; i++) {
let temp = []; // Temp variable that holds valid dates.
let nextArray = map.get(tokens[i]);
for (let j = 0; j < nextArray.length; j++) {
// Iterate through the next token's dates
for (let k = 0; k < searchResults.length; k++) {
// Iterate through the dates in search result
if (searchResults[k] == nextArray[j]) |
}
}
searchResults = temp;
}
for (let i = 0; i < searchResults.length; i++) {
searchResults[i] = parseTime(searchResults[i]);
}
}
d3.selectAll("g > *").remove();
//console.log(searchResults);
if (input == "") { // User did not input anything
drawScatter(null);
} else if(searchResults.length == 0){
console.log("else if" + searchResults);
drawScatter(searchResults, true);
} else {
//console.log(searchResults);
drawScatter(searchResults);
}
});
// Draw scatterplot
function drawScatter(searchResults, errFlag) {
d3.csv(csvFile).then(function (data) {
// Convert to Date format
data.forEach(function (d) {
d.Date = parseTime(d.Date);
});
if(errFlag){
d3.select("#err")
.style("opacity", 1);
}else{
d3.select("#err")
.style("opacity", 0);
}
// Zoom feature
var zoom = d3.zoom()
.scaleExtent([1, 20])
//translateExtent insert bounds
//or restrict zoom to one axis
.translateExtent([[0, 0], [width, height]])
.extent([[0, 0], [width, height]])
.on("zoom", zoomed);
//svg.call(zoom)
// Add X axis
var x = d3.scaleTime()
.domain(d3.extent(data, function (d) {
return d.Date;
}))
.range([0, width]);
var xAxis = svg.append("g")
.attr("transform", "translate(0," + (height - 20) + ")")
.call(d3.axisBottom(x).tickFormat(d3.timeFormat("%b %Y")));
// Add Y axis
var y = d3.scaleLinear()
.domain([0, 20])
.range([height - 20, 0]);
var yAxis = svg.append("g")
.call(d3.axisLeft(y));
svg.append("rect")
.attr("width", width)
.attr("height", height)
.style("fill", "none")
.style("pointer-events", "all")
.attr('transform', 'translate(' + margin.left + ',' + margin.top + ')')
.call(zoom);
// Define the div for the tooltip
var div = d3.select("body")
.append("div")
.attr("class", "tooltip")
.style("opacity", 0)
.style("pointer-events", "none");
// Add a clipPath: everything out of this area won't be drawn.
var clip = svg.append("defs").append("svg:clipPath")
.attr("id", "clip")
.append("svg:rect")
.attr("width", width)
.attr("height", height-20)
.attr("x", 0)
.attr("y", 0);
var scatter = svg.append('g')
.attr("clip-path", "url(#clip)");
// Text label for the x axis
svg.append("text")
.attr("transform",
"translate(" + (width/2) + " ," +
(height + margin.top + 20) + ")")
.style("text-anchor", "middle")
.style("font-family", "trebuchet ms")
.text("Date");
// Text label for the y axis
svg.append("text")
.attr("transform", "rotate(-90)")
.attr("y", 0 - margin.left)
.attr("x", 0 - (height / 2))
.attr("dy", "1em")
.style("text-anchor", "middle")
.style("font-family", "trebuchet ms")
.text("Popularity");
//Add dots
scatter.selectAll("dot")
.data(data)
.enter()
.append("circle")
.attr("cx", function (d) {
return x(d.Date);
})
.attr("cy", function (d) {
return y(d.Popularity_log);
})
.attr("r", 3)
.style("fill", function(d) {
if(searchResults == null){return "#00acee"} //"#cc2400"
for (var i = 0; i < searchResults.length; i++) {
if (searchResults[i] != null && searchResults[i].getTime() === d.Date.getTime()) {
return "#00acee";
}
}
return "none";
})
.on("mouseover", function (d) {
div.transition()
.duration(200)
.style("opacity", .9);
div.text(d.Tweet_Text)
.style("left", (d3.event.pageX) + "px")
.style("top", (d3.event.pageY - 28) + "px");
})
.on("mouseout", function (d) {
div.transition()
. | {
// only push those dates that are already in search result in temp
// as the results should be only the tweets that have all the words in the input.
temp.push(searchResults[k]);
} | conditional_block |
index.js | americaResults = [];
data.forEach(function(d) {
d.Date = parseTime(d.Date);
if (d.Word === 'makeamericagreatagain') {
americaResults.push(d.Date);
}
});
d3.selectAll("g > *").remove();
drawScatter(americaResults);
})
});
// Second suggestion
d3.select("#link2")
.on("click", function(d) {
d3.event.preventDefault();
d3.csv(wordData).then(function(data) {
var clintonResults = [];
data.forEach(function(d) {
d.Date = parseTime(d.Date);
if (d.Word === 'clinton') {
clintonResults.push(d.Date);
}
});
d3.selectAll("g > *").remove();
drawScatter(clintonResults);
})
});
// Third suggestion
d3.select("#link3")
.on("click", function(d) {
d3.event.preventDefault();
d3.csv(wordData).then(function(data) {
var americaResults = [];
data.forEach(function(d) {
d.Date = parseTime(d.Date);
if (d.Word === 'republican') {
americaResults.push(d.Date);
}
});
d3.selectAll("g > *").remove();
drawScatter(americaResults);
})
});
//search callback
d3.select("#form")
.on("submit", function(d) {
d3.event.preventDefault();
var input = document.getElementById("input").value;
var tokens = input.trim().split(" ");
var searchResults = [];
let valid = true;
let regex = /[^A-Za-z_]/;
for (let i = 0; i < tokens.length; i++) {
tokens[i] = tokens[i].toLowerCase().trim().replace(regex, "");
if (!map.has(tokens[i])) {
valid = false;
}
}
if (valid) {
let arr = map.get(tokens[0]);
for (let i = 0; i < arr.length; i++) {
// So that we store a copy rather than the references themselves
searchResults.push(arr[i]);
}
for (let i = 1; i < tokens.length; i++) {
let temp = []; // Temp variable that holds valid dates.
let nextArray = map.get(tokens[i]);
for (let j = 0; j < nextArray.length; j++) {
// Iterate through the next token's dates
for (let k = 0; k < searchResults.length; k++) {
// Iterate through the dates in search result
if (searchResults[k] == nextArray[j]) {
// only push those dates that are already in search result in temp
// as the results should be only the tweets that have all the words in the input.
temp.push(searchResults[k]);
}
}
}
searchResults = temp;
}
for (let i = 0; i < searchResults.length; i++) {
searchResults[i] = parseTime(searchResults[i]);
}
}
d3.selectAll("g > *").remove();
//console.log(searchResults);
if (input == "") { // User did not input anything
drawScatter(null);
} else if(searchResults.length == 0){
console.log("else if" + searchResults);
drawScatter(searchResults, true);
} else {
//console.log(searchResults);
drawScatter(searchResults);
}
});
// Draw scatterplot
function drawScatter(searchResults, errFlag) {
d3.csv(csvFile).then(function (data) {
// Convert to Date format
data.forEach(function (d) {
d.Date = parseTime(d.Date);
});
if(errFlag){
d3.select("#err")
.style("opacity", 1);
}else{
d3.select("#err")
.style("opacity", 0);
}
// Zoom feature
var zoom = d3.zoom()
.scaleExtent([1, 20])
//translateExtent insert bounds
//or restrict zoom to one axis
.translateExtent([[0, 0], [width, height]])
.extent([[0, 0], [width, height]])
.on("zoom", zoomed);
//svg.call(zoom)
// Add X axis
var x = d3.scaleTime()
.domain(d3.extent(data, function (d) {
return d.Date;
}))
.range([0, width]);
var xAxis = svg.append("g")
.attr("transform", "translate(0," + (height - 20) + ")")
.call(d3.axisBottom(x).tickFormat(d3.timeFormat("%b %Y")));
// Add Y axis
var y = d3.scaleLinear()
.domain([0, 20])
.range([height - 20, 0]);
var yAxis = svg.append("g")
.call(d3.axisLeft(y));
svg.append("rect")
.attr("width", width)
.attr("height", height)
.style("fill", "none")
.style("pointer-events", "all")
.attr('transform', 'translate(' + margin.left + ',' + margin.top + ')')
.call(zoom);
// Define the div for the tooltip
var div = d3.select("body")
.append("div")
.attr("class", "tooltip")
.style("opacity", 0)
.style("pointer-events", "none");
// Add a clipPath: everything out of this area won't be drawn.
var clip = svg.append("defs").append("svg:clipPath")
.attr("id", "clip")
.append("svg:rect")
.attr("width", width)
.attr("height", height-20)
.attr("x", 0)
.attr("y", 0);
var scatter = svg.append('g')
.attr("clip-path", "url(#clip)");
// Text label for the x axis
svg.append("text")
.attr("transform",
"translate(" + (width/2) + " ," +
(height + margin.top + 20) + ")")
.style("text-anchor", "middle")
.style("font-family", "trebuchet ms")
.text("Date");
// Text label for the y axis
svg.append("text")
.attr("transform", "rotate(-90)")
.attr("y", 0 - margin.left)
.attr("x", 0 - (height / 2))
.attr("dy", "1em")
.style("text-anchor", "middle")
.style("font-family", "trebuchet ms")
.text("Popularity");
//Add dots
scatter.selectAll("dot")
.data(data)
.enter()
.append("circle")
.attr("cx", function (d) {
return x(d.Date);
})
.attr("cy", function (d) {
return y(d.Popularity_log);
})
.attr("r", 3)
.style("fill", function(d) {
if(searchResults == null){return "#00acee"} //"#cc2400"
for (var i = 0; i < searchResults.length; i++) {
if (searchResults[i] != null && searchResults[i].getTime() === d.Date.getTime()) {
return "#00acee";
}
}
return "none";
})
.on("mouseover", function (d) {
div.transition()
.duration(200)
.style("opacity", .9);
div.text(d.Tweet_Text)
.style("left", (d3.event.pageX) + "px")
.style("top", (d3.event.pageY - 28) + "px");
})
.on("mouseout", function (d) {
div.transition()
.duration(500)
.style("opacity", 0);
});
var scat = scatter
.selectAll("circle");
// Update chart when zooming
function updateChart() {
// Recover the new scale
var newX = d3.event.transform.rescaleX(x);
var newY = d3.event.transform.rescaleY(y);
// Update axes with these new boundaries
xAxis.call(d3.axisBottom(newX))
yAxis.call(d3.axisLeft(newY))
// Update circle position
scat.attr('cx', function (d) {
return newX(d.Date)
})
.attr('cy', function (d) {
return newY(d.Popularity_log)
});
}
function zoomed() | {
var newX = d3.event.transform.rescaleX(x);
var newY = d3.event.transform.rescaleY(y);
xAxis.call(d3.axisBottom(newX).tickFormat(function(date) {
if (d3.event.transform.k == 1) {
return d3.timeFormat("%b %Y")(date);
} else {
return d3.timeFormat("%b %e, %Y")(date);
}}));
scat.attr('cx', function (d) {
return newX(d.Date)
})
.attr('cy', function (d) {
return newY(d.Popularity_log)
});
} | identifier_body | |
index.js | map.set(data[i].Word, []);
}
// Get the array of the word and push the date.
map.get(data[i].Word).push(data[i].Date);
}
});
drawScatter(myList);
// Set the dimensions and margins of the graph
var margin = {top: 10, right: 30, bottom: 30, left: 60},
width = 1100 - margin.left - margin.right,
height = 700 - margin.top - margin.bottom;
// Append the svg object to the body of the page
var wid = width + margin.left + margin.right;
var hei = height + margin.top + margin.bottom;
var svg = d3.select("#dataviz")
.append("svg")
.attr('preserveAspectRatio', 'xMinYMin meet')
.attr('viewBox', "0 0 " + wid + " " + hei)
//.attr("width", width + margin.left + margin.right)
//.attr("height", height + margin.top + margin.bottom)
.append("g")
.attr("transform",
"translate(" + margin.left + "," + margin.top + ")");
// First suggestion
d3.select("#link1")
.on("click", function(d) {
d3.event.preventDefault();
d3.csv(wordData).then(function(data) {
var americaResults = [];
data.forEach(function(d) {
d.Date = parseTime(d.Date);
if (d.Word === 'makeamericagreatagain') {
americaResults.push(d.Date);
}
});
d3.selectAll("g > *").remove();
drawScatter(americaResults);
})
});
// Second suggestion
d3.select("#link2")
.on("click", function(d) {
d3.event.preventDefault();
d3.csv(wordData).then(function(data) {
var clintonResults = [];
data.forEach(function(d) {
d.Date = parseTime(d.Date);
if (d.Word === 'clinton') {
clintonResults.push(d.Date);
}
});
d3.selectAll("g > *").remove();
drawScatter(clintonResults);
})
});
// Third suggestion
d3.select("#link3")
.on("click", function(d) {
d3.event.preventDefault();
d3.csv(wordData).then(function(data) {
var americaResults = [];
data.forEach(function(d) {
d.Date = parseTime(d.Date);
if (d.Word === 'republican') {
americaResults.push(d.Date);
}
});
| });
//search callback
d3.select("#form")
.on("submit", function(d) {
d3.event.preventDefault();
var input = document.getElementById("input").value;
var tokens = input.trim().split(" ");
var searchResults = [];
let valid = true;
let regex = /[^A-Za-z_]/;
for (let i = 0; i < tokens.length; i++) {
tokens[i] = tokens[i].toLowerCase().trim().replace(regex, "");
if (!map.has(tokens[i])) {
valid = false;
}
}
if (valid) {
let arr = map.get(tokens[0]);
for (let i = 0; i < arr.length; i++) {
// So that we store a copy rather than the references themselves
searchResults.push(arr[i]);
}
for (let i = 1; i < tokens.length; i++) {
let temp = []; // Temp variable that holds valid dates.
let nextArray = map.get(tokens[i]);
for (let j = 0; j < nextArray.length; j++) {
// Iterate through the next token's dates
for (let k = 0; k < searchResults.length; k++) {
// Iterate through the dates in search result
if (searchResults[k] == nextArray[j]) {
// only push those dates that are already in search result in temp
// as the results should be only the tweets that have all the words in the input.
temp.push(searchResults[k]);
}
}
}
searchResults = temp;
}
for (let i = 0; i < searchResults.length; i++) {
searchResults[i] = parseTime(searchResults[i]);
}
}
d3.selectAll("g > *").remove();
//console.log(searchResults);
if (input == "") { // User did not input anything
drawScatter(null);
} else if(searchResults.length == 0){
console.log("else if" + searchResults);
drawScatter(searchResults, true);
} else {
//console.log(searchResults);
drawScatter(searchResults);
}
});
// Draw scatterplot
function drawScatter(searchResults, errFlag) {
d3.csv(csvFile).then(function (data) {
// Convert to Date format
data.forEach(function (d) {
d.Date = parseTime(d.Date);
});
if(errFlag){
d3.select("#err")
.style("opacity", 1);
}else{
d3.select("#err")
.style("opacity", 0);
}
// Zoom feature
var zoom = d3.zoom()
.scaleExtent([1, 20])
//translateExtent insert bounds
//or restrict zoom to one axis
.translateExtent([[0, 0], [width, height]])
.extent([[0, 0], [width, height]])
.on("zoom", zoomed);
//svg.call(zoom)
// Add X axis
var x = d3.scaleTime()
.domain(d3.extent(data, function (d) {
return d.Date;
}))
.range([0, width]);
var xAxis = svg.append("g")
.attr("transform", "translate(0," + (height - 20) + ")")
.call(d3.axisBottom(x).tickFormat(d3.timeFormat("%b %Y")));
// Add Y axis
var y = d3.scaleLinear()
.domain([0, 20])
.range([height - 20, 0]);
var yAxis = svg.append("g")
.call(d3.axisLeft(y));
svg.append("rect")
.attr("width", width)
.attr("height", height)
.style("fill", "none")
.style("pointer-events", "all")
.attr('transform', 'translate(' + margin.left + ',' + margin.top + ')')
.call(zoom);
// Define the div for the tooltip
var div = d3.select("body")
.append("div")
.attr("class", "tooltip")
.style("opacity", 0)
.style("pointer-events", "none");
// Add a clipPath: everything out of this area won't be drawn.
var clip = svg.append("defs").append("svg:clipPath")
.attr("id", "clip")
.append("svg:rect")
.attr("width", width)
.attr("height", height-20)
.attr("x", 0)
.attr("y", 0);
var scatter = svg.append('g')
.attr("clip-path", "url(#clip)");
// Text label for the x axis
svg.append("text")
.attr("transform",
"translate(" + (width/2) + " ," +
(height + margin.top + 20) + ")")
.style("text-anchor", "middle")
.style("font-family", "trebuchet ms")
.text("Date");
// Text label for the y axis
svg.append("text")
.attr("transform", "rotate(-90)")
.attr("y", 0 - margin.left)
.attr("x", 0 - (height / 2))
.attr("dy", "1em")
.style("text-anchor", "middle")
.style("font-family", "trebuchet ms")
.text("Popularity");
//Add dots
scatter.selectAll("dot")
.data(data)
.enter()
.append("circle")
.attr("cx", function (d) {
return x(d.Date);
})
.attr("cy", function (d) {
return y(d.Popularity_log);
})
.attr("r", 3)
.style("fill", function(d) {
if(searchResults == null){return "#00acee"} //"#cc2400"
for (var i = 0; i < searchResults.length; i++) {
if (searchResults[i] != null && searchResults[i].getTime() === d.Date.getTime()) {
return "#00acee";
}
}
return "none";
})
.on("mouseover", function (d) {
div.transition()
.duration(200)
.style("opacity", .9);
div.text(d.Tweet_Text)
.style("left", (d3.event.pageX) + "px")
.style("top", (d3.event.pageY - 28) + "px");
})
.on("mouseout", function (d) {
div.transition()
.duration | d3.selectAll("g > *").remove();
drawScatter(americaResults);
}) | random_line_split |
index.js | map.set(data[i].Word, []);
}
// Get the array of the word and push the date.
map.get(data[i].Word).push(data[i].Date);
}
});
drawScatter(myList);
// Set the dimensions and margins of the graph
var margin = {top: 10, right: 30, bottom: 30, left: 60},
width = 1100 - margin.left - margin.right,
height = 700 - margin.top - margin.bottom;
// Append the svg object to the body of the page
var wid = width + margin.left + margin.right;
var hei = height + margin.top + margin.bottom;
var svg = d3.select("#dataviz")
.append("svg")
.attr('preserveAspectRatio', 'xMinYMin meet')
.attr('viewBox', "0 0 " + wid + " " + hei)
//.attr("width", width + margin.left + margin.right)
//.attr("height", height + margin.top + margin.bottom)
.append("g")
.attr("transform",
"translate(" + margin.left + "," + margin.top + ")");
// First suggestion
d3.select("#link1")
.on("click", function(d) {
d3.event.preventDefault();
d3.csv(wordData).then(function(data) {
var americaResults = [];
data.forEach(function(d) {
d.Date = parseTime(d.Date);
if (d.Word === 'makeamericagreatagain') {
americaResults.push(d.Date);
}
});
d3.selectAll("g > *").remove();
drawScatter(americaResults);
})
});
// Second suggestion
d3.select("#link2")
.on("click", function(d) {
d3.event.preventDefault();
d3.csv(wordData).then(function(data) {
var clintonResults = [];
data.forEach(function(d) {
d.Date = parseTime(d.Date);
if (d.Word === 'clinton') {
clintonResults.push(d.Date);
}
});
d3.selectAll("g > *").remove();
drawScatter(clintonResults);
})
});
// Third suggestion
d3.select("#link3")
.on("click", function(d) {
d3.event.preventDefault();
d3.csv(wordData).then(function(data) {
var americaResults = [];
data.forEach(function(d) {
d.Date = parseTime(d.Date);
if (d.Word === 'republican') {
americaResults.push(d.Date);
}
});
d3.selectAll("g > *").remove();
drawScatter(americaResults);
})
});
//search callback
d3.select("#form")
.on("submit", function(d) {
d3.event.preventDefault();
var input = document.getElementById("input").value;
var tokens = input.trim().split(" ");
var searchResults = [];
let valid = true;
let regex = /[^A-Za-z_]/;
for (let i = 0; i < tokens.length; i++) {
tokens[i] = tokens[i].toLowerCase().trim().replace(regex, "");
if (!map.has(tokens[i])) {
valid = false;
}
}
if (valid) {
let arr = map.get(tokens[0]);
for (let i = 0; i < arr.length; i++) {
// So that we store a copy rather than the references themselves
searchResults.push(arr[i]);
}
for (let i = 1; i < tokens.length; i++) {
let temp = []; // Temp variable that holds valid dates.
let nextArray = map.get(tokens[i]);
for (let j = 0; j < nextArray.length; j++) {
// Iterate through the next token's dates
for (let k = 0; k < searchResults.length; k++) {
// Iterate through the dates in search result
if (searchResults[k] == nextArray[j]) {
// only push those dates that are already in search result in temp
// as the results should be only the tweets that have all the words in the input.
temp.push(searchResults[k]);
}
}
}
searchResults = temp;
}
for (let i = 0; i < searchResults.length; i++) {
searchResults[i] = parseTime(searchResults[i]);
}
}
d3.selectAll("g > *").remove();
//console.log(searchResults);
if (input == "") { // User did not input anything
drawScatter(null);
} else if(searchResults.length == 0){
console.log("else if" + searchResults);
drawScatter(searchResults, true);
} else {
//console.log(searchResults);
drawScatter(searchResults);
}
});
// Draw scatterplot
function | (searchResults, errFlag) {
d3.csv(csvFile).then(function (data) {
// Convert to Date format
data.forEach(function (d) {
d.Date = parseTime(d.Date);
});
if(errFlag){
d3.select("#err")
.style("opacity", 1);
}else{
d3.select("#err")
.style("opacity", 0);
}
// Zoom feature
var zoom = d3.zoom()
.scaleExtent([1, 20])
//translateExtent insert bounds
//or restrict zoom to one axis
.translateExtent([[0, 0], [width, height]])
.extent([[0, 0], [width, height]])
.on("zoom", zoomed);
//svg.call(zoom)
// Add X axis
var x = d3.scaleTime()
.domain(d3.extent(data, function (d) {
return d.Date;
}))
.range([0, width]);
var xAxis = svg.append("g")
.attr("transform", "translate(0," + (height - 20) + ")")
.call(d3.axisBottom(x).tickFormat(d3.timeFormat("%b %Y")));
// Add Y axis
var y = d3.scaleLinear()
.domain([0, 20])
.range([height - 20, 0]);
var yAxis = svg.append("g")
.call(d3.axisLeft(y));
svg.append("rect")
.attr("width", width)
.attr("height", height)
.style("fill", "none")
.style("pointer-events", "all")
.attr('transform', 'translate(' + margin.left + ',' + margin.top + ')')
.call(zoom);
// Define the div for the tooltip
var div = d3.select("body")
.append("div")
.attr("class", "tooltip")
.style("opacity", 0)
.style("pointer-events", "none");
// Add a clipPath: everything out of this area won't be drawn.
var clip = svg.append("defs").append("svg:clipPath")
.attr("id", "clip")
.append("svg:rect")
.attr("width", width)
.attr("height", height-20)
.attr("x", 0)
.attr("y", 0);
var scatter = svg.append('g')
.attr("clip-path", "url(#clip)");
// Text label for the x axis
svg.append("text")
.attr("transform",
"translate(" + (width/2) + " ," +
(height + margin.top + 20) + ")")
.style("text-anchor", "middle")
.style("font-family", "trebuchet ms")
.text("Date");
// Text label for the y axis
svg.append("text")
.attr("transform", "rotate(-90)")
.attr("y", 0 - margin.left)
.attr("x", 0 - (height / 2))
.attr("dy", "1em")
.style("text-anchor", "middle")
.style("font-family", "trebuchet ms")
.text("Popularity");
//Add dots
scatter.selectAll("dot")
.data(data)
.enter()
.append("circle")
.attr("cx", function (d) {
return x(d.Date);
})
.attr("cy", function (d) {
return y(d.Popularity_log);
})
.attr("r", 3)
.style("fill", function(d) {
if(searchResults == null){return "#00acee"} //"#cc2400"
for (var i = 0; i < searchResults.length; i++) {
if (searchResults[i] != null && searchResults[i].getTime() === d.Date.getTime()) {
return "#00acee";
}
}
return "none";
})
.on("mouseover", function (d) {
div.transition()
.duration(200)
.style("opacity", .9);
div.text(d.Tweet_Text)
.style("left", (d3.event.pageX) + "px")
.style("top", (d3.event.pageY - 28) + "px");
})
.on("mouseout", function (d) {
div.transition()
.duration | drawScatter | identifier_name |
train_k_fold.py | checkpoints/')
parser.add_argument('-embed_dim', type=int, default=100)
parser.add_argument('-embed_num', type=int, default=100)
parser.add_argument('-pos_dim', type=int, default=50)
parser.add_argument('-pos_num', type=int, default=100)
parser.add_argument('-seg_num', type=int, default=10)
parser.add_argument('-kernel_num', type=int, default=100)
parser.add_argument('-kernel_sizes', type=list, default=[3, 4, 5])
parser.add_argument('-model', type=str, default='RNN_RNN')
parser.add_argument('-hidden_size', type=int, default=200)
# train
parser.add_argument('-lr', type=float, default=1e-4)
parser.add_argument('-batch_size', type=int, default=16)
parser.add_argument('-epochs', type=int, default=30)
parser.add_argument('-seed', type=int, default=1)
parser.add_argument('-train_dir', type=str, default='../../data/chinese/cont2sum/little/train.json')
parser.add_argument('-embedding', type=str, default='../../data/chinese/cont2sum/little/embedding.npz')
parser.add_argument('-word2id', type=str, default='../../data/chinese/cont2sum/little/word2id.json')
parser.add_argument('-report_every', type=int, default=1500)
parser.add_argument('-seq_trunc', type=int, default=50)
parser.add_argument('-max_norm', type=float, default=1.0)
# test
parser.add_argument('-load_dir', type=str, default='checkpoints/AttnRNN_seed_1.pt')
parser.add_argument('-test_dir', type=str, default='../../data/chinese/cont2sum/little/test.json')
parser.add_argument('-ref', type=str, default='outputs/ref')
parser.add_argument('-hyp', type=str, default='outputs/hyp')
parser.add_argument('-topk', type=int, default=3)
# device
parser.add_argument('-device', type=int, default=0)
# option
parser.add_argument('-test', action='store_true')
parser.add_argument('-debug', action='store_true')
parser.add_argument('-predict', action='store_true')
args = parser.parse_args()
use_gpu = args.device is not None
# set cuda device and seed
torch.cuda.set_device(args.device)
torch.cuda.manual_seed(args.seed)
torch.manual_seed(args.seed)
random.seed(args.seed)
numpy.random.seed(args.seed)
random_state = args.seed
def eval(net, vocab, data_iter, criterion):
|
def train(n_val=50):
"""
验证集条数
:param n_val:
:return:
"""
logging.info('Loading vocab,train and val dataset.Wait a second,please')
embed = torch.Tensor(np.load(args.embedding)['embedding'])
with open(args.word2id) as f:
word2id = json.load(f)
vocab = summarunner_weather.utils.Vocab(embed, word2id)
with open(args.train_dir) as f:
examples = [json.loads(line) for line in f]
train_dataset = summarunner_weather.utils.Dataset(examples[: -n_val])
val_dataset = summarunner_weather.utils.Dataset(examples[-n_val:]) # 从train数据集中拿n_val条做验证集
# update args
args.embed_num = embed.size(0)
args.embed_dim = embed.size(1)
# build model
net = getattr(summarunner_weather.models, args.model)(args, embed)
if use_gpu:
net.cuda()
# load dataset
train_iter = DataLoader(dataset=train_dataset,
batch_size=args.batch_size,
shuffle=True)
val_iter = DataLoader(dataset=val_dataset,
batch_size=args.batch_size,
shuffle=False)
# loss function
criterion = nn.BCELoss()
# model info
print(net)
# params = sum(p.numel() for p in list(net.parameters())) / 1e6
# print('#Params: %.1fM' % params)
min_loss = float('inf')
optimizer = torch.optim.Adam(net.parameters(), lr=args.lr)
net.train()
t1 = time()
for epoch in range(1, args.epochs + 1):
print("epoch: ", epoch)
for i, batch in enumerate(train_iter):
print("batch num: ", i)
features, targets, _, doc_lens = vocab.make_features(batch)
features, targets = Variable(features), Variable(targets.float())
if use_gpu:
features = features.cuda()
targets = targets.cuda()
probs = net(features, doc_lens)
loss = criterion(probs, targets)
optimizer.zero_grad()
loss.backward()
clip_grad_norm(net.parameters(), args.max_norm)
optimizer.step()
if args.debug:
print('Batch ID:%d Loss:%f' % (i, loss.data[0]))
continue
if i % args.report_every == 0:
cur_loss = eval(net, vocab, val_iter, criterion)
if cur_loss < min_loss:
min_loss = cur_loss
net.save()
logging.info('Epoch: %2d Min_Val_Loss: %f Cur_Val_Loss: %f'
% (epoch, min_loss, cur_loss))
t2 = time()
logging.info('Total Cost:%f h' % ((t2 - t1) / 3600))
def train_k_fold(log_path='checkpoints/train_k_fold_RNN_RNN_info.txt'):
embed = torch.Tensor(np.load(args.embedding)['embedding'])
with open(args.word2id) as f:
word2id = json.load(f)
vocab = summarunner_weather.utils.Vocab(embed, word2id)
with open(args.train_dir, 'r', encoding='utf-8') as f:
examples = [json.loads(line) for line in f]
train_X = [example['content'] for example in examples]
train_y = [example['labels'] for example in examples]
args,embed_num = embed.size(0)
args.embed_dim = embed.size(1)
infos = []
cv_ptr = 0
for train_index, val_index in KFold(n_splits=10, random_state=random_state, shuffle=True).split(train_X, train_y):
train_data = [{'content': examples[i]['content'], 'labels': examples[i]['labels'],
'summary': examples[i]['summary']} for i in train_index]
val_data = [{'content': examples[i]['content'], 'labels': examples[i]['labels'],
'summary': examples[i]['summary']} for i in val_index]
train_dataset = summarunner_weather.utils.Dataset(train_data)
val_dataset = summarunner_weather.utils.Dataset(val_data)
# build model
net = getattr(summarunner_weather.models, args.model)(args, embed)
if use_gpu:
net.cuda()
# load dataset
train_iter = DataLoader(dataset=train_dataset,
batch_size=args.batch_size,
shuffle=True)
val_iter = DataLoader(dataset=val_dataset,
batch_size=args.batch_size,
shuffle=False)
# loss function
criterion = nn.BCELoss() # Binary Cross Entropy loss
# model info
print(net)
# params = sum(p.numel() for p in list(net.parameters())) / 1e6
# print('#Params: %.1fM' % params)
min_loss = float('inf')
optimizer = torch.optim.Adam(net.parameters(), lr=args.lr)
net.train()
t1 = time()
train_loss, val_loss = [], []
for epoch in range(1, args.epochs + 1):
for i, batch in enumerate(train_iter):
print("epoch: {}, batch num: {}".format(epoch, i))
features, targets, _, doc_lens = vocab.make_features(batch)
features, targets = Variable(features), Variable(targets.float())
if use_gpu:
features = features.cuda()
targets = targets.cuda()
probs = net(features, doc_lens)
loss = criterion(probs, targets)
optimizer.zero_grad()
loss.backward()
clip_grad_norm(net.parameters(), args.max_norm)
optimizer.step()
train_loss.append(float(loss.data))
if args.debug:
print('Batch ID:%d Loss:%f' % (i, loss.data[0]))
continue
if i % args.report_every == 0:
cur_loss = eval(net, vocab, val_iter, criterion)
if cur_loss < min_loss:
min_loss = cur_loss
net.save(cv_ptr)
val_loss.append(cur_loss)
logging.info('Epoch: %2d Min_Val_Loss: %f Cur_Val_Loss: %f' % (epoch, min_loss, cur_loss))
t2 = time()
logging.info('Total Cost:%f h' % ((t2 - t1) / 3600))
with | net.eval()
total_loss = 0
batch_num = 0
for batch in data_iter:
features, targets, _, doc_lens = vocab.make_features(batch)
features, targets = Variable(features), Variable(targets.float())
if use_gpu:
features = features.cuda()
targets = targets.cuda()
probs = net(features, doc_lens)
loss = criterion(probs, targets)
# origin: total_loss += loss.data[0]
total_loss += loss.data.item()
batch_num += 1
loss = total_loss / batch_num
net.train()
return loss | identifier_body |
train_k_fold.py | points/')
parser.add_argument('-embed_dim', type=int, default=100)
parser.add_argument('-embed_num', type=int, default=100)
parser.add_argument('-pos_dim', type=int, default=50)
parser.add_argument('-pos_num', type=int, default=100)
parser.add_argument('-seg_num', type=int, default=10)
parser.add_argument('-kernel_num', type=int, default=100)
parser.add_argument('-kernel_sizes', type=list, default=[3, 4, 5])
parser.add_argument('-model', type=str, default='RNN_RNN')
parser.add_argument('-hidden_size', type=int, default=200)
# train
parser.add_argument('-lr', type=float, default=1e-4)
parser.add_argument('-batch_size', type=int, default=16)
parser.add_argument('-epochs', type=int, default=30)
parser.add_argument('-seed', type=int, default=1)
parser.add_argument('-train_dir', type=str, default='../../data/chinese/cont2sum/little/train.json')
parser.add_argument('-embedding', type=str, default='../../data/chinese/cont2sum/little/embedding.npz')
parser.add_argument('-word2id', type=str, default='../../data/chinese/cont2sum/little/word2id.json')
parser.add_argument('-report_every', type=int, default=1500)
parser.add_argument('-seq_trunc', type=int, default=50)
parser.add_argument('-max_norm', type=float, default=1.0)
# test
parser.add_argument('-load_dir', type=str, default='checkpoints/AttnRNN_seed_1.pt')
parser.add_argument('-test_dir', type=str, default='../../data/chinese/cont2sum/little/test.json')
parser.add_argument('-ref', type=str, default='outputs/ref')
parser.add_argument('-hyp', type=str, default='outputs/hyp')
parser.add_argument('-topk', type=int, default=3)
# device
parser.add_argument('-device', type=int, default=0)
# option
parser.add_argument('-test', action='store_true')
parser.add_argument('-debug', action='store_true')
parser.add_argument('-predict', action='store_true')
args = parser.parse_args()
use_gpu = args.device is not None
# set cuda device and seed
torch.cuda.set_device(args.device)
torch.cuda.manual_seed(args.seed)
torch.manual_seed(args.seed)
random.seed(args.seed)
numpy.random.seed(args.seed)
random_state = args.seed
def eval(net, vocab, data_iter, criterion):
net.eval()
total_loss = 0
batch_num = 0
for batch in data_iter:
features, targets, _, doc_lens = vocab.make_features(batch)
features, targets = Variable(features), Variable(targets.float())
if use_gpu:
features = features.cuda()
targets = targets.cuda()
probs = net(features, doc_lens)
loss = criterion(probs, targets)
# origin: total_loss += loss.data[0]
total_loss += loss.data.item()
batch_num += 1
loss = total_loss / batch_num
net.train()
return loss
def train(n_val=50):
"""
验证集条数
:param n_val:
:return:
"""
logging.info('Loading vocab,train and val dataset.Wait a second,please')
embed = torch.Tensor(np.load(args.embedding)['embedding'])
with open(args.word2id) as f:
word2id = json.load(f)
vocab = summarunner_weather.utils.Vocab(embed, word2id)
with open(args.train_dir) as f:
examples = [json.loads(line) for line in f]
train_dataset = summarunner_weather.utils.Dataset(examples[: -n_val])
val_dataset = summarunner_weather.utils.Dataset(examples[-n_val:]) # 从train数据集中拿n_val条做验证集
# update args
args.embed_num = embed.size(0)
args.embed_dim = embed.size(1)
# build model
net = getattr(summarunner_weather.models, args.model)(args, embed)
if use_gpu:
net.cuda()
# load dataset
train_iter = DataLoader(dataset=train_dataset,
batch_size=args.batch_size,
shuffle=True)
val_iter = DataLoader(dataset=val_dataset,
batch_size=args.batch_size,
shuffle=False)
# loss function
criterion = nn.BCELoss()
# model info
print(net)
# params = sum(p.numel() for p in list(net.parameters())) / 1e6
# print('#Params: %.1fM' % params)
min_loss = float('inf')
optimizer = torch.optim.Adam(net.parameters(), lr=args.lr)
net.train()
t1 = time()
for epoch in range(1, args.epochs + 1):
print("epoch: ", epoch)
for i, batch in enumerate(train_iter):
print("batch num: ", i)
features, targets, _, doc_lens = vocab.make_features(batch)
features, targets = Variable(features), Variable(targets.float())
if use_gpu:
features = features.cuda()
targets = targets.cuda()
probs = net(features, doc_lens)
loss = criterion(probs, targets)
optimizer.zero_grad()
loss.backward()
clip_grad_norm(net.parameters(), args.max_norm)
optimizer.step()
if args.debug:
print('Batch ID:%d Loss:%f' % (i, loss.data[0]))
continue
if i % args.report_every == 0:
cur_loss = eval(net, vocab, val_ | o('Total Cost:%f h' % ((t2 - t1) / 3600))
def train_k_fold(log_path='checkpoints/train_k_fold_RNN_RNN_info.txt'):
embed = torch.Tensor(np.load(args.embedding)['embedding'])
with open(args.word2id) as f:
word2id = json.load(f)
vocab = summarunner_weather.utils.Vocab(embed, word2id)
with open(args.train_dir, 'r', encoding='utf-8') as f:
examples = [json.loads(line) for line in f]
train_X = [example['content'] for example in examples]
train_y = [example['labels'] for example in examples]
args,embed_num = embed.size(0)
args.embed_dim = embed.size(1)
infos = []
cv_ptr = 0
for train_index, val_index in KFold(n_splits=10, random_state=random_state, shuffle=True).split(train_X, train_y):
train_data = [{'content': examples[i]['content'], 'labels': examples[i]['labels'],
'summary': examples[i]['summary']} for i in train_index]
val_data = [{'content': examples[i]['content'], 'labels': examples[i]['labels'],
'summary': examples[i]['summary']} for i in val_index]
train_dataset = summarunner_weather.utils.Dataset(train_data)
val_dataset = summarunner_weather.utils.Dataset(val_data)
# build model
net = getattr(summarunner_weather.models, args.model)(args, embed)
if use_gpu:
net.cuda()
# load dataset
train_iter = DataLoader(dataset=train_dataset,
batch_size=args.batch_size,
shuffle=True)
val_iter = DataLoader(dataset=val_dataset,
batch_size=args.batch_size,
shuffle=False)
# loss function
criterion = nn.BCELoss() # Binary Cross Entropy loss
# model info
print(net)
# params = sum(p.numel() for p in list(net.parameters())) / 1e6
# print('#Params: %.1fM' % params)
min_loss = float('inf')
optimizer = torch.optim.Adam(net.parameters(), lr=args.lr)
net.train()
t1 = time()
train_loss, val_loss = [], []
for epoch in range(1, args.epochs + 1):
for i, batch in enumerate(train_iter):
print("epoch: {}, batch num: {}".format(epoch, i))
features, targets, _, doc_lens = vocab.make_features(batch)
features, targets = Variable(features), Variable(targets.float())
if use_gpu:
features = features.cuda()
targets = targets.cuda()
probs = net(features, doc_lens)
loss = criterion(probs, targets)
optimizer.zero_grad()
loss.backward()
clip_grad_norm(net.parameters(), args.max_norm)
optimizer.step()
train_loss.append(float(loss.data))
if args.debug:
print('Batch ID:%d Loss:%f' % (i, loss.data[0]))
continue
if i % args.report_every == 0:
cur_loss = eval(net, vocab, val_iter, criterion)
if cur_loss < min_loss:
min_loss = cur_loss
net.save(cv_ptr)
val_loss.append(cur_loss)
logging.info('Epoch: %2d Min_Val_Loss: %f Cur_Val_Loss: %f' % (epoch, min_loss, cur_loss))
t2 = time()
logging.info('Total Cost:%f h' % ((t2 - t1) / 3600))
| iter, criterion)
if cur_loss < min_loss:
min_loss = cur_loss
net.save()
logging.info('Epoch: %2d Min_Val_Loss: %f Cur_Val_Loss: %f'
% (epoch, min_loss, cur_loss))
t2 = time()
logging.inf | conditional_block |
train_k_fold.py | points/')
parser.add_argument('-embed_dim', type=int, default=100)
parser.add_argument('-embed_num', type=int, default=100)
parser.add_argument('-pos_dim', type=int, default=50)
parser.add_argument('-pos_num', type=int, default=100)
parser.add_argument('-seg_num', type=int, default=10)
parser.add_argument('-kernel_num', type=int, default=100)
parser.add_argument('-kernel_sizes', type=list, default=[3, 4, 5])
parser.add_argument('-model', type=str, default='RNN_RNN')
parser.add_argument('-hidden_size', type=int, default=200)
# train
parser.add_argument('-lr', type=float, default=1e-4)
parser.add_argument('-batch_size', type=int, default=16)
parser.add_argument('-epochs', type=int, default=30)
parser.add_argument('-seed', type=int, default=1)
parser.add_argument('-train_dir', type=str, default='../../data/chinese/cont2sum/little/train.json')
parser.add_argument('-embedding', type=str, default='../../data/chinese/cont2sum/little/embedding.npz')
parser.add_argument('-word2id', type=str, default='../../data/chinese/cont2sum/little/word2id.json')
parser.add_argument('-report_every', type=int, default=1500)
parser.add_argument('-seq_trunc', type=int, default=50)
parser.add_argument('-max_norm', type=float, default=1.0)
# test
parser.add_argument('-load_dir', type=str, default='checkpoints/AttnRNN_seed_1.pt')
parser.add_argument('-test_dir', type=str, default='../../data/chinese/cont2sum/little/test.json')
parser.add_argument('-ref', type=str, default='outputs/ref')
parser.add_argument('-hyp', type=str, default='outputs/hyp')
parser.add_argument('-topk', type=int, default=3)
# device
parser.add_argument('-device', type=int, default=0)
# option
parser.add_argument('-test', action='store_true')
parser.add_argument('-debug', action='store_true')
parser.add_argument('-predict', action='store_true')
args = parser.parse_args()
use_gpu = args.device is not None
# set cuda device and seed
torch.cuda.set_device(args.device)
torch.cuda.manual_seed(args.seed)
torch.manual_seed(args.seed)
random.seed(args.seed)
numpy.random.seed(args.seed)
random_state = args.seed
def eval(net, vocab, data_iter, criterion):
net.eval()
total_loss = 0
batch_num = 0
for batch in data_iter:
features, targets, _, doc_lens = vocab.make_features(batch)
features, targets = Variable(features), Variable(targets.float())
if use_gpu:
features = features.cuda()
targets = targets.cuda()
probs = net(features, doc_lens)
loss = criterion(probs, targets)
# origin: total_loss += loss.data[0]
total_loss += loss.data.item()
batch_num += 1
loss = total_loss / batch_num
net.train()
return loss
def train(n_val=50):
"""
验证集条数
:param n_val:
:return:
"""
logging.info('Loading vocab,train and val dataset.Wait a second,please')
embed = torch.Tensor(np.load(args.embedding)['embedding'])
with open(args.word2id) as f:
word2id = json.load(f)
vocab = summarunner_weather.utils.Vocab(embed, word2id)
with open(args.train_dir) as f:
examples = [json.loads(line) for line in f]
train_dataset = summarunner_weather.utils.Dataset(examples[: -n_val])
val_dataset = summarunner_weather.utils.Dataset(examples[-n_val:]) # 从train数据集中拿n_val条做验证集
# update args
args.embed_num = embed.size(0)
args.embed_dim = embed.size(1)
# build model
net = getattr(summarunner_weather.models, args.model)(args, embed)
if use_gpu:
net.cuda()
# load dataset
train_iter = DataLoader(dataset=train_dataset,
batch_size=args.batch_size,
shuffle=True)
val_iter = DataLoader(dataset=val_dataset,
batch_size=args.batch_size,
shuffle=False)
# loss function
criterion = nn.BCELoss()
# model info
print(net)
# params = sum(p.numel() for p in list(net.parameters())) / 1e6
# print('#Params: %.1fM' % params)
min_loss = float('inf')
optimizer = torch.optim.Adam(net.parameters(), lr=args.lr)
net.train()
t1 = time()
for epoch in range(1, args.epochs + 1):
print("epoch: ", epoch)
for i, batch in enumerate(train_iter):
print("batch num: ", i)
features, targets, _, doc_lens = vocab.make_features(batch)
features, targets = Variable(features), Variable(targets.float())
if use_gpu:
features = features.cuda()
targets = targets.cuda()
probs = net(features, doc_lens)
loss = criterion(probs, targets)
optimizer.zero_grad()
loss.backward()
clip_grad_norm(net.parameters(), args.max_norm)
optimizer.step()
if args.debug:
print('Batch ID:%d Loss:%f' % (i, loss.data[0]))
continue
if i % args.report_every == 0:
cur_loss = eval(net, vocab, val_iter, criterion)
if cur_loss < min_loss:
min_loss = cur_loss
net.save()
logging.info('Epoch: %2d Min_Val_Loss: %f Cur_Val_Loss: %f'
% (epoch, min_loss, cur_loss))
t2 = time()
logging.info('Total Cost:%f h' % ((t2 - t1) / 3600))
def train_k_fold(log_path='checkpoin | old_RNN_RNN_info.txt'):
embed = torch.Tensor(np.load(args.embedding)['embedding'])
with open(args.word2id) as f:
word2id = json.load(f)
vocab = summarunner_weather.utils.Vocab(embed, word2id)
with open(args.train_dir, 'r', encoding='utf-8') as f:
examples = [json.loads(line) for line in f]
train_X = [example['content'] for example in examples]
train_y = [example['labels'] for example in examples]
args,embed_num = embed.size(0)
args.embed_dim = embed.size(1)
infos = []
cv_ptr = 0
for train_index, val_index in KFold(n_splits=10, random_state=random_state, shuffle=True).split(train_X, train_y):
train_data = [{'content': examples[i]['content'], 'labels': examples[i]['labels'],
'summary': examples[i]['summary']} for i in train_index]
val_data = [{'content': examples[i]['content'], 'labels': examples[i]['labels'],
'summary': examples[i]['summary']} for i in val_index]
train_dataset = summarunner_weather.utils.Dataset(train_data)
val_dataset = summarunner_weather.utils.Dataset(val_data)
# build model
net = getattr(summarunner_weather.models, args.model)(args, embed)
if use_gpu:
net.cuda()
# load dataset
train_iter = DataLoader(dataset=train_dataset,
batch_size=args.batch_size,
shuffle=True)
val_iter = DataLoader(dataset=val_dataset,
batch_size=args.batch_size,
shuffle=False)
# loss function
criterion = nn.BCELoss() # Binary Cross Entropy loss
# model info
print(net)
# params = sum(p.numel() for p in list(net.parameters())) / 1e6
# print('#Params: %.1fM' % params)
min_loss = float('inf')
optimizer = torch.optim.Adam(net.parameters(), lr=args.lr)
net.train()
t1 = time()
train_loss, val_loss = [], []
for epoch in range(1, args.epochs + 1):
for i, batch in enumerate(train_iter):
print("epoch: {}, batch num: {}".format(epoch, i))
features, targets, _, doc_lens = vocab.make_features(batch)
features, targets = Variable(features), Variable(targets.float())
if use_gpu:
features = features.cuda()
targets = targets.cuda()
probs = net(features, doc_lens)
loss = criterion(probs, targets)
optimizer.zero_grad()
loss.backward()
clip_grad_norm(net.parameters(), args.max_norm)
optimizer.step()
train_loss.append(float(loss.data))
if args.debug:
print('Batch ID:%d Loss:%f' % (i, loss.data[0]))
continue
if i % args.report_every == 0:
cur_loss = eval(net, vocab, val_iter, criterion)
if cur_loss < min_loss:
min_loss = cur_loss
net.save(cv_ptr)
val_loss.append(cur_loss)
logging.info('Epoch: %2d Min_Val_Loss: %f Cur_Val_Loss: %f' % (epoch, min_loss, cur_loss))
t2 = time()
logging.info('Total Cost:%f h' % ((t2 - t1) / 3600))
| ts/train_k_f | identifier_name |
train_k_fold.py | ='checkpoints/')
parser.add_argument('-embed_dim', type=int, default=100)
parser.add_argument('-embed_num', type=int, default=100)
parser.add_argument('-pos_dim', type=int, default=50)
parser.add_argument('-pos_num', type=int, default=100)
parser.add_argument('-seg_num', type=int, default=10)
parser.add_argument('-kernel_num', type=int, default=100)
parser.add_argument('-kernel_sizes', type=list, default=[3, 4, 5])
parser.add_argument('-model', type=str, default='RNN_RNN')
parser.add_argument('-hidden_size', type=int, default=200)
# train | parser.add_argument('-seed', type=int, default=1)
parser.add_argument('-train_dir', type=str, default='../../data/chinese/cont2sum/little/train.json')
parser.add_argument('-embedding', type=str, default='../../data/chinese/cont2sum/little/embedding.npz')
parser.add_argument('-word2id', type=str, default='../../data/chinese/cont2sum/little/word2id.json')
parser.add_argument('-report_every', type=int, default=1500)
parser.add_argument('-seq_trunc', type=int, default=50)
parser.add_argument('-max_norm', type=float, default=1.0)
# test
parser.add_argument('-load_dir', type=str, default='checkpoints/AttnRNN_seed_1.pt')
parser.add_argument('-test_dir', type=str, default='../../data/chinese/cont2sum/little/test.json')
parser.add_argument('-ref', type=str, default='outputs/ref')
parser.add_argument('-hyp', type=str, default='outputs/hyp')
parser.add_argument('-topk', type=int, default=3)
# device
parser.add_argument('-device', type=int, default=0)
# option
parser.add_argument('-test', action='store_true')
parser.add_argument('-debug', action='store_true')
parser.add_argument('-predict', action='store_true')
args = parser.parse_args()
use_gpu = args.device is not None
# set cuda device and seed
torch.cuda.set_device(args.device)
torch.cuda.manual_seed(args.seed)
torch.manual_seed(args.seed)
random.seed(args.seed)
numpy.random.seed(args.seed)
random_state = args.seed
def eval(net, vocab, data_iter, criterion):
net.eval()
total_loss = 0
batch_num = 0
for batch in data_iter:
features, targets, _, doc_lens = vocab.make_features(batch)
features, targets = Variable(features), Variable(targets.float())
if use_gpu:
features = features.cuda()
targets = targets.cuda()
probs = net(features, doc_lens)
loss = criterion(probs, targets)
# origin: total_loss += loss.data[0]
total_loss += loss.data.item()
batch_num += 1
loss = total_loss / batch_num
net.train()
return loss
def train(n_val=50):
"""
验证集条数
:param n_val:
:return:
"""
logging.info('Loading vocab,train and val dataset.Wait a second,please')
embed = torch.Tensor(np.load(args.embedding)['embedding'])
with open(args.word2id) as f:
word2id = json.load(f)
vocab = summarunner_weather.utils.Vocab(embed, word2id)
with open(args.train_dir) as f:
examples = [json.loads(line) for line in f]
train_dataset = summarunner_weather.utils.Dataset(examples[: -n_val])
val_dataset = summarunner_weather.utils.Dataset(examples[-n_val:]) # 从train数据集中拿n_val条做验证集
# update args
args.embed_num = embed.size(0)
args.embed_dim = embed.size(1)
# build model
net = getattr(summarunner_weather.models, args.model)(args, embed)
if use_gpu:
net.cuda()
# load dataset
train_iter = DataLoader(dataset=train_dataset,
batch_size=args.batch_size,
shuffle=True)
val_iter = DataLoader(dataset=val_dataset,
batch_size=args.batch_size,
shuffle=False)
# loss function
criterion = nn.BCELoss()
# model info
print(net)
# params = sum(p.numel() for p in list(net.parameters())) / 1e6
# print('#Params: %.1fM' % params)
min_loss = float('inf')
optimizer = torch.optim.Adam(net.parameters(), lr=args.lr)
net.train()
t1 = time()
for epoch in range(1, args.epochs + 1):
print("epoch: ", epoch)
for i, batch in enumerate(train_iter):
print("batch num: ", i)
features, targets, _, doc_lens = vocab.make_features(batch)
features, targets = Variable(features), Variable(targets.float())
if use_gpu:
features = features.cuda()
targets = targets.cuda()
probs = net(features, doc_lens)
loss = criterion(probs, targets)
optimizer.zero_grad()
loss.backward()
clip_grad_norm(net.parameters(), args.max_norm)
optimizer.step()
if args.debug:
print('Batch ID:%d Loss:%f' % (i, loss.data[0]))
continue
if i % args.report_every == 0:
cur_loss = eval(net, vocab, val_iter, criterion)
if cur_loss < min_loss:
min_loss = cur_loss
net.save()
logging.info('Epoch: %2d Min_Val_Loss: %f Cur_Val_Loss: %f'
% (epoch, min_loss, cur_loss))
t2 = time()
logging.info('Total Cost:%f h' % ((t2 - t1) / 3600))
def train_k_fold(log_path='checkpoints/train_k_fold_RNN_RNN_info.txt'):
embed = torch.Tensor(np.load(args.embedding)['embedding'])
with open(args.word2id) as f:
word2id = json.load(f)
vocab = summarunner_weather.utils.Vocab(embed, word2id)
with open(args.train_dir, 'r', encoding='utf-8') as f:
examples = [json.loads(line) for line in f]
train_X = [example['content'] for example in examples]
train_y = [example['labels'] for example in examples]
args,embed_num = embed.size(0)
args.embed_dim = embed.size(1)
infos = []
cv_ptr = 0
for train_index, val_index in KFold(n_splits=10, random_state=random_state, shuffle=True).split(train_X, train_y):
train_data = [{'content': examples[i]['content'], 'labels': examples[i]['labels'],
'summary': examples[i]['summary']} for i in train_index]
val_data = [{'content': examples[i]['content'], 'labels': examples[i]['labels'],
'summary': examples[i]['summary']} for i in val_index]
train_dataset = summarunner_weather.utils.Dataset(train_data)
val_dataset = summarunner_weather.utils.Dataset(val_data)
# build model
net = getattr(summarunner_weather.models, args.model)(args, embed)
if use_gpu:
net.cuda()
# load dataset
train_iter = DataLoader(dataset=train_dataset,
batch_size=args.batch_size,
shuffle=True)
val_iter = DataLoader(dataset=val_dataset,
batch_size=args.batch_size,
shuffle=False)
# loss function
criterion = nn.BCELoss() # Binary Cross Entropy loss
# model info
print(net)
# params = sum(p.numel() for p in list(net.parameters())) / 1e6
# print('#Params: %.1fM' % params)
min_loss = float('inf')
optimizer = torch.optim.Adam(net.parameters(), lr=args.lr)
net.train()
t1 = time()
train_loss, val_loss = [], []
for epoch in range(1, args.epochs + 1):
for i, batch in enumerate(train_iter):
print("epoch: {}, batch num: {}".format(epoch, i))
features, targets, _, doc_lens = vocab.make_features(batch)
features, targets = Variable(features), Variable(targets.float())
if use_gpu:
features = features.cuda()
targets = targets.cuda()
probs = net(features, doc_lens)
loss = criterion(probs, targets)
optimizer.zero_grad()
loss.backward()
clip_grad_norm(net.parameters(), args.max_norm)
optimizer.step()
train_loss.append(float(loss.data))
if args.debug:
print('Batch ID:%d Loss:%f' % (i, loss.data[0]))
continue
if i % args.report_every == 0:
cur_loss = eval(net, vocab, val_iter, criterion)
if cur_loss < min_loss:
min_loss = cur_loss
net.save(cv_ptr)
val_loss.append(cur_loss)
logging.info('Epoch: %2d Min_Val_Loss: %f Cur_Val_Loss: %f' % (epoch, min_loss, cur_loss))
t2 = time()
logging.info('Total Cost:%f h' % ((t2 - t1) / 3600))
with | parser.add_argument('-lr', type=float, default=1e-4)
parser.add_argument('-batch_size', type=int, default=16)
parser.add_argument('-epochs', type=int, default=30) | random_line_split |
roadtrip_compute.py | 3e7'
#API_KEY = 'cc3bc7b1-4c27-4176-aefd-15017c363178'
#API_KEY = '57f195e9-78a9-4fd7-a10c-312f0502d659'
#constantes
API_NAVITIA = "https://api.sncf.com/v1/coverage/sncf/journeys?key={3}&from=admin:fr:{0}&to=admin:fr:{1}&datetime={2}&count=20"
all_waypoints = None
def datetime_str_to_datetime_str(datetime_str, fromFormat="%Y%m%dT%H%M%S", toFormat="%d/%m/%Y à %H:%M"):
"""Convert datetime in string format to another datetime string
Args:
datetime_str (str): input string
fromFormat (str, optional): input datetime format. Defaults to "%Y%m%dT%H%M%S".
toFormat (str, optional): output datetime format. Defaults to "%d/%m/%Y à %H:%M".
Returns:
str: output datetime to string formated in the given format
"""
date_time = datetime.strptime(datetime_str, fromFormat)
return date_time.strftime(toFormat)
def store_section(db_connector, description, geo_point_from, geo_point_to, section_type, duration=None, co2=None):
"""store trip section information in db
Args:
db_connector (psycopg 'connection'): connection instance to the database. Used to keep all requests in the same transaction
description (str): trip section resume
geo_point_from (str): start city coord (lat;long)
geo_point_to (float): end city coord (lat;long)
section_type (int): type of trip section [INFO, SECTION, SUB_SECTION, DELAY]
duration (int, optional): duration of the travel. Defaults to None.
co2 (float, optional): co2 emission for the travel. Defaults to None.
"""
indentation = ''
if section_type == 'DELAY' or section_type == 'SUB_SECTION':
indentation = ' -> '
print(indentation + description)
db_connector.execute_nonquery(sql.SQL_INSERT_FRENCH_TRIP_SECTION, [
geo_point_from, geo_point_to, description, section_type, duration, co2])
def save_trip_section(db_connector, all_waypoints, from_city_insee, to_city_insee, best_travel):
"""format trip section informations then print & store in db
Args:
db_connector (psycopg 'connection'): connection instance to the database. Used to keep all requests in the same transaction
from_city_insee (str): from city insee code
to_city_insee (str): to city insee code
best_travel (json): data about trip section
"""
from_city_name = all_waypoints.loc[all_waypoints[0]
== from_city_insee].values[0][2]
to_city_name = all_waypoints.loc[all_waypoints[0]
== to_city_insee].values[0][2]
from_city_gps = all_waypoints.loc[all_waypoints[0]
== to_city_insee].values[0][3]
to_city_gps = all_waypoints.loc[all_waypoints[0]
== to_city_insee].values[0][3]
store_section(db_connector, 'Voyage de {} à {}. Départ le {} - Arrivée le {} après {} transferts '.format(from_city_name, to_city_name, datetime_str_to_datetime_str(best_travel['departure_date_time']), datetime_str_to_datetime_str(best_travel['arrival_date_time']), best_travel['nb_transfers']),
None,
None,
'SECTION',
best_travel['duration'],
best_travel['co2_emission']["value"]
)
for section in best_travel['sections']:
if 'from' in section:
if not section['type'] == 'crow_fly':
# vilaine faute d'orthographe sur transfer_type
if not 'transfer_type' in section or not section['transfer_type'] == 'walking':
store_section(db_connector, '{} - {} ({})'.format(section['from']['name'], section['to']['name'], section['display_informations']['physical_mode']),
from_city_gps,
to_city_gps,
'SUB_SECTION')
# else : initiale section, not used
else:
store_section(db_connector, 'Waiting {} minutes'.format(section['duration']/60),
None,
None,
'DELAY')
def run_travel_optimisation(trip_start_date, is_min_co2_search = False, is_force_compute = False):
"""run the treatment to find the best optimized trip
Args:
trip_start_date (datetime): trip start date in format "%Y%m%dT%H%M%S"
is_min_co2_search (bool): specify is optimisation is based on co2 emission or duration
is_force_compute (bool): force the re-calculation of trips betweens all prefecture (very slow)
"""
waypoint_co2 = {}
waypoint_durations = {}
# get all prefectures referential
db_connector = Connector()
with db_connector:
results = db_connector.execute_query(sql.SQL_GET_ALL_PREFECTURE)
all_waypoints = pd.DataFrame(results.fetchall())
# Vérification si les trajets péfecture à préfecture ont été déjà calculés
db_connector = Connector()
with db_connector:
saved_waypoints = db_connector.execute_query(sql.SQL_GET_WAYPOINTS)
# Dans le précalcul des trajets optimaux, utilisation de la date courante
travel_date = datetime.now().strftime("%Y%m%dT%H%M%S")
bad_waypoints = []
if saved_waypoints.rowcount > 0 and not is_force_compute:
print("le référentiel des voyage existe déjà")
else:
try:
bdd_management.truncate_journey()
for (from_city, to_city) in combinations(all_waypoints[0].values, 2):
try:
if int(from_city) in bad_waypoints or int(to_city) in bad_waypoints:
continue
route = requests.get(API_NAVITIA.format(
int(from_city), int(to_city), travel_date, API_KEY))
response = json.loads(route.text)
mid_duration = 0
mid_co2 = 0
for journey in response["journeys"]:
mid_duration += journey["duration"]
mid_co2 += journey["co2_emission"]["value"]
waypoint_co2[frozenset([from_city, to_city])
] = mid_co2/len(response["journeys"])
waypoint_durations[frozenset(
[from_city, to_city])] = mid_duration/len(response["journeys"])
except Exception as e:
print("Error with finding the route between %s and %s : %s" %
(from_city, to_city, response["error"]["message"]))
if 'no destination point' == response["error"]["message"]:
bad_waypoints.append(int(to_city))
if 'no origin point' == response["error"]["message"]:
bad_waypoints.append(int(from_city))
for bad_insee_code in re.findall('The entry point: admin:fr:([0-9]+) is not valid', response["error"]["message"]):
if not int(bad_insee_code) in bad_waypoints:
bad_waypoints.append(int(bad_insee_code))
# Enregistrement des trajets point à point (préfecture à préfecture)
db_connector = Connector() | waypoint2,
str(waypoint_co2[frozenset([waypoint1, waypoint2])]),
str(int(waypoint_durations[frozenset([waypoint1, waypoint2])]))]
db_connector.execute_nonquery(sql.SQL_INSERT_WAYPOINT, waypoint)
# commit trajets unitaires dans la bdd
db_connector.commit()
# enregistrement des préfectures non trouvée (pas de gare)
print(bad_waypoints)
db_connector = Connector()
with db_connector:
for bad_city in bad_waypoints:
db_connector.execute_nonquery(
sql.SQL_INSERT_CITY_WITHOUT_STATION, str(bad_city))
#db_connector.commit()
except Exception as e:
print('Erreur durant la génération des trajets de préfecture en préfecture. Rollback effectué')
waypoint_co2 = {}
waypoint_durations = {}
processed_waypoints = set()
db_connector = Connector()
with db_connector:
waypoints = db_connector.execute_query(sql.SQL_GET_WAYPOINTS)
for row in waypoints:
waypoint_co2[frozenset([int(row[0]), int(row[1])])] = row[2]
waypoint_durations[frozenset([int(row[0]), int(row[1])])] = row[3]
processed_waypoints.update([row[0], row[1]])
travel_results = algorithms.run_genetic_algorithm(waypoints = list(processed_waypoints), is_min_co2_search = is_min_co2_search, generations=300, population_size=100 )
# take most represented trip order
journey_groups = Counter(chain(*travel_results))
| with db_connector:
for (waypoint1, waypoint2) in waypoint_co2.keys():
waypoint = [waypoint1, | random_line_split |
roadtrip_compute.py | e7'
#API_KEY = 'cc3bc7b1-4c27-4176-aefd-15017c363178'
#API_KEY = '57f195e9-78a9-4fd7-a10c-312f0502d659'
#constantes
API_NAVITIA = "https://api.sncf.com/v1/coverage/sncf/journeys?key={3}&from=admin:fr:{0}&to=admin:fr:{1}&datetime={2}&count=20"
all_waypoints = None
def datetime_str_to_datetime_str(datetime_str, fromFormat="%Y%m%dT%H%M%S", toFormat="%d/%m/%Y à %H:%M"):
"""Convert datetime in string format to another datetime string
Args:
datetime_str (str): input string
fromFormat (str, optional): input datetime format. Defaults to "%Y%m%dT%H%M%S".
toFormat (str, optional): output datetime format. Defaults to "%d/%m/%Y à %H:%M".
Returns:
str: output datetime to string formated in the given format
"""
date_time = datetime.strptime(datetime_str, fromFormat)
return date_time.strftime(toFormat)
def store_section(db_connector, description, geo_point_from, geo_point_to, section_type, duration=None, co2=None):
"""store trip section information in db
Args:
db_connector (psycopg 'connection'): connection instance to the database. Used to keep all requests in the same transaction
description (str): trip section resume
geo_point_from (str): start city coord (lat;long)
geo_point_to (float): end city coord (lat;long)
section_type (int): type of trip section [INFO, SECTION, SUB_SECTION, DELAY]
duration (int, optional): duration of the travel. Defaults to None.
co2 (float, optional): co2 emission for the travel. Defaults to None.
"""
indentation = ''
if section_type == 'DELAY' or section_type == 'SUB_SECTION':
indentation = ' -> '
print(indentation + description)
db_connector.execute_nonquery(sql.SQL_INSERT_FRENCH_TRIP_SECTION, [
geo_point_from, geo_point_to, description, section_type, duration, co2])
def save_ | onnector, all_waypoints, from_city_insee, to_city_insee, best_travel):
"""format trip section informations then print & store in db
Args:
db_connector (psycopg 'connection'): connection instance to the database. Used to keep all requests in the same transaction
from_city_insee (str): from city insee code
to_city_insee (str): to city insee code
best_travel (json): data about trip section
"""
from_city_name = all_waypoints.loc[all_waypoints[0]
== from_city_insee].values[0][2]
to_city_name = all_waypoints.loc[all_waypoints[0]
== to_city_insee].values[0][2]
from_city_gps = all_waypoints.loc[all_waypoints[0]
== to_city_insee].values[0][3]
to_city_gps = all_waypoints.loc[all_waypoints[0]
== to_city_insee].values[0][3]
store_section(db_connector, 'Voyage de {} à {}. Départ le {} - Arrivée le {} après {} transferts '.format(from_city_name, to_city_name, datetime_str_to_datetime_str(best_travel['departure_date_time']), datetime_str_to_datetime_str(best_travel['arrival_date_time']), best_travel['nb_transfers']),
None,
None,
'SECTION',
best_travel['duration'],
best_travel['co2_emission']["value"]
)
for section in best_travel['sections']:
if 'from' in section:
if not section['type'] == 'crow_fly':
# vilaine faute d'orthographe sur transfer_type
if not 'transfer_type' in section or not section['transfer_type'] == 'walking':
store_section(db_connector, '{} - {} ({})'.format(section['from']['name'], section['to']['name'], section['display_informations']['physical_mode']),
from_city_gps,
to_city_gps,
'SUB_SECTION')
# else : initiale section, not used
else:
store_section(db_connector, 'Waiting {} minutes'.format(section['duration']/60),
None,
None,
'DELAY')
def run_travel_optimisation(trip_start_date, is_min_co2_search = False, is_force_compute = False):
"""run the treatment to find the best optimized trip
Args:
trip_start_date (datetime): trip start date in format "%Y%m%dT%H%M%S"
is_min_co2_search (bool): specify is optimisation is based on co2 emission or duration
is_force_compute (bool): force the re-calculation of trips betweens all prefecture (very slow)
"""
waypoint_co2 = {}
waypoint_durations = {}
# get all prefectures referential
db_connector = Connector()
with db_connector:
results = db_connector.execute_query(sql.SQL_GET_ALL_PREFECTURE)
all_waypoints = pd.DataFrame(results.fetchall())
# Vérification si les trajets péfecture à préfecture ont été déjà calculés
db_connector = Connector()
with db_connector:
saved_waypoints = db_connector.execute_query(sql.SQL_GET_WAYPOINTS)
# Dans le précalcul des trajets optimaux, utilisation de la date courante
travel_date = datetime.now().strftime("%Y%m%dT%H%M%S")
bad_waypoints = []
if saved_waypoints.rowcount > 0 and not is_force_compute:
print("le référentiel des voyage existe déjà")
else:
try:
bdd_management.truncate_journey()
for (from_city, to_city) in combinations(all_waypoints[0].values, 2):
try:
if int(from_city) in bad_waypoints or int(to_city) in bad_waypoints:
continue
route = requests.get(API_NAVITIA.format(
int(from_city), int(to_city), travel_date, API_KEY))
response = json.loads(route.text)
mid_duration = 0
mid_co2 = 0
for journey in response["journeys"]:
mid_duration += journey["duration"]
mid_co2 += journey["co2_emission"]["value"]
waypoint_co2[frozenset([from_city, to_city])
] = mid_co2/len(response["journeys"])
waypoint_durations[frozenset(
[from_city, to_city])] = mid_duration/len(response["journeys"])
except Exception as e:
print("Error with finding the route between %s and %s : %s" %
(from_city, to_city, response["error"]["message"]))
if 'no destination point' == response["error"]["message"]:
bad_waypoints.append(int(to_city))
if 'no origin point' == response["error"]["message"]:
bad_waypoints.append(int(from_city))
for bad_insee_code in re.findall('The entry point: admin:fr:([0-9]+) is not valid', response["error"]["message"]):
if not int(bad_insee_code) in bad_waypoints:
bad_waypoints.append(int(bad_insee_code))
# Enregistrement des trajets point à point (préfecture à préfecture)
db_connector = Connector()
with db_connector:
for (waypoint1, waypoint2) in waypoint_co2.keys():
waypoint = [waypoint1,
waypoint2,
str(waypoint_co2[frozenset([waypoint1, waypoint2])]),
str(int(waypoint_durations[frozenset([waypoint1, waypoint2])]))]
db_connector.execute_nonquery(sql.SQL_INSERT_WAYPOINT, waypoint)
# commit trajets unitaires dans la bdd
db_connector.commit()
# enregistrement des préfectures non trouvée (pas de gare)
print(bad_waypoints)
db_connector = Connector()
with db_connector:
for bad_city in bad_waypoints:
db_connector.execute_nonquery(
sql.SQL_INSERT_CITY_WITHOUT_STATION, str(bad_city))
#db_connector.commit()
except Exception as e:
print('Erreur durant la génération des trajets de préfecture en préfecture. Rollback effectué')
waypoint_co2 = {}
waypoint_durations = {}
processed_waypoints = set()
db_connector = Connector()
with db_connector:
waypoints = db_connector.execute_query(sql.SQL_GET_WAYPOINTS)
for row in waypoints:
waypoint_co2[frozenset([int(row[0]), int(row[1])])] = row[2]
waypoint_durations[frozenset([int(row[0]), int(row[1])])] = row[3]
processed_waypoints.update([row[0], row[1]])
travel_results = algorithms.run_genetic_algorithm(waypoints = list(processed_waypoints), is_min_co2_search = is_min_co2_search, generations=300, population_size=100 )
# take most represented trip order
journey_groups = Counter(chain(*travel_results | trip_section(db_c | identifier_name |
roadtrip_compute.py | e7'
#API_KEY = 'cc3bc7b1-4c27-4176-aefd-15017c363178'
#API_KEY = '57f195e9-78a9-4fd7-a10c-312f0502d659'
#constantes
API_NAVITIA = "https://api.sncf.com/v1/coverage/sncf/journeys?key={3}&from=admin:fr:{0}&to=admin:fr:{1}&datetime={2}&count=20"
all_waypoints = None
def datetime_str_to_datetime_str(datetime_str, fromFormat="%Y%m%dT%H%M%S", toFormat="%d/%m/%Y à %H:%M"):
"""Convert datetime in string format to another datetime string
Args:
datetime_str (str): input string
fromFormat (str, optional): input datetime format. Defaults to "%Y%m%dT%H%M%S".
toFormat (str, optional): output datetime format. Defaults to "%d/%m/%Y à %H:%M".
Returns:
str: output datetime to string formated in the given format
"""
date_time = datetime.strptime(datetime_str, fromFormat)
return date_time.strftime(toFormat)
def store_section(db_connector, description, geo_point_from, geo_point_to, section_type, duration=None, co2=None):
"""store trip section information in db
Args:
db_connector (psycopg 'connection'): connection instance to the database. Used to keep all requests in the same transaction
description (str): trip section resume
geo_point_from (str): start city coord (lat;long)
geo_point_to (float): end city coord (lat;long)
section_type (int): type of trip section [INFO, SECTION, SUB_SECTION, DELAY]
duration (int, optional): duration of the travel. Defaults to None.
co2 (float, optional): co2 emission for the travel. Defaults to None.
"""
indentation = ''
if section_type == 'DELAY' or section_type == 'SUB_SECTION':
indentation = ' -> '
print(indentation + description)
db_connector.execute_nonquery(sql.SQL_INSERT_FRENCH_TRIP_SECTION, [
geo_point_from, geo_point_to, description, section_type, duration, co2])
def save_trip_section(db_connector, all_waypoints, from_city_insee, to_city_insee, best_travel):
"""format trip section informations then print & store in db
Args:
db_connector (psycopg 'connection'): connection instance to the database. Used to keep all requests in the same transaction
from_city_insee (str): from city insee code
to_city_insee (str): to city insee code
best_travel (json): data about trip section
"""
from_city_name = all_waypoints.loc[all_waypoints[0]
== from_city_insee].values[0][2]
to_city_name = all_waypoints.loc[all_waypoints[0]
== to_city_insee].values[0][2]
from_city_gps = all_waypoints.loc[all_waypoints[0]
== to_city_insee].values[0][3]
to_city_gps = all_waypoints.loc[all_waypoints[0]
== to_city_insee].values[0][3]
store_section(db_connector, 'Voyage de {} à {}. Départ le {} - Arrivée le {} après {} transferts '.format(from_city_name, to_city_name, datetime_str_to_datetime_str(best_travel['departure_date_time']), datetime_str_to_datetime_str(best_travel['arrival_date_time']), best_travel['nb_transfers']),
None,
None,
'SECTION',
best_travel['duration'],
best_travel['co2_emission']["value"]
)
for section in best_travel['sections']:
if 'from' in section:
if not section['type'] == 'crow_fly':
# vilaine faute d'orthographe sur transfer_type
if not 'transfer_type' in section or not section['transfer_type'] == 'walking':
store_section(db_connector, '{} - {} ({})'.format(section['from']['name'], section['to']['name'], section['display_informations']['physical_mode']),
from_city_gps,
to_city_gps,
'SUB_SECTION')
# else : initiale section, not used
else:
store_section(db_connector, 'Waiting {} minutes'.format(section['duration']/60),
None,
None,
'DELAY')
def run_travel_optimisation(trip_start_date, is_min_co2_search = False, is_force_compute = False):
"""run the treatment to find the best optimized trip
Args:
trip_start_date (datetime): trip start date in format "%Y%m%dT%H%M%S"
is_min_co2_search (bool): specify is optimisation is based on co2 emission or duration
is_force_compute (bool): force the re-calculation of trips betweens all prefecture (very slow)
"""
waypoint_co2 = {}
waypoint_durations = {}
# get all prefectures referential
db_connector = Connector()
with db_connector:
results = db_connector.execute_query(sql.SQL_GET_ALL_PREFECTURE)
all_waypoints = pd.DataFrame(results.fetchall())
# Vérification si les trajets péfecture à préfecture ont été déjà calculés
db_connector = Connector()
with db_connector:
saved_waypoints = db_connector.execute_query(sql.SQL_GET_WAYPOINTS)
# Dans le précalcul des trajets optimaux, utilisation de la date courante
travel_date = datetime.now().strftime("%Y%m%dT%H%M%S")
bad_waypoints = []
if saved_waypoints.rowcount > 0 and not is_force_compute:
print("le référentiel des voyage existe déjà")
else:
try:
bdd_management.truncate_journey()
for (from_city, to_city) in combinations(all_waypoints[0].values, 2):
try:
if int(from_city) in bad_waypoints or int(to_city) in bad_waypoints:
continue
| route = requests.get(API_NAVITIA.format(
int(from_city), int(to_city), travel_date, API_KEY))
response = json.loads(route.text)
mid_duration = 0
mid_co2 = 0
for journey in response["journeys"]:
mid_duration += journey["duration"]
mid_co2 += journey["co2_emission"]["value"]
waypoint_co2[frozenset([from_city, to_city])
] = mid_co2/len(response["journeys"])
waypoint_durations[frozenset(
[from_city, to_city])] = mid_duration/len(response["journeys"])
except Exception as e:
print("Error with finding the route between %s and %s : %s" %
(from_city, to_city, response["error"]["message"]))
if 'no destination point' == response["error"]["message"]:
bad_waypoints.append(int(to_city))
if 'no origin point' == response["error"]["message"]:
bad_waypoints.append(int(from_city))
for bad_insee_code in re.findall('The entry point: admin:fr:([0-9]+) is not valid', response["error"]["message"]):
if not int(bad_insee_code) in bad_waypoints:
bad_waypoints.append(int(bad_insee_code))
# Enregistrement des trajets point à point (préfecture à préfecture)
db_connector = Connector()
with db_connector:
for (waypoint1, waypoint2) in waypoint_co2.keys():
waypoint = [waypoint1,
waypoint2,
str(waypoint_co2[frozenset([waypoint1, waypoint2])]),
str(int(waypoint_durations[frozenset([waypoint1, waypoint2])]))]
db_connector.execute_nonquery(sql.SQL_INSERT_WAYPOINT, waypoint)
# commit trajets unitaires dans la bdd
db_connector.commit()
# enregistrement des préfectures non trouvée (pas de gare)
print(bad_waypoints)
db_connector = Connector()
with db_connector:
for bad_city in bad_waypoints:
db_connector.execute_nonquery(
sql.SQL_INSERT_CITY_WITHOUT_STATION, str(bad_city))
#db_connector.commit()
except Exception as e:
print('Erreur durant la génération des trajets de préfecture en préfecture. Rollback effectué')
waypoint_co2 = {}
waypoint_durations = {}
processed_waypoints = set()
db_connector = Connector()
with db_connector:
waypoints = db_connector.execute_query(sql.SQL_GET_WAYPOINTS)
for row in waypoints:
waypoint_co2[frozenset([int(row[0]), int(row[1])])] = row[2]
waypoint_durations[frozenset([int(row[0]), int(row[1])])] = row[3]
processed_waypoints.update([row[0], row[1]])
travel_results = algorithms.run_genetic_algorithm(waypoints = list(processed_waypoints), is_min_co2_search = is_min_co2_search, generations=300, population_size=100 )
# take most represented trip order
journey_groups = Counter(chain(*travel_results))
| conditional_block | |
roadtrip_compute.py | e7'
#API_KEY = 'cc3bc7b1-4c27-4176-aefd-15017c363178'
#API_KEY = '57f195e9-78a9-4fd7-a10c-312f0502d659'
#constantes
API_NAVITIA = "https://api.sncf.com/v1/coverage/sncf/journeys?key={3}&from=admin:fr:{0}&to=admin:fr:{1}&datetime={2}&count=20"
all_waypoints = None
def datetime_str_to_datetime_str(datetime_str, fromFormat="%Y%m%dT%H%M%S", toFormat="%d/%m/%Y à %H:%M"):
"""Convert datetime in string format to another datetime string
Args:
datetime_str (str): input string
fromFormat (str, optional): input datetime format. Defaults to "%Y%m%dT%H%M%S".
toFormat (str, optional): output datetime format. Defaults to "%d/%m/%Y à %H:%M".
Returns:
str: output datetime to string formated in the given format
"""
date_time = datetime.strptime(datetime_str, fromFormat)
return date_time.strftime(toFormat)
def store_section(db_connector, description, geo_point_from, geo_point_to, section_type, duration=None, co2=None):
"""st | save_trip_section(db_connector, all_waypoints, from_city_insee, to_city_insee, best_travel):
"""format trip section informations then print & store in db
Args:
db_connector (psycopg 'connection'): connection instance to the database. Used to keep all requests in the same transaction
from_city_insee (str): from city insee code
to_city_insee (str): to city insee code
best_travel (json): data about trip section
"""
from_city_name = all_waypoints.loc[all_waypoints[0]
== from_city_insee].values[0][2]
to_city_name = all_waypoints.loc[all_waypoints[0]
== to_city_insee].values[0][2]
from_city_gps = all_waypoints.loc[all_waypoints[0]
== to_city_insee].values[0][3]
to_city_gps = all_waypoints.loc[all_waypoints[0]
== to_city_insee].values[0][3]
store_section(db_connector, 'Voyage de {} à {}. Départ le {} - Arrivée le {} après {} transferts '.format(from_city_name, to_city_name, datetime_str_to_datetime_str(best_travel['departure_date_time']), datetime_str_to_datetime_str(best_travel['arrival_date_time']), best_travel['nb_transfers']),
None,
None,
'SECTION',
best_travel['duration'],
best_travel['co2_emission']["value"]
)
for section in best_travel['sections']:
if 'from' in section:
if not section['type'] == 'crow_fly':
# vilaine faute d'orthographe sur transfer_type
if not 'transfer_type' in section or not section['transfer_type'] == 'walking':
store_section(db_connector, '{} - {} ({})'.format(section['from']['name'], section['to']['name'], section['display_informations']['physical_mode']),
from_city_gps,
to_city_gps,
'SUB_SECTION')
# else : initiale section, not used
else:
store_section(db_connector, 'Waiting {} minutes'.format(section['duration']/60),
None,
None,
'DELAY')
def run_travel_optimisation(trip_start_date, is_min_co2_search = False, is_force_compute = False):
"""run the treatment to find the best optimized trip
Args:
trip_start_date (datetime): trip start date in format "%Y%m%dT%H%M%S"
is_min_co2_search (bool): specify is optimisation is based on co2 emission or duration
is_force_compute (bool): force the re-calculation of trips betweens all prefecture (very slow)
"""
waypoint_co2 = {}
waypoint_durations = {}
# get all prefectures referential
db_connector = Connector()
with db_connector:
results = db_connector.execute_query(sql.SQL_GET_ALL_PREFECTURE)
all_waypoints = pd.DataFrame(results.fetchall())
# Vérification si les trajets péfecture à préfecture ont été déjà calculés
db_connector = Connector()
with db_connector:
saved_waypoints = db_connector.execute_query(sql.SQL_GET_WAYPOINTS)
# Dans le précalcul des trajets optimaux, utilisation de la date courante
travel_date = datetime.now().strftime("%Y%m%dT%H%M%S")
bad_waypoints = []
if saved_waypoints.rowcount > 0 and not is_force_compute:
print("le référentiel des voyage existe déjà")
else:
try:
bdd_management.truncate_journey()
for (from_city, to_city) in combinations(all_waypoints[0].values, 2):
try:
if int(from_city) in bad_waypoints or int(to_city) in bad_waypoints:
continue
route = requests.get(API_NAVITIA.format(
int(from_city), int(to_city), travel_date, API_KEY))
response = json.loads(route.text)
mid_duration = 0
mid_co2 = 0
for journey in response["journeys"]:
mid_duration += journey["duration"]
mid_co2 += journey["co2_emission"]["value"]
waypoint_co2[frozenset([from_city, to_city])
] = mid_co2/len(response["journeys"])
waypoint_durations[frozenset(
[from_city, to_city])] = mid_duration/len(response["journeys"])
except Exception as e:
print("Error with finding the route between %s and %s : %s" %
(from_city, to_city, response["error"]["message"]))
if 'no destination point' == response["error"]["message"]:
bad_waypoints.append(int(to_city))
if 'no origin point' == response["error"]["message"]:
bad_waypoints.append(int(from_city))
for bad_insee_code in re.findall('The entry point: admin:fr:([0-9]+) is not valid', response["error"]["message"]):
if not int(bad_insee_code) in bad_waypoints:
bad_waypoints.append(int(bad_insee_code))
# Enregistrement des trajets point à point (préfecture à préfecture)
db_connector = Connector()
with db_connector:
for (waypoint1, waypoint2) in waypoint_co2.keys():
waypoint = [waypoint1,
waypoint2,
str(waypoint_co2[frozenset([waypoint1, waypoint2])]),
str(int(waypoint_durations[frozenset([waypoint1, waypoint2])]))]
db_connector.execute_nonquery(sql.SQL_INSERT_WAYPOINT, waypoint)
# commit trajets unitaires dans la bdd
db_connector.commit()
# enregistrement des préfectures non trouvée (pas de gare)
print(bad_waypoints)
db_connector = Connector()
with db_connector:
for bad_city in bad_waypoints:
db_connector.execute_nonquery(
sql.SQL_INSERT_CITY_WITHOUT_STATION, str(bad_city))
#db_connector.commit()
except Exception as e:
print('Erreur durant la génération des trajets de préfecture en préfecture. Rollback effectué')
waypoint_co2 = {}
waypoint_durations = {}
processed_waypoints = set()
db_connector = Connector()
with db_connector:
waypoints = db_connector.execute_query(sql.SQL_GET_WAYPOINTS)
for row in waypoints:
waypoint_co2[frozenset([int(row[0]), int(row[1])])] = row[2]
waypoint_durations[frozenset([int(row[0]), int(row[1])])] = row[3]
processed_waypoints.update([row[0], row[1]])
travel_results = algorithms.run_genetic_algorithm(waypoints = list(processed_waypoints), is_min_co2_search = is_min_co2_search, generations=300, population_size=100 )
# take most represented trip order
journey_groups = Counter(chain(*travel_results))
| ore trip section information in db
Args:
db_connector (psycopg 'connection'): connection instance to the database. Used to keep all requests in the same transaction
description (str): trip section resume
geo_point_from (str): start city coord (lat;long)
geo_point_to (float): end city coord (lat;long)
section_type (int): type of trip section [INFO, SECTION, SUB_SECTION, DELAY]
duration (int, optional): duration of the travel. Defaults to None.
co2 (float, optional): co2 emission for the travel. Defaults to None.
"""
indentation = ''
if section_type == 'DELAY' or section_type == 'SUB_SECTION':
indentation = ' -> '
print(indentation + description)
db_connector.execute_nonquery(sql.SQL_INSERT_FRENCH_TRIP_SECTION, [
geo_point_from, geo_point_to, description, section_type, duration, co2])
def | identifier_body |
http.go | URL并且解析JSON格式的返回数据
func DoURL(method, url string, body []byte) ([]byte, error) {
req, err := http.NewRequest(method, url, bytes.NewBuffer(body))
if err != nil {
return nil, err
}
resp, err := (&http.Client{}).Do(req)
if err != nil {
return nil, err
}
defer resp.Body.Close()
return ioutil.ReadAll(resp.Body)
}
// GetURL 请求URL
func GetURL(URL string) ([]byte, error) {
resp, err := http.Get(URL)
if err != nil {
return nil, err
}
defer resp.Body.Close()
return ioutil.ReadAll(resp.Body)
}
// GetURL 请求URL
func CtxGetURL(URL string) ([]byte, error) {
req := fasthttp.AcquireRequest()
defer fasthttp.ReleaseRequest(req) // 用完需要释放资源
// 默认是application/x-www-form-urlencoded
req.Header.SetMethod("GET")
req.SetRequestURI(URL)
resp := fasthttp.AcquireResponse()
defer fasthttp.ReleaseResponse(resp) // 用完需要释放资源
if err := fasthttp.Do(req, resp); err != nil {
return nil, err
}
b := resp.Body()
return b, nil
}
// GetValueURL 请求URL 附带参数
func GetValueURL(URL string, params url.Values) ([]byte, error) {
if params == nil {
return GetURL(URL)
}
resp, err := http.Get(fmt.Sprint(URL, "?", params.Encode()))
if err != nil {
return nil, err
}
defer resp.Body.Close()
return ioutil.ReadAll(resp.Body)
}
func CtxGetValueURL(URL string, params url.Values) ([]byte, error) {
if params == nil {
return CtxGetURL(URL)
}
req := fasthttp.AcquireRequest()
defer fasthttp.ReleaseRequest(req) // 用完需要释放资源
// 默认是application/x-www-form-urlencoded
req.Header.SetMethod("GET")
req.SetRequestURI(fmt.Sprint(URL, "?", params.Encode()))
resp := fasthttp.AcquireResponse()
defer fasthttp.ReleaseResponse(resp) // 用完需要释放资源
if err := fasthttp.Do(req, resp); err != nil {
return nil, err
}
b := resp.Body()
return b, nil
}
// GetURLReceiveJSON GET请求 自动解析JSON
func GetURLReceiveJSON(URL string, params url.Values, receive interface{}) error {
body, err := GetValueURL(URL, params)
if err != nil {
return err
}
var json = jsoniter.ConfigCompatibleWithStandardLibrary
err = json.Unmarshal(body, receive)
if err != nil {
return fmt.Errorf("json.Unmarshal failed: %s, %v", body, err)
}
return nil
}
func CtxGetURLReceiveJSON(URL string, params url.Values, receive interface{}) error {
body, err := CtxGetValueURL(URL, params)
if err != nil {
return err
}
var json = jsoniter.ConfigCompatibleWithStandardLibrary
err = json.Unmarshal(body, receive)
if err != nil {
return fmt.Errorf("json.Unmarshal failed: %s, %v", body, err)
}
return nil
}
// PostURL 请求URL
func PostURL(URL string, params url.Values) ([]byte, error) {
resp, err := http.PostForm(URL, params)
if err != nil {
return nil, err
}
defer resp.Body.Close()
return ioutil.ReadAll(resp.Body)
}
// 检查http请求中是否包含所需参数
// Deprecated: 使用CheckNotNil代替
func CheckParam(hr *http.Request, args ...string) string {
if strings.ToUpper(hr.Method) == "GET" {
for _, val := range args {
rs := hr.FormValue(val)
if StringIsEmpty(rs) {
return val
}
}
return ""
} else if strings.ToUpper(hr.Method) == "POST" { //post
for _, val := range args {
rs := hr.PostFormValue(val)
if StringIsEmpty(rs) {
return val
}
}
return ""
} else {
return hr.Method
}
}
// PostURLReceiveJSON POST请求 自动解析JSON
func PostURLReceiveJSON(URL string, params url.Values, receive interface{}) error {
body, err := PostURL(URL, params)
if err != nil {
return err
}
var json = jsoniter.ConfigCompatibleWithStandardLibrary
err = json.Unmarshal(body, receive)
if err != nil {
return fmt.Errorf("body:%v,err:%v", string(body), err)
}
return nil
}
// PostURLReceiveJSON POST请求 自动解析JSON
func PostMapReceiveJSON(URL string, maps map[string]string, receive interface{}) error {
params := url.Values{}
for k, v := range maps {
params.Set(k, v)
}
body, err := PostURL(URL, params)
if err != nil {
return err
}
var json = jsoniter.ConfigCompatibleWithStandardLibrary
err = json.Unmarshal(body, receive)
if err != nil {
return fmt.Errorf("body:%v,err:%v", string(body), err)
}
return nil
}
// PostJSON POST请求 BODY为JSON格式 ContentType=application/json
func PostJSON(URL string, v interface{}) ([]byte, error) {
var json = jsoniter.ConfigCompatibleWithStandardLibrary
b, err := json.Marshal(v)
if err != nil {
return nil, err
}
resp, err := http.Post(URL, "application/json", bytes.NewReader(b))
if err != nil {
return nil, err
}
defer resp.Body.Close()
return ioutil.ReadAll(resp.Body)
}
// PostJSON POST请求 BODY为JSON格式 ContentType=application/json
func GetJSON(URL string, v interface{}) ([]byte, error) {
var json = jsoniter.ConfigCompatibleWithStandardLibrary
b, err := json.Marshal(v)
if err != nil {
return nil, err
}
client := &http.Client{}
reqest, err := http.NewRequest("GET", URL, bytes.NewReader(b))
reqest.Header.Add("Content-Type", "application/json")
resp, err := client.Do(reqest)
//resp, err := http.Post(URL, "application/json", bytes.NewReader(b))
if err != nil {
return nil, err
}
defer resp.Body.Close()
return ioutil.ReadAll(resp.Body)
}
// PostJSONReceiveJSON POST请求 BODY为JSON格式 ContentType=application/json 自动解析JSON
func PostJSONReceiveJSON(URL string, send, receive interface{}) error {
body, err := PostJSON(URL, send)
if err != nil {
return err
}
var json = jsoniter.ConfigCompatibleWithStandardLibrary
err = json.Unmarshal(body, receive)
if err != nil {
return fmt.Errorf("error:%v,body{%s}", err, body)
}
return nil
}
// PostToJSON POST请求 BODY为json格式
// Deprecated: Please use PostJSON to replace
func PostToJSON(URL string, v interface{}) ([]byte, error) {
var json = jsoniter.ConfigCompatibleWithStandardLibrary
b, err := json.Marshal(v)
if err != nil {
return nil, err
}
req, err := http.NewRequest("POST", URL, bytes.NewBuffer(b))
if err != nil {
return nil, err
}
resp, err := (&http.Client{}).Do(req)
if err != nil {
return nil, err
}
defer resp.Body.Close()
return ioutil.ReadAll(resp.Body)
}
// CheckNotNil 检查HTTP参数是否为空
func CheckNotNil(r *http.Request, args ...string) error {
if args == nil || r == nil {
return nil
}
switch r.Method {
case "GET":
query := r.URL.Query()
for _, v := range args {
if strings.TrimSpace(query.Get(v)) == "" {
return fmt.Errorf("param(%s) is invalid", v)
}
}
case "POST":
for _, v := range args {
if strings.TrimSpace(r.PostFormValue(v)) == "" {
return fmt.Errorf("param(%s) is invalid", v)
}
}
default:
return errors.New("r.Method is not GET or POST")
}
return nil
}
// StringIsEmpty 判断是否有值为空或null或(null)
func StringIsEmpty(s ...string) bool {
var str string
for _, v := range s {
str = strings.TrimSpace(v)
if v == "" || strings.EqualFold(str, "(null)") || strings.EqualFold(str, "null") {
return true
}
}
return false
}
// WriteJSON 写入json字符串
func WriteJSON(w io.Writer, v interface{}) (int, error) {
var json = jsoniter.ConfigCompat | urn ""
}
var ip = strings.TrimSpace(r.Header.Get("X-Real-IP"))
if ip == "" {
ip, _, _ = net.SplitHostPort(strings.TrimSpace(r.RemoteAddr))
}
return ip
}
// CheckRemoteIP 验证IP
// in ips return true
func CheckRemoteIP(r *http.Request, ips ...string) bool {
if r == nil {
return false
}
var ip = GetRemoteIP(r)
for _, v := range ips {
if ip == v {
return true
}
}
return false
}
var regIPv4 = regexp.MustCompile(
` | ibleWithStandardLibrary
b, err := json.Marshal(v)
if err != nil {
return 0, err
}
return w.Write(b)
}
// GetRemoteIP 获取IP
func GetRemoteIP(r *http.Request) string {
if r == nil {
ret | identifier_body |
http.go | nil, err
}
defer resp.Body.Close()
return ioutil.ReadAll(resp.Body)
}
// GetURL 请求URL
func GetURL(URL string) ([]byte, error) {
resp, err := http.Get(URL)
if err != nil {
return nil, err
}
defer resp.Body.Close()
return ioutil.ReadAll(resp.Body)
}
// GetURL 请求URL
func CtxGetURL(URL string) ([]byte, error) {
req := fasthttp.AcquireRequest()
defer fasthttp.ReleaseRequest(req) // 用完需要释放资源
// 默认是application/x-www-form-urlencoded
req.Header.SetMethod("GET")
req.SetRequestURI(URL)
resp := fasthttp.AcquireResponse()
defer fasthttp.ReleaseResponse(resp) // 用完需要释放资源
if err := fasthttp.Do(req, resp); err != nil {
return nil, err
}
b := resp.Body()
return b, nil
}
// GetValueURL 请求URL 附带参数
func GetValueURL(URL string, params url.Values) ([]byte, error) {
if params == nil {
return GetURL(URL)
}
resp, err := http.Get(fmt.Sprint(URL, "?", params.Encode()))
if err != nil {
return nil, err
}
defer resp.Body.Close()
return ioutil.ReadAll(resp.Body)
}
func CtxGetValueURL(URL string, params url.Values) ([]byte, error) {
if params == nil {
return CtxGetURL(URL)
}
req := fasthttp.AcquireRequest()
defer fasthttp.ReleaseRequest(req) // 用完需要释放资源
// 默认是application/x-www-form-urlencoded
req.Header.SetMethod("GET")
req.SetRequestURI(fmt.Sprint(URL, "?", params.Encode()))
resp := fasthttp.AcquireResponse()
defer fasthttp.ReleaseResponse(resp) // 用完需要释放资源
if err := fasthttp.Do(req, resp); err != nil {
return nil, err
}
b := resp.Body()
return b, nil
}
// GetURLReceiveJSON GET请求 自动解析JSON
func GetURLReceiveJSON(URL string, params url.Values, receive interface{}) error {
body, err := GetValueURL(URL, params)
if err != nil {
return err
}
var json = jsoniter.ConfigCompatibleWithStandardLibrary
err = json.Unmarshal(body, receive)
if err != nil {
return fmt.Errorf("json.Unmarshal failed: %s, %v", body, err)
}
return nil
}
func CtxGetURLReceiveJSON(URL string, params url.Values, receive interface{}) error {
body, err := CtxGetValueURL(URL, params)
if err != nil {
return err
}
var json = jsoniter.ConfigCompatibleWithStandardLibrary
err = json.Unmarshal(body, receive)
if err != nil {
return fmt.Errorf("json.Unmarshal failed: %s, %v", body, err)
}
return nil
}
// PostURL 请求URL
func PostURL(URL string, params url.Values) ([]byte, error) {
resp, err := http.PostForm(URL, params)
if err != nil {
return nil, err
}
defer resp.Body.Close()
return ioutil.ReadAll(resp.Body)
}
// 检查http请求中是否包含所需参数
// Deprecated: 使用CheckNotNil代替
func CheckParam(hr *http.Request, args ...string) string {
if strings.ToUpper(hr.Method) == "GET" {
for _, val := range args {
rs := hr.FormValue(val)
if StringIsEmpty(rs) {
return val
}
}
return ""
} else if strings.ToUpper(hr.Method) == "POST" { //post
for _, val := range args {
rs := hr.PostFormValue(val)
if StringIsEmpty(rs) {
return val
}
}
return ""
} else {
return hr.Method
}
}
// PostURLReceiveJSON POST请求 自动解析JSON
func PostURLReceiveJSON(URL string, params url.Values, receive interface{}) error {
body, err := PostURL(URL, params)
if err != nil {
return err
}
var json = jsoniter.ConfigCompatibleWithStandardLibrary
err = json.Unmarshal(body, receive)
if err != nil {
return fmt.Errorf("body:%v,err:%v", string(body), err)
}
return nil
}
// PostURLReceiveJSON POST请求 自动解析JSON
func PostMapReceiveJSON(URL string, maps map[string]string, receive interface{}) error {
params := url.Values{}
for k, v := range maps {
params.Set(k, v)
}
body, err := PostURL(URL, params)
if err != nil {
return err
}
var json = jsoniter.ConfigCompatibleWithStandardLibrary
err = json.Unmarshal(body, receive)
if err != nil {
return fmt.Errorf("body:%v,err:%v", string(body), err)
}
return nil
}
// PostJSON POST请求 BODY为JSON格式 ContentType=application/json
func PostJSON(URL string, v interface{}) ([]byte, error) {
var json = jsoniter.ConfigCompatibleWithStandardLibrary
b, err := json.Marshal(v)
if err != nil {
return nil, err
}
resp, err := http.Post(URL, "application/json", bytes.NewReader(b))
if err != nil {
return nil, err
}
defer resp.Body.Close()
return ioutil.ReadAll(resp.Body)
}
// PostJSON POST请求 BODY为JSON格式 ContentType=application/json
func GetJSON(URL string, v interface{}) ([]byte, error) {
var json = jsoniter.ConfigCompatibleWithStandardLibrary
b, err := json.Marshal(v)
if err != nil {
return nil, err
}
client := &http.Client{}
reqest, err := http.NewRequest("GET", URL, bytes.NewReader(b))
reqest.Header.Add("Content-Type", "application/json")
resp, err := client.Do(reqest)
//resp, err := http.Post(URL, "application/json", bytes.NewReader(b))
if err != nil {
return nil, err
}
defer resp.Body.Close()
return ioutil.ReadAll(resp.Body)
}
// PostJSONReceiveJSON POST请求 BODY为JSON格式 ContentType=application/json 自动解析JSON
func PostJSONReceiveJSON(URL string, send, receive interface{}) error {
body, err := PostJSON(URL, send)
if err != nil {
return err
}
var json = jsoniter.ConfigCompatibleWithStandardLibrary
err = json.Unmarshal(body, receive)
if err != nil {
return fmt.Errorf("error:%v,body{%s}", err, body)
}
return nil
}
// PostToJSON POST请求 BODY为json格式
// Deprecated: Please use PostJSON to replace
func PostToJSON(URL string, v interface{}) ([]byte, error) {
var json = jsoniter.ConfigCompatibleWithStandardLibrary
b, err := json.Marshal(v)
if err != nil {
return nil, err
}
req, err := http.NewRequest("POST", URL, bytes.NewBuffer(b))
if err != nil {
return nil, err
}
resp, err := (&http.Client{}).Do(req)
if err != nil {
return nil, err
}
defer resp.Body.Close()
return ioutil.ReadAll(resp.Body)
}
// CheckNotNil 检查HTTP参数是否为空
func CheckNotNil(r *http.Request, args ...string) error {
if args == nil || r == nil {
return nil
}
switch r.Method {
case "GET":
query := r.URL.Query()
for _, v := range args {
if strings.TrimSpace(query.Get(v)) == "" {
return fmt.Errorf("param(%s) is invalid", v)
}
}
case "POST":
for _, v := range args {
if strings.TrimSpace(r.PostFormValue(v)) == "" {
return fmt.Errorf("param(%s) is invalid", v)
}
}
default:
return errors.New("r.Method is not GET or POST")
}
return nil
}
// StringIsEmpty 判断是否有值为空或null或(null)
func StringIsEmpty(s ...string) bool {
var str string
for _, v := range s {
str = strings.TrimSpace(v)
if v == "" || strings.EqualFold(str, "(null)") || strings.EqualFold(str, "null") {
return true
}
}
return false
}
// WriteJSON 写入json字符串
func WriteJSON(w io.Writer, v interface{}) (int, error) {
var json = jsoniter.ConfigCompatibleWithStandardLibrary
b, err := json.Marshal(v)
if err != nil {
return 0, err
}
return w.Write(b)
}
// GetRemoteIP 获取IP
func GetRemoteIP(r *http.Request) string {
if r == nil {
return ""
}
var ip = strings.TrimSpace(r.Header.Get("X-Real-IP"))
if ip == "" {
ip, _, _ = net.SplitHostPort(strings.TrimSpace(r.RemoteAddr))
}
return ip
}
// CheckRemoteIP 验证IP
// in ips return true
func CheckRemoteIP(r *http.Request, ips ...string) bool {
if r == nil {
return false
}
var ip = GetRemoteIP(r)
for _, v := range ips {
if ip == v {
return true
}
}
return false
}
var regIPv4 = regexp.MustCompile(
`^(((\d{1,2})|(1\d{1,2})|(2[0-4]\d)|(25[0-5]))\.){3}((\d{1,2})|(1\d{1,2})|(2[0-4]\d)|(25[0-5]))$`,
)
|
// IsValidIP | identifier_name | |
http.go | URL并且解析JSON格式的返回数据
func DoURL(method, url string, body []byte) ([]byte, error) {
req, err := http.NewRequest(method, url, bytes.NewBuffer(body))
if err != nil {
return nil, err
}
resp, err := (&http.Client{}).Do(req)
if err != nil {
return nil, err
}
defer resp.Body.Close()
return ioutil.ReadAll(resp.Body) |
// GetURL 请求URL
func GetURL(URL string) ([]byte, error) {
resp, err := http.Get(URL)
if err != nil {
return nil, err
}
defer resp.Body.Close()
return ioutil.ReadAll(resp.Body)
}
// GetURL 请求URL
func CtxGetURL(URL string) ([]byte, error) {
req := fasthttp.AcquireRequest()
defer fasthttp.ReleaseRequest(req) // 用完需要释放资源
// 默认是application/x-www-form-urlencoded
req.Header.SetMethod("GET")
req.SetRequestURI(URL)
resp := fasthttp.AcquireResponse()
defer fasthttp.ReleaseResponse(resp) // 用完需要释放资源
if err := fasthttp.Do(req, resp); err != nil {
return nil, err
}
b := resp.Body()
return b, nil
}
// GetValueURL 请求URL 附带参数
func GetValueURL(URL string, params url.Values) ([]byte, error) {
if params == nil {
return GetURL(URL)
}
resp, err := http.Get(fmt.Sprint(URL, "?", params.Encode()))
if err != nil {
return nil, err
}
defer resp.Body.Close()
return ioutil.ReadAll(resp.Body)
}
func CtxGetValueURL(URL string, params url.Values) ([]byte, error) {
if params == nil {
return CtxGetURL(URL)
}
req := fasthttp.AcquireRequest()
defer fasthttp.ReleaseRequest(req) // 用完需要释放资源
// 默认是application/x-www-form-urlencoded
req.Header.SetMethod("GET")
req.SetRequestURI(fmt.Sprint(URL, "?", params.Encode()))
resp := fasthttp.AcquireResponse()
defer fasthttp.ReleaseResponse(resp) // 用完需要释放资源
if err := fasthttp.Do(req, resp); err != nil {
return nil, err
}
b := resp.Body()
return b, nil
}
// GetURLReceiveJSON GET请求 自动解析JSON
func GetURLReceiveJSON(URL string, params url.Values, receive interface{}) error {
body, err := GetValueURL(URL, params)
if err != nil {
return err
}
var json = jsoniter.ConfigCompatibleWithStandardLibrary
err = json.Unmarshal(body, receive)
if err != nil {
return fmt.Errorf("json.Unmarshal failed: %s, %v", body, err)
}
return nil
}
func CtxGetURLReceiveJSON(URL string, params url.Values, receive interface{}) error {
body, err := CtxGetValueURL(URL, params)
if err != nil {
return err
}
var json = jsoniter.ConfigCompatibleWithStandardLibrary
err = json.Unmarshal(body, receive)
if err != nil {
return fmt.Errorf("json.Unmarshal failed: %s, %v", body, err)
}
return nil
}
// PostURL 请求URL
func PostURL(URL string, params url.Values) ([]byte, error) {
resp, err := http.PostForm(URL, params)
if err != nil {
return nil, err
}
defer resp.Body.Close()
return ioutil.ReadAll(resp.Body)
}
// 检查http请求中是否包含所需参数
// Deprecated: 使用CheckNotNil代替
func CheckParam(hr *http.Request, args ...string) string {
if strings.ToUpper(hr.Method) == "GET" {
for _, val := range args {
rs := hr.FormValue(val)
if StringIsEmpty(rs) {
return val
}
}
return ""
} else if strings.ToUpper(hr.Method) == "POST" { //post
for _, val := range args {
rs := hr.PostFormValue(val)
if StringIsEmpty(rs) {
return val
}
}
return ""
} else {
return hr.Method
}
}
// PostURLReceiveJSON POST请求 自动解析JSON
func PostURLReceiveJSON(URL string, params url.Values, receive interface{}) error {
body, err := PostURL(URL, params)
if err != nil {
return err
}
var json = jsoniter.ConfigCompatibleWithStandardLibrary
err = json.Unmarshal(body, receive)
if err != nil {
return fmt.Errorf("body:%v,err:%v", string(body), err)
}
return nil
}
// PostURLReceiveJSON POST请求 自动解析JSON
func PostMapReceiveJSON(URL string, maps map[string]string, receive interface{}) error {
params := url.Values{}
for k, v := range maps {
params.Set(k, v)
}
body, err := PostURL(URL, params)
if err != nil {
return err
}
var json = jsoniter.ConfigCompatibleWithStandardLibrary
err = json.Unmarshal(body, receive)
if err != nil {
return fmt.Errorf("body:%v,err:%v", string(body), err)
}
return nil
}
// PostJSON POST请求 BODY为JSON格式 ContentType=application/json
func PostJSON(URL string, v interface{}) ([]byte, error) {
var json = jsoniter.ConfigCompatibleWithStandardLibrary
b, err := json.Marshal(v)
if err != nil {
return nil, err
}
resp, err := http.Post(URL, "application/json", bytes.NewReader(b))
if err != nil {
return nil, err
}
defer resp.Body.Close()
return ioutil.ReadAll(resp.Body)
}
// PostJSON POST请求 BODY为JSON格式 ContentType=application/json
func GetJSON(URL string, v interface{}) ([]byte, error) {
var json = jsoniter.ConfigCompatibleWithStandardLibrary
b, err := json.Marshal(v)
if err != nil {
return nil, err
}
client := &http.Client{}
reqest, err := http.NewRequest("GET", URL, bytes.NewReader(b))
reqest.Header.Add("Content-Type", "application/json")
resp, err := client.Do(reqest)
//resp, err := http.Post(URL, "application/json", bytes.NewReader(b))
if err != nil {
return nil, err
}
defer resp.Body.Close()
return ioutil.ReadAll(resp.Body)
}
// PostJSONReceiveJSON POST请求 BODY为JSON格式 ContentType=application/json 自动解析JSON
func PostJSONReceiveJSON(URL string, send, receive interface{}) error {
body, err := PostJSON(URL, send)
if err != nil {
return err
}
var json = jsoniter.ConfigCompatibleWithStandardLibrary
err = json.Unmarshal(body, receive)
if err != nil {
return fmt.Errorf("error:%v,body{%s}", err, body)
}
return nil
}
// PostToJSON POST请求 BODY为json格式
// Deprecated: Please use PostJSON to replace
func PostToJSON(URL string, v interface{}) ([]byte, error) {
var json = jsoniter.ConfigCompatibleWithStandardLibrary
b, err := json.Marshal(v)
if err != nil {
return nil, err
}
req, err := http.NewRequest("POST", URL, bytes.NewBuffer(b))
if err != nil {
return nil, err
}
resp, err := (&http.Client{}).Do(req)
if err != nil {
return nil, err
}
defer resp.Body.Close()
return ioutil.ReadAll(resp.Body)
}
// CheckNotNil 检查HTTP参数是否为空
func CheckNotNil(r *http.Request, args ...string) error {
if args == nil || r == nil {
return nil
}
switch r.Method {
case "GET":
query := r.URL.Query()
for _, v := range args {
if strings.TrimSpace(query.Get(v)) == "" {
return fmt.Errorf("param(%s) is invalid", v)
}
}
case "POST":
for _, v := range args {
if strings.TrimSpace(r.PostFormValue(v)) == "" {
return fmt.Errorf("param(%s) is invalid", v)
}
}
default:
return errors.New("r.Method is not GET or POST")
}
return nil
}
// StringIsEmpty 判断是否有值为空或null或(null)
func StringIsEmpty(s ...string) bool {
var str string
for _, v := range s {
str = strings.TrimSpace(v)
if v == "" || strings.EqualFold(str, "(null)") || strings.EqualFold(str, "null") {
return true
}
}
return false
}
// WriteJSON 写入json字符串
func WriteJSON(w io.Writer, v interface{}) (int, error) {
var json = jsoniter.ConfigCompatibleWithStandardLibrary
b, err := json.Marshal(v)
if err != nil {
return 0, err
}
return w.Write(b)
}
// GetRemoteIP 获取IP
func GetRemoteIP(r *http.Request) string {
if r == nil {
return ""
}
var ip = strings.TrimSpace(r.Header.Get("X-Real-IP"))
if ip == "" {
ip, _, _ = net.SplitHostPort(strings.TrimSpace(r.RemoteAddr))
}
return ip
}
// CheckRemoteIP 验证IP
// in ips return true
func CheckRemoteIP(r *http.Request, ips ...string) bool {
if r == nil {
return false
}
var ip = GetRemoteIP(r)
for _, v := range ips {
if ip == v {
return true
}
}
return false
}
var regIPv4 = regexp.MustCompile(
`^ | } | random_line_split |
http.go | 并且解析JSON格式的返回数据
func DoURL(method, url string, body []byte) ([]byte, error) {
req, err := http.NewRequest(method, url, bytes.NewBuffer(body))
if err != nil {
return nil, err
}
resp, err := (&http.Client{}).Do(req)
if err != nil {
return nil, err
}
defer resp.Body.Close()
return ioutil.ReadAll(resp.Body)
}
// GetURL 请求URL
func GetURL(URL string) ([]byte, error) {
resp, err := http.Get(URL)
if err != nil {
return nil, err
}
defer resp.Body.Close()
return i | y)
}
// GetURL 请求URL
func CtxGetURL(URL string) ([]byte, error) {
req := fasthttp.AcquireRequest()
defer fasthttp.ReleaseRequest(req) // 用完需要释放资源
// 默认是application/x-www-form-urlencoded
req.Header.SetMethod("GET")
req.SetRequestURI(URL)
resp := fasthttp.AcquireResponse()
defer fasthttp.ReleaseResponse(resp) // 用完需要释放资源
if err := fasthttp.Do(req, resp); err != nil {
return nil, err
}
b := resp.Body()
return b, nil
}
// GetValueURL 请求URL 附带参数
func GetValueURL(URL string, params url.Values) ([]byte, error) {
if params == nil {
return GetURL(URL)
}
resp, err := http.Get(fmt.Sprint(URL, "?", params.Encode()))
if err != nil {
return nil, err
}
defer resp.Body.Close()
return ioutil.ReadAll(resp.Body)
}
func CtxGetValueURL(URL string, params url.Values) ([]byte, error) {
if params == nil {
return CtxGetURL(URL)
}
req := fasthttp.AcquireRequest()
defer fasthttp.ReleaseRequest(req) // 用完需要释放资源
// 默认是application/x-www-form-urlencoded
req.Header.SetMethod("GET")
req.SetRequestURI(fmt.Sprint(URL, "?", params.Encode()))
resp := fasthttp.AcquireResponse()
defer fasthttp.ReleaseResponse(resp) // 用完需要释放资源
if err := fasthttp.Do(req, resp); err != nil {
return nil, err
}
b := resp.Body()
return b, nil
}
// GetURLReceiveJSON GET请求 自动解析JSON
func GetURLReceiveJSON(URL string, params url.Values, receive interface{}) error {
body, err := GetValueURL(URL, params)
if err != nil {
return err
}
var json = jsoniter.ConfigCompatibleWithStandardLibrary
err = json.Unmarshal(body, receive)
if err != nil {
return fmt.Errorf("json.Unmarshal failed: %s, %v", body, err)
}
return nil
}
func CtxGetURLReceiveJSON(URL string, params url.Values, receive interface{}) error {
body, err := CtxGetValueURL(URL, params)
if err != nil {
return err
}
var json = jsoniter.ConfigCompatibleWithStandardLibrary
err = json.Unmarshal(body, receive)
if err != nil {
return fmt.Errorf("json.Unmarshal failed: %s, %v", body, err)
}
return nil
}
// PostURL 请求URL
func PostURL(URL string, params url.Values) ([]byte, error) {
resp, err := http.PostForm(URL, params)
if err != nil {
return nil, err
}
defer resp.Body.Close()
return ioutil.ReadAll(resp.Body)
}
// 检查http请求中是否包含所需参数
// Deprecated: 使用CheckNotNil代替
func CheckParam(hr *http.Request, args ...string) string {
if strings.ToUpper(hr.Method) == "GET" {
for _, val := range args {
rs := hr.FormValue(val)
if StringIsEmpty(rs) {
return val
}
}
return ""
} else if strings.ToUpper(hr.Method) == "POST" { //post
for _, val := range args {
rs := hr.PostFormValue(val)
if StringIsEmpty(rs) {
return val
}
}
return ""
} else {
return hr.Method
}
}
// PostURLReceiveJSON POST请求 自动解析JSON
func PostURLReceiveJSON(URL string, params url.Values, receive interface{}) error {
body, err := PostURL(URL, params)
if err != nil {
return err
}
var json = jsoniter.ConfigCompatibleWithStandardLibrary
err = json.Unmarshal(body, receive)
if err != nil {
return fmt.Errorf("body:%v,err:%v", string(body), err)
}
return nil
}
// PostURLReceiveJSON POST请求 自动解析JSON
func PostMapReceiveJSON(URL string, maps map[string]string, receive interface{}) error {
params := url.Values{}
for k, v := range maps {
params.Set(k, v)
}
body, err := PostURL(URL, params)
if err != nil {
return err
}
var json = jsoniter.ConfigCompatibleWithStandardLibrary
err = json.Unmarshal(body, receive)
if err != nil {
return fmt.Errorf("body:%v,err:%v", string(body), err)
}
return nil
}
// PostJSON POST请求 BODY为JSON格式 ContentType=application/json
func PostJSON(URL string, v interface{}) ([]byte, error) {
var json = jsoniter.ConfigCompatibleWithStandardLibrary
b, err := json.Marshal(v)
if err != nil {
return nil, err
}
resp, err := http.Post(URL, "application/json", bytes.NewReader(b))
if err != nil {
return nil, err
}
defer resp.Body.Close()
return ioutil.ReadAll(resp.Body)
}
// PostJSON POST请求 BODY为JSON格式 ContentType=application/json
func GetJSON(URL string, v interface{}) ([]byte, error) {
var json = jsoniter.ConfigCompatibleWithStandardLibrary
b, err := json.Marshal(v)
if err != nil {
return nil, err
}
client := &http.Client{}
reqest, err := http.NewRequest("GET", URL, bytes.NewReader(b))
reqest.Header.Add("Content-Type", "application/json")
resp, err := client.Do(reqest)
//resp, err := http.Post(URL, "application/json", bytes.NewReader(b))
if err != nil {
return nil, err
}
defer resp.Body.Close()
return ioutil.ReadAll(resp.Body)
}
// PostJSONReceiveJSON POST请求 BODY为JSON格式 ContentType=application/json 自动解析JSON
func PostJSONReceiveJSON(URL string, send, receive interface{}) error {
body, err := PostJSON(URL, send)
if err != nil {
return err
}
var json = jsoniter.ConfigCompatibleWithStandardLibrary
err = json.Unmarshal(body, receive)
if err != nil {
return fmt.Errorf("error:%v,body{%s}", err, body)
}
return nil
}
// PostToJSON POST请求 BODY为json格式
// Deprecated: Please use PostJSON to replace
func PostToJSON(URL string, v interface{}) ([]byte, error) {
var json = jsoniter.ConfigCompatibleWithStandardLibrary
b, err := json.Marshal(v)
if err != nil {
return nil, err
}
req, err := http.NewRequest("POST", URL, bytes.NewBuffer(b))
if err != nil {
return nil, err
}
resp, err := (&http.Client{}).Do(req)
if err != nil {
return nil, err
}
defer resp.Body.Close()
return ioutil.ReadAll(resp.Body)
}
// CheckNotNil 检查HTTP参数是否为空
func CheckNotNil(r *http.Request, args ...string) error {
if args == nil || r == nil {
return nil
}
switch r.Method {
case "GET":
query := r.URL.Query()
for _, v := range args {
if strings.TrimSpace(query.Get(v)) == "" {
return fmt.Errorf("param(%s) is invalid", v)
}
}
case "POST":
for _, v := range args {
if strings.TrimSpace(r.PostFormValue(v)) == "" {
return fmt.Errorf("param(%s) is invalid", v)
}
}
default:
return errors.New("r.Method is not GET or POST")
}
return nil
}
// StringIsEmpty 判断是否有值为空或null或(null)
func StringIsEmpty(s ...string) bool {
var str string
for _, v := range s {
str = strings.TrimSpace(v)
if v == "" || strings.EqualFold(str, "(null)") || strings.EqualFold(str, "null") {
return true
}
}
return false
}
// WriteJSON 写入json字符串
func WriteJSON(w io.Writer, v interface{}) (int, error) {
var json = jsoniter.ConfigCompatibleWithStandardLibrary
b, err := json.Marshal(v)
if err != nil {
return 0, err
}
return w.Write(b)
}
// GetRemoteIP 获取IP
func GetRemoteIP(r *http.Request) string {
if r == nil {
return ""
}
var ip = strings.TrimSpace(r.Header.Get("X-Real-IP"))
if ip == "" {
ip, _, _ = net.SplitHostPort(strings.TrimSpace(r.RemoteAddr))
}
return ip
}
// CheckRemoteIP 验证IP
// in ips return true
func CheckRemoteIP(r *http.Request, ips ...string) bool {
if r == nil {
return false
}
var ip = GetRemoteIP(r)
for _, v := range ips {
if ip == v {
return true
}
}
return false
}
var regIPv4 = regexp.MustCompile(
` | outil.ReadAll(resp.Bod | conditional_block |
segment.ts | ]', options.pathMain, options.novelID);
return Promise.resolve(options.files || FastGlob(globPattern, {
cwd: CWD_IN,
//absolute: true,
}) as any as Promise<string[]>)
.then(function (ls)
{
return _doSegmentGlob(ls, options);
})
;
}
/**
 * Run word segmentation over every file listed in `ls`, rewriting files whose
 * segmented text differs from the original.
 *
 * Files are read from `options.pathMain` and written to `options.pathMain_out`
 * (which defaults to the input path), both resolved under the novel root via
 * `_path`. Only `*.txt` files are segmented; missing files and files whose
 * content is only whitespace are recorded as done without changes.
 *
 * @param ls      - file paths relative to the resolved input directory
 * @param options - novel/segment options; `options.callback`, when provided,
 *                  is awaited after each processed file with
 *                  `(done_list, file, index, length)`
 * @returns a promise of `{ ls, done_list, count: { file, changed, done } }`
 * @throws rejects with `ERROR_MSG_001` when `ls` is empty
 */
export function _doSegmentGlob(ls: string[], options: IOptions)
{
	const novel_root = options.novel_root || ProjectConfig.novel_root;
	const segment = options.segment = getSegment(options.segment);

	options.pathMain_out = options.pathMain_out || options.pathMain;

	let CWD_IN = _path(options.pathMain, options.novelID, novel_root);
	let CWD_OUT = _path(options.pathMain_out, options.novelID, novel_root);

	return Promise
		.resolve(ls)
		.tap(function (ls)
		{
			if (ls.length == 0)
			{
				// nothing matched the glob — abort the whole run
				return Promise.reject(ERROR_MSG_001);
			}
		})
		.then(async function (ls)
		{
			let label = `all file ${ls.length}`;

			console.time(label);
			console.log(`all file ${ls.length}`);

			let count_changed = 0;
			let done_list = [] as string[];

			// files are processed one at a time (mapSeries) so `done_list`
			// grows in input order and the callback sees a stable prefix
			await Promise.mapSeries(ls, async function (file, index, length)
			{
				let fillpath = path.join(CWD_IN, file);
				let fillpath_out = path.join(CWD_OUT, file);

				if (!fs.pathExistsSync(fillpath))
				{
					// source file vanished between globbing and processing
					done_list.push(file);

					if (options.callback)
					{
						await options.callback(done_list, file, index, length);
					}

					return {
						file,
						changed: false,
						exists: false,
					};
				}
				else if (!file.match(/\.txt$/i))
				{
					// non-text files are never segmented
					// NOTE(review): unlike every other branch, this one does not
					// invoke options.callback — confirm whether that is intended
					done_list.push(file);

					return {
						file,
						changed: false,
						exists: true,
					};
				}

				let text = await fs.loadFile(fillpath, {
						autoDecode: true,
					})
					.then(v => crlf(v.toString()))
				;

				if (!text.replace(/\s+/g, ''))
				{
					// whitespace-only file — nothing to segment
					done_list.push(file);

					if (options.callback)
					{
						await options.callback(done_list, file, index, length);
					}

					return {
						file,
						changed: false,
						exists: true,
					};
				}

				let ks = await segment.doSegment(text);
				let text_new = await segment.stringify(ks);

				let changed = text_new != text;

				if (changed)
				{
					// only touch the output file when segmentation altered it
					await fs.outputFile(fillpath_out, text_new);
					count_changed++;
				}

				done_list.push(file);

				if (options.callback)
				{
					await options.callback(done_list, file, index, length);
				}

				// drop references to large intermediates before the next file
				ks = null;
				text = undefined;
				text_new = undefined;

				return {
					file,
					changed,
					exists: true,
				};
			});

			console.timeEnd(label);
			console[count_changed ? 'ok' : 'debug'](`file changed: ${count_changed}`);

			return {
				ls,
				done_list,
				count: {
					file: ls.length,
					changed: count_changed,
					done: done_list.length,
				},
			}
		})
		;
}
export function _path(pathMain, novelID, novel_root = ProjectConfig.novel_root): string
{
let p: string;
try
{
p = path.resolve(novel_root, pathMain, novelID)
}
catch (e)
{
console.dir({
novel_root,
pathMain,
novelID,
});
throw e;
}
return p;
}
export function getSegment(segment?: Segment)
{
if (!segment)
{
if (!_segmentObject)
{
segment = _segmentObject = createSegment();
let db_dict = getDictMain(segment);
}
segment = _segmentObject;
}
return segment;
}
export function resetSegmentCache()
{
let cache_file = CACHE_FILE;
if (fs.existsSync(cache_file))
{
console.red(`[Segment] reset cache`);
fs.removeSync(cache_file);
}
}
export function createSegment(useCache: boolean = true)
{
const segment = new Segment({
autoCjk: true,
optionsDoSegment: {
convertSynonym: true,
},
});
let cache_file = CACHE_FILE;
let options = {
/**
* 開啟 all_mod 才會在自動載入時包含 ZhtSynonymOptimizer
*/
all_mod: true,
};
console.time(`讀取模組與字典`);
/**
* 使用緩存的字典檔範例
*/
if (useCache && fs.existsSync(cache_file))
{
//console.log(`發現 cache.db`);
let st = fs.statSync(cache_file);
let md = (Date.now() - st.mtimeMs) / 1000;
//console.log(`距離上次緩存已過 ${md}s`);
if (md < CACHE_TIMEOUT)
{
//console.log(st, md);
//console.log(`開始載入緩存字典`);
let data = JSON.parse(fs.readFileSync(cache_file).toString());
useDefault(segment, {
...options,
nodict: true,
});
segment.DICT = data.DICT;
segment.inited = true;
cache_file = null;
data = undefined;
}
}
if (!segment.inited)
{
//console.log(`重新載入分析字典`);
segment.autoInit(options);
// 簡轉繁專用
//segment.loadSynonymDict('zht.synonym.txt');
}
let db_dict = segment.getDictDatabase('TABLE', true);
db_dict.TABLE = segment.DICT['TABLE'];
db_dict.TABLE2 = segment.DICT['TABLE2'];
db_dict.options.autoCjk = true;
//console.log('主字典總數', db_dict.size());
console.timeEnd(`讀取模組與字典`);
if (useCache && cache_file)
{
//console.log(`緩存字典於 cache.db`);
fs.outputFileSync(cache_file, JSON.stringify({
DICT: segment.DICT,
}));
}
freeGC();
return segment;
}
export function getDictMain(segment: Segment)
{
return segment.getDictDatabase('TABLE');
}
export function runSegment()
{
let _cache_file_segment = path.join(ProjectConfig.cache_root, '.segment');
let _cache_segment: {
s_ver?: string,
d_ver?: string,
last_s_ver?: string,
last_d_ver?: string,
list: {
[k: string]: {
[k: string]: {
s_ver?: string,
d_ver?: string,
last_s_ver?: string,
last_d_ver?: string, |
let _s_ver: string = String(require("novel-segment").version || '1');
let _d_ver: string = String(require("segment-dict").version || '1');
if (fs.existsSync(_cache_file_segment))
{
try
{
_cache_segment = fs.readJSONSync(_cache_file_segment);
}
catch (e)
{
}
}
// @ts-ignore
_cache_segment = _cache_segment || {};
_cache_segment.list = _cache_segment.list || {};
{
let { last_s_ver, last_d_ver, s_ver, d_ver } = _cache_segment;
console.debug({
_s_ver,
_d_ver,
s_ver,
d_ver,
});
if (s_ver != _s_ver || d_ver != _d_ver)
{
resetSegmentCache();
}
}
const startTime = Date.now();
const MAX_SCRIPT_TIMEOUT = 20 * 60 * 1000;
let cancellablePromise = Bluebird
.mapSeries(FastGlob([
'*/*.json',
], {
cwd: path.join(ProjectConfig.cache_root, 'files'),
}), async function (id: string)
{
let [pathMain, novelID] = id.split(/[\\\/]/);
novelID = path.basename(novelID, '.json');
if ((Date.now() - startTime) > MAX_SCRIPT_TIMEOUT)
{
return Bluebird.reject(new CancellationError(`任務已取消 本次將不會執行 ${pathMain}, ${novelID}`))
}
let np = _path(pathMain, novelID);
if (!fs.existsSync(np))
{
console.error(pathMain, novelID);
await fs.remove(path.join(ProjectConfig.cache_root, 'files', id));
return -1;
}
| },
}
},
}; | random_line_split |
segment.ts | ]', options.pathMain, options.novelID);
return Promise.resolve(options.files || FastGlob(globPattern, {
cwd: CWD_IN,
//absolute: true,
}) as any as Promise<string[]>)
.then(function (ls)
{
return _doSegmentGlob(ls, options);
})
;
}
export function _doSegmentGlob(ls: string[], options: IOptions)
{
const novel_root = options.novel_root || ProjectConfig.novel_root;
const segment = options.segment = getSegment(options.segment);
options.pathMain_out = options.pathMain_out || options.pathMain;
let CWD_IN = _path(options.pathMain, options.novelID, novel_root);
let CWD_OUT = _path(options.pathMain_out, options.novelID, novel_root);
return Promise
.resolve(ls)
.tap(function (ls)
{
if (ls.length == 0)
{
//console.log(CWD_IN);
return Promise.reject(ERROR_MSG_001);
}
})
.then(async function (ls)
{
let label = `all file ${ls.length}`;
console.time(label);
console.log(`all file ${ls.length}`);
let count_changed = 0;
let done_list = [] as string[];
let rs = await Promise.mapSeries(ls, async function (file, index, length)
{
let label = file;
//console.time(label);
// console.log('[start]', label);
let fillpath = path.join(CWD_IN, file);
let fillpath_out = path.join(CWD_OUT, file);
// console.log(fillpath);
// console.log(fillpath_out);
if (!fs.pathExistsSync(fillpath))
{
done_list.push(file);
if (options.callback)
{
await options.callback(done_list, file, index, length);
}
return {
file,
changed: false,
exists: false,
};
}
else if (!file.match(/\.txt$/i))
{
done_list.push(file);
return {
file,
changed: false,
exists: true,
};
}
let text = await fs.loadFile(fillpath, {
autoDecode: true,
})
.then(v => crlf(v.toString()))
;
if (!text.replace(/\s+/g, ''))
{
//console.warn('[skip]', label);
done_list.push(file);
if (options.callback)
{
await options.callback(done_list, file, index, length);
}
return {
file,
changed: false,
exists: true,
};
}
let _now = Date.now();
let ks = await segment.doSegment(text);
let timeuse = Date.now() - _now;
let text_new = await segment.stringify(ks);
let changed = text_new != text;
if (changed)
{
// console.warn('[changed]', label);
await fs.outputFile(fillpath_out, text_new);
count_changed++;
}
if (changed)
{
}
else
{
//console.log('[done]', label);
}
done_list.push(file);
if (options.callback)
{
await options.callback(done_list, file, index, length);
}
ks = null;
text = undefined;
text_new = undefined;
return {
file,
changed,
exists: true,
};
});
console.timeEnd(label);
console[count_changed ? 'ok' : 'debug'](`file changed: ${count_changed}`);
return {
ls,
done_list,
count: {
file: ls.length,
changed: count_changed,
done: done_list.length,
},
}
})
;
}
export function _path(pathMain, novelID, novel_root = ProjectConfig.novel_root): string
{
let p: string;
try
{
p = path.resolve(novel_root, pathMain, novelID)
}
catch (e)
{
console.dir({
novel_root,
pathMain,
novelID,
});
throw e;
}
return p;
}
export function getSegment(segment?: Segment)
{
if (!segment)
{
if (!_segmentObject)
{
segment = _segmentObject = createSegment();
let db_dict = getDictMain(segment);
}
segment = _segmentObject;
}
return segment;
}
export function resetSegmentCache()
{
let cache_file = CACHE_FILE;
if (fs.existsSync(cache_file))
{
console.red(`[Segment] reset cache`);
fs.removeSync(cache_file);
}
}
export function createSegment(useCache: boolean = true)
{
const segment = new Segment({
autoCjk: true,
optionsDoSegment: {
convertSynonym: true,
},
});
let cache_file = CACHE_FILE;
let options = {
/**
* 開啟 all_mod 才會在自動載入時包含 ZhtSynonymOptimizer
*/
all_mod: true,
};
console.time(`讀取模組與字典`);
/**
* 使用緩存的字典檔範例
*/
if (useCache && fs.existsSync(cache_file))
{
//console.log(`發現 cache.db`);
let st = fs.statSync(cache_file);
let md = (Date.now() - st.mtimeMs) / 1000;
//console.log(`距離上次緩存已過 ${md}s`);
if (md < CACHE_TIMEOUT)
{
//console.log(st, md);
//console.log(`開始載入緩存字典`);
let data = JSON.parse(fs.readFileSync(cache_fil |
// 簡轉繁專用
//segment.loadSynonymDic
t('zht.synonym.txt');
}
let db_dict = segment.getDictDatabase('TABLE', true);
db_dict.TABLE = segment.DICT['TABLE'];
db_dict.TABLE2 = segment.DICT['TABLE2'];
db_dict.options.autoCjk = true;
//console.log('主字典總數', db_dict.size());
console.timeEnd(`讀取模組與字典`);
if (useCache && cache_file)
{
//console.log(`緩存字典於 cache.db`);
fs.outputFileSync(cache_file, JSON.stringify({
DICT: segment.DICT,
}));
}
freeGC();
return segment;
}
export function getDictMain(segment: Segment)
{
return segment.getDictDatabase('TABLE');
}
export function runSegment()
{
let _cache_file_segment = path.join(ProjectConfig.cache_root, '.segment');
let _cache_segment: {
s_ver?: string,
d_ver?: string,
last_s_ver?: string,
last_d_ver?: string,
list: {
[k: string]: {
[k: string]: {
s_ver?: string,
d_ver?: string,
last_s_ver?: string,
last_d_ver?: string,
},
}
},
};
let _s_ver: string = String(require("novel-segment").version || '1');
let _d_ver: string = String(require("segment-dict").version || '1');
if (fs.existsSync(_cache_file_segment))
{
try
{
_cache_segment = fs.readJSONSync(_cache_file_segment);
}
catch (e)
{
}
}
// @ts-ignore
_cache_segment = _cache_segment || {};
_cache_segment.list = _cache_segment.list || {};
{
let { last_s_ver, last_d_ver, s_ver, d_ver } = _cache_segment;
console.debug({
_s_ver,
_d_ver,
s_ver,
d_ver,
});
if (s_ver != _s_ver || d_ver != _d_ver)
{
resetSegmentCache();
}
}
const startTime = Date.now();
const MAX_SCRIPT_TIMEOUT = 20 * 60 * 1000;
let cancellablePromise = Bluebird
.mapSeries(FastGlob([
'*/*.json',
], {
cwd: path.join(ProjectConfig.cache_root, 'files'),
}), async function (id: string)
{
let [pathMain, novelID] = id.split(/[\\\/]/);
novelID = path.basename(novelID, '.json');
if ((Date.now() - startTime) > MAX_SCRIPT_TIMEOUT)
{
return Bluebird.reject(new CancellationError(`任務已取消 本次將不會執行 ${pathMain}, ${novelID}`))
}
let np = _path(pathMain, novelID);
if (!fs.existsSync(np))
{
console.error(pathMain, novelID);
await fs.remove(path.join(ProjectConfig.cache_root, 'files', id));
return -1;
| e).toString());
useDefault(segment, {
...options,
nodict: true,
});
segment.DICT = data.DICT;
segment.inited = true;
cache_file = null;
data = undefined;
}
}
if (!segment.inited)
{
//console.log(`重新載入分析字典`);
segment.autoInit(options); | conditional_block |
segment.ts | ]', options.pathMain, options.novelID);
return Promise.resolve(options.files || FastGlob(globPattern, {
cwd: CWD_IN,
//absolute: true,
}) as any as Promise<string[]>)
.then(function (ls)
{
return _doSegmentGlob(ls, options);
})
;
}
export function _doSegmentGlob(ls: string[], options: IOptions)
{
const novel_root = options.novel_root || ProjectConfig.novel_root;
const segment = options.segment = getSegment(options.segment);
options.pathMain_out = options.pathMain_out || options.pathMain;
let CWD_IN = _path(options.pathMain, options.novelID, novel_root);
let CWD_OUT = _path(options.pathMain_out, options.novelID, novel_root);
return Promise
.resolve(ls)
.tap(function (ls)
{
if (ls.length == 0)
{
//console.log(CWD_IN);
return Promise.reject(ERROR_MSG_001);
}
})
.then(async function (ls)
{
let label = `all file ${ls.length}`;
console.time(label);
console.log(`all file ${ls.length}`);
let count_changed = 0;
let done_list = [] as string[];
let rs = await Promise.mapSeries(ls, async function (file, index, length)
{
let label = file;
//console.time(label);
// console.log('[start]', label);
let fillpath = path.join(CWD_IN, file);
let fillpath_out = path.join(CWD_OUT, file);
// console.log(fillpath);
// console.log(fillpath_out);
if (!fs.pathExistsSync(fillpath))
{
done_list.push(file);
if (options.callback)
{
await options.callback(done_list, file, index, length);
}
return {
file,
changed: false,
exists: false,
};
}
else if (!file.match(/\.txt$/i))
{
done_list.push(file);
return {
file,
changed: false,
exists: true,
};
}
let text = await fs.loadFile(fillpath, {
autoDecode: true,
})
.then(v => crlf(v.toString()))
;
if (!text.replace(/\s+/g, ''))
{
//console.warn('[skip]', label);
done_list.push(file);
if (options.callback)
{
await options.callback(done_list, file, index, length);
}
return {
file,
changed: false,
exists: true,
};
}
let _now = Date.now();
let ks = await segment.doSegment(text);
let timeuse = Date.now() - _now;
let text_new = await segment.stringify(ks);
let changed = text_new != text;
if (changed)
{
// console.warn('[changed]', label);
await fs.outputFile(fillpath_out, text_new);
count_changed++;
}
if (changed)
{
}
else
{
//console.log('[done]', label);
}
done_list.push(file);
if (options.callback)
{
await options.callback(done_list, file, index, length);
}
ks = null;
text = undefined;
text_new = undefined;
return {
file,
changed,
exists: true,
};
});
console.timeEnd(label);
console[count_changed ? 'ok' : 'debug'](`file changed: ${count_changed}`);
return {
ls,
done_list,
count: {
file: ls.length,
changed: count_changed,
done: done_list.length,
},
}
})
;
}
export function _path(pathMain, novelID, novel_root = ProjectConfig.novel_root): string
{
let p: string;
try
{
p = path.resolve(novel_root, pathMain, novelID)
}
catch (e)
{
console.dir({
novel_root,
pathMain,
novelID,
});
throw e;
}
return p;
}
export function getSegment(segment?: Segment)
{
if (!segment)
{
if (!_segmentObject)
{
segment = _segmentObject = createSegment();
let db_dict = getDictMain(segment);
}
segment = _segmentObject;
}
return segment;
}
export function resetSegmentCache()
{
let cache_file = CACHE_FILE;
if (fs.existsSync(cache_file))
{
console.red(`[Segment] reset cache`);
fs.removeSync(cache_file);
}
}
export function createSegment(useCache: boolean = true)
{
const segment = new Segment({
autoCjk: true,
optionsDoSegment: {
convertSynonym: true,
},
});
let cache_file = CACHE_FILE;
let options = {
/**
* 開啟 all_mod 才會在自動載入時包含 ZhtSynonymOptimizer
*/
all_mod: true,
};
console.time(`讀取模組與字典`);
/**
* 使用緩存的字典檔範例
*/
if (useCache && fs.existsSync(cache_file))
{
//console.log(`發現 cache.db`);
let st = fs.statSync(cache_file);
let md = (Date.now() - st.mtimeMs) / 1000;
//console.log(`距離上次緩存已過 ${md}s`);
if (md < CACHE_TIMEOUT)
{
//console.log(st, md);
//console.log(`開始載入緩存字典`);
let data = JSON.parse(fs.readFileSync(cache_file).toString());
useDefault(segment, {
...options,
nodict: true,
});
segment.DICT = data.DICT;
segment.inited = true;
cache_file = null;
data = undefined;
}
}
if (!segment.inited)
{
//console.log(`重新載入分析字典`);
segment.autoInit(options);
// 簡轉繁專用
//segment.loadSynonymDict('zht.synonym.txt');
}
let db_dict = segment.getDictDatabase('TABLE', true);
db_dict.TABLE = segment.DICT['TABLE'];
db_dict.TABLE2 = segment.DICT['TABLE2'];
db_dict.options.autoCjk = true;
//console.log('主字典總數', db_dict.size());
console.timeEnd(`讀取模組與字典`);
if (useCache && cache_file)
{
//console.log(`緩存字典於 cache.db`);
fs.outputFileSync(cache_file, JSON.stringify({
DICT: segment.DICT,
}));
}
freeGC();
return segment;
}
export function getDictMain(segment: Segment)
{
return segment.getDictDatabase('TABLE');
}
export function runSegment()
{
let _cache_file_segment = path.join(ProjectConfig.cache_root, '.segment');
let _cache_segment: {
s_ver?: string,
d_ver?: string,
last_s_ver?: string,
last_d_v | g,
list: {
[k: string]: {
[k: string]: {
s_ver?: string,
d_ver?: string,
last_s_ver?: string,
last_d_ver?: string,
},
}
},
};
let _s_ver: string = String(require("novel-segment").version || '1');
let _d_ver: string = String(require("segment-dict").version || '1');
if (fs.existsSync(_cache_file_segment))
{
try
{
_cache_segment = fs.readJSONSync(_cache_file_segment);
}
catch (e)
{
}
}
// @ts-ignore
_cache_segment = _cache_segment || {};
_cache_segment.list = _cache_segment.list || {};
{
let { last_s_ver, last_d_ver, s_ver, d_ver } = _cache_segment;
console.debug({
_s_ver,
_d_ver,
s_ver,
d_ver,
});
if (s_ver != _s_ver || d_ver != _d_ver)
{
resetSegmentCache();
}
}
const startTime = Date.now();
const MAX_SCRIPT_TIMEOUT = 20 * 60 * 1000;
let cancellablePromise = Bluebird
.mapSeries(FastGlob([
'*/*.json',
], {
cwd: path.join(ProjectConfig.cache_root, 'files'),
}), async function (id: string)
{
let [pathMain, novelID] = id.split(/[\\\/]/);
novelID = path.basename(novelID, '.json');
if ((Date.now() - startTime) > MAX_SCRIPT_TIMEOUT)
{
return Bluebird.reject(new CancellationError(`任務已取消 本次將不會執行 ${pathMain}, ${novelID}`))
}
let np = _path(pathMain, novelID);
if (!fs.existsSync(np))
{
console.error(pathMain, novelID);
await fs.remove(path.join(ProjectConfig.cache_root, 'files', id));
return -1;
| er?: strin | identifier_name |
segment.ts | ]', options.pathMain, options.novelID);
return Promise.resolve(options.files || FastGlob(globPattern, {
cwd: CWD_IN,
//absolute: true,
}) as any as Promise<string[]>)
.then(function (ls)
{
return _doSegmentGlob(ls, options);
})
;
}
export function _doSegmentGlob(ls: string[], options: IOptions)
{
const novel_root = options.novel_root || ProjectConfig.novel_root;
const segment = options.segment = getSegment(options.segment);
options.pathMain_out = options.pathMain_out || options.pathMain;
let CWD_IN = _path(options.pathMain, options.novelID, novel_root);
let CWD_OUT = _path(options.pathMain_out, options.novelID, novel_root);
return Promise
.resolve(ls)
.tap(function (ls)
{
if (ls.length == 0)
{
//console.log(CWD_IN);
return Promise.reject(ERROR_MSG_001);
}
})
.then(async function (ls)
{
let label = `all file ${ls.length}`;
console.time(label);
console.log(`all file ${ls.length}`);
let count_changed = 0;
let done_list = [] as string[];
let rs = await Promise.mapSeries(ls, async function (file, index, length)
{
let label = file;
//console.time(label);
// console.log('[start]', label);
let fillpath = path.join(CWD_IN, file);
let fillpath_out = path.join(CWD_OUT, file);
// console.log(fillpath);
// console.log(fillpath_out);
if (!fs.pathExistsSync(fillpath))
{
done_list.push(file);
if (options.callback)
{
await options.callback(done_list, file, index, length);
}
return {
file,
changed: false,
exists: false,
};
}
else if (!file.match(/\.txt$/i))
{
done_list.push(file);
return {
file,
changed: false,
exists: true,
};
}
let text = await fs.loadFile(fillpath, {
autoDecode: true,
})
.then(v => crlf(v.toString()))
;
if (!text.replace(/\s+/g, ''))
{
//console.warn('[skip]', label);
done_list.push(file);
if (options.callback)
{
await options.callback(done_list, file, index, length);
}
return {
file,
changed: false,
exists: true,
};
}
let _now = Date.now();
let ks = await segment.doSegment(text);
let timeuse = Date.now() - _now;
let text_new = await segment.stringify(ks);
let changed = text_new != text;
if (changed)
{
// console.warn('[changed]', label);
await fs.outputFile(fillpath_out, text_new);
count_changed++;
}
if (changed)
{
}
else
{
//console.log('[done]', label);
}
done_list.push(file);
if (options.callback)
{
await options.callback(done_list, file, index, length);
}
ks = null;
text = undefined;
text_new = undefined;
return {
file,
changed,
exists: true,
};
});
console.timeEnd(label);
console[count_changed ? 'ok' : 'debug'](`file changed: ${count_changed}`);
return {
ls,
done_list,
count: {
file: ls.length,
changed: count_changed,
done: done_list.length,
},
}
})
;
}
export function _path(pathMain, novelID, novel_root = ProjectConfig.novel_root): string
{
let p: string;
try
{
p = path.resolve(novel_root, pathMain, novelID)
}
catch (e)
{
console.dir({
novel_root,
pathMain,
novelID,
});
throw e;
}
return p;
}
export function getSegment(segment?: Segment)
{
if (!segment)
{
if (!_segmentObject)
{
segment = _segmentObject = createSegment();
let db_dict = getDictMain(segment);
}
segment = _segmentObject;
}
return segment;
}
export function resetSegmentCache()
{
let cache_file = CACHE_FILE;
if (fs.existsSync(cache_file))
{
console.red(`[Segment] reset cache`);
fs.removeSync(cache_file);
}
}
export function createSegment(useCache: boolean = true)
{
const segment = new Segment({
autoCjk: true,
optionsDoSegment: {
convertSynonym: true,
},
});
let cache_file = CACHE_FILE;
let options = {
/**
* 開啟 all_mod 才會在自動載入時包含 ZhtSynonymOptimizer
*/
all_mod: true,
};
console.time(`讀取模組與字典`);
/**
* 使用緩存的字典檔範例
*/
if (useCache && fs.existsSync(cache_file))
{
//console.log(`發現 cache.db`);
let st = fs.statSync(cache_file);
let md = (Date.now() - st.mtimeMs) / 1000;
//console.log(`距離上次緩存已過 ${md}s`);
if (md < CACHE_TIMEOUT)
{
//console.log(st, md);
//console.log(`開始載入緩存字典`);
let data = JSON.parse(fs.readFileSync(cache_file).toString());
useDefault(segment, {
...options,
nodict: true,
});
segment.DICT = data.DICT;
segment.inited = true;
cache_file = null;
data = undefined;
}
}
if (!segment.inited)
{
//console.log(`重新載入分析字典`);
segment.autoInit(options);
// 簡轉繁專用
//segment.loadSynonymDict('zht.synonym.txt');
}
let db_dict = segment.getDictDatabase('TABLE', true);
db_dict.TABLE = segment.DICT['TABLE'];
db_dict.TABLE2 = segment.DICT['TABLE2'];
db_dict.options.autoCjk = true;
//console.log('主字典總數', db_dict.size());
console.timeEnd(`讀取模組與字典`);
if (useCache && cache_file)
{
//console.log(`緩存字典於 cache.db`);
fs.outputFileSync(cache_file, JSON.stringify({
DICT: segment.DICT,
}));
}
freeGC();
return segment;
}
export function getDictMain(segment: Segment)
{
return segment.getDictDatabase('TABLE');
}
export function runSegment()
{
let _cache_file_segment = path.join(ProjectConfig.cache_root, '.segment');
let _cache_segment: {
s_ver | string,
last_d_ver?: string,
list: {
[k: string]: {
[k: string]: {
s_ver?: string,
d_ver?: string,
last_s_ver?: string,
last_d_ver?: string,
},
}
},
};
let _s_ver: string = String(require("novel-segment").version || '1');
let _d_ver: string = String(require("segment-dict").version || '1');
if (fs.existsSync(_cache_file_segment))
{
try
{
_cache_segment = fs.readJSONSync(_cache_file_segment);
}
catch (e)
{
}
}
// @ts-ignore
_cache_segment = _cache_segment || {};
_cache_segment.list = _cache_segment.list || {};
{
let { last_s_ver, last_d_ver, s_ver, d_ver } = _cache_segment;
console.debug({
_s_ver,
_d_ver,
s_ver,
d_ver,
});
if (s_ver != _s_ver || d_ver != _d_ver)
{
resetSegmentCache();
}
}
const startTime = Date.now();
const MAX_SCRIPT_TIMEOUT = 20 * 60 * 1000;
let cancellablePromise = Bluebird
.mapSeries(FastGlob([
'*/*.json',
], {
cwd: path.join(ProjectConfig.cache_root, 'files'),
}), async function (id: string)
{
let [pathMain, novelID] = id.split(/[\\\/]/);
novelID = path.basename(novelID, '.json');
if ((Date.now() - startTime) > MAX_SCRIPT_TIMEOUT)
{
return Bluebird.reject(new CancellationError(`任務已取消 本次將不會執行 ${pathMain}, ${novelID}`))
}
let np = _path(pathMain, novelID);
if (!fs.existsSync(np))
{
console.error(pathMain, novelID);
await fs.remove(path.join(ProjectConfig.cache_root, 'files', id));
return -1;
}
| ?: string,
d_ver?: string,
last_s_ver?: | identifier_body |
lib.rs | else {
while v != 0 && v % branch_factor as i32 == 0 {
v /= branch_factor as i32;
layer += 1;
}
}
return layer;
}
impl<'a> Mast<'a> {
pub fn newInMemory() -> Mast<'a> {
return Mast {
size: 0,
height: 0,
root_link: Link::MutableNode(Node::new(default_branch_factor as usize), None),
branch_factor: default_branch_factor,
grow_after_size: default_branch_factor as u64,
shrink_below_size: 1,
key_order: default_order,
key_layer: default_layer,
_a: std::marker::PhantomData,
// store: InMemoryNodeStore::new(),
};
}
fn insert(&mut self, key: i32, value: i32) -> Result<InsertResult, MastError> {
let key_layer = (self.key_layer)(&key, self.branch_factor);
let target_layer = std::cmp::min(key_layer, self.height);
let distance = self.height - target_layer;
let root = load_mut(&mut self.root_link)?;
let res = root.insert(key, value, distance, self.key_order)?;
match res {
InsertResult::Inserted => self.size += 1,
_ => return Ok(res),
};
if self.size > self.grow_after_size
&& root.can_grow(self.height, self.key_layer, self.branch_factor)
{
self.root_link = root
.grow(self.height, self.key_layer, self.branch_factor)
.unwrap();
self.height += 1;
self.shrink_below_size *= self.branch_factor as u64;
self.grow_after_size *= self.branch_factor as u64;
};
Ok(res)
}
fn get(&self, key: &i32) -> Result<Option<&i32>, MastError> {
let mut distance =
self.height - std::cmp::min((self.key_layer)(key, self.branch_factor), self.height);
if distance < 0 { panic!("goo") };
let mut node = load(&self.root_link)?;
loop {
let (equal, i) = get_index_for_key(key, &node.key, self.key_order);
if distance == 0 {
if equal {
return Ok(Some(&node.value[i]));
} else {
return Ok(None);
}
} else {
distance -= 1
}
match node.link[i] {
None => return Ok(None),
Some(ref link) => node = load(link)?,
}
}
}
}
fn load(link: &Link) -> Result<&Node, MastError> {
match link {
// Link::Empty => Ok(Cow::Borrowed(&Node::empty()).to_mut()),
Link::MutableNode(ref node, _) => Ok(node),
Link::SharedNode(ref rc) => Ok(rc),
Link::Stored(_) => unimplemented!("Link::Stored"),
}
}
fn load_mut(link: &mut Link) -> Result<&mut Node, MastError> {
match link {
// Link::Empty => Ok(Cow::Borrowed(&Node::empty()).to_mut()),
Link::MutableNode(ref mut node, _) => Ok(node),
Link::SharedNode(ref mut rc) => {
let mutable = Rc::make_mut(rc).to_owned();
*link = Link::MutableNode(mutable, Some(rc.clone()));
if let Link::MutableNode(ref mut scopey, _) = link {
Ok(scopey)
} else {
panic!("asdf")
}
}
Link::Stored(_) => unimplemented!("Link::Stored"),
}
}
// struct NodeAndSlot<'a>(&'a mut Node<'a>, usize);
/*
struct FindOptions<'a> {
mast: &'a mut Mast<'a>,
target_layer: u8,
current_height: u8,
create_missing_nodes: bool,
node_path: Vec<&'a mut Node>,
link_path: Vec<usize>,
}
*/
impl Node {
fn new(branch_factor: usize) -> Node {
let mut link = Vec::with_capacity(branch_factor + 1);
link.push(None);
Node {
key: Vec::with_capacity(branch_factor),
value: Vec::with_capacity(branch_factor),
link,
dirty: false,
}
}
/*
fn follow(
&'a mut self,
index: usize,
create_ok: bool,
m: &'a mut Mast<'a>,
) -> std::result::Result<&'a mut Node<'a>, std::io::Error> {
if let Some(ref mut links) = self.link {
return Ok(m.load(&mut links[index])?);
} else if !create_ok {
return Ok(self);
}
return Ok(&mut Node::empty());
}*/
fn insert(
&mut self,
key: i32,
value: i32,
distance: u8,
key_order: fn(&i32, &i32) -> i8,
) -> Result<InsertResult, MastError> {
let (equal, i) = get_index_for_key(&key, &self.key, key_order);
if distance != 0 {
let mut z = self.link.get_mut(i).unwrap();
let child = match &mut z {
Some(ref mut link) => load_mut(link)?,
None => {
*z = Some(Link::MutableNode(Node::new(self.key.capacity()), None));
match &mut z {
Some(ref mut link) => load_mut(link)?,
None => panic!("can't load just-set link"),
}
}
};
let res = child.insert(key, value, distance - 1, key_order)?;
match res {
InsertResult::NoChange => (),
_ => self.dirty = true,
};
return Ok(res);
}
if equal {
if value == self.value[i] {
return Ok(InsertResult::NoChange);
}
self.value[i] = value;
self.dirty = true;
return Ok(InsertResult::Updated);
}
let (left_link, right_link) = match self.link.get_mut(i).unwrap() {
Some(ref mut link) => {
let child = load_mut(link)?;
split(child, &key, key_order)?
}
None => (None, None),
};
self.key.insert(i, key);
self.value.insert(i, value);
self.link[i] = right_link;
self.link.insert(i, left_link);
self.dirty = true;
return Ok(InsertResult::Inserted);
}
fn can_grow(
&self,
current_height: u8,
key_layer: fn(&i32, u16) -> u8,
branch_factor: u16,
) -> bool {
for key in &self.key {
if key_layer(key, branch_factor) > current_height {
return true;
}
}
return false;
}
fn grow(
&mut self,
current_height: u8,
key_layer: fn(&i32, u16) -> u8,
branch_factor: u16,
) -> Option<Link> {
let mut new_parent = Node::new(self.key.capacity());
if !self.is_empty() {
for i in 0..self.key.len() {
let key = &self.key[i];
let layer = key_layer(key, branch_factor);
if layer <= current_height {
continue;
}
let new_left = self.extract(i);
new_parent.key.push(self.key[0]);
new_parent.value.push(self.value[0]);
new_parent.link.insert(new_parent.link.len() - 1, new_left);
}
}
let new_right = self.extract(self.key.len());
*new_parent.link.last_mut().unwrap() = new_right;
return new_parent.to_link();
}
fn extract(&mut self, end: usize) -> Option<Link> {
let mut node = Node::new(self.key.capacity());
node.key = self.key.drain(..end).collect();
node.key.reserve(self.key.capacity());
node.value = self.value.drain(..end).collect();
node.value.reserve(self.key.capacity());
node.link = self.link.drain(..=end).collect();
node.link.reserve(self.key.capacity() + 1);
self.link.insert(0, None);
return node.to_link();
}
fn to_link(self) -> Option<Link> {
if self.is_empty() {
return None;
}
return Some(Link::MutableNode(self, None));
}
fn is_empty(&self) -> bool {
return self.key.len() == 0
&& self.value.len() == 0
&& self.link.len() == 1
&& self.link[0].is_none();
}
}
#[derive(Debug)]
enum InsertResult {
Updated,
Inserted,
NoChange,
}
fn split(
node: &mut Node,
key: &i32,
key_order: fn(&i32, &i32) -> i8,
) -> Result<(Option<Link>, Option<Link>), MastError> {
let (equal, i) = get_index_for_key(key, &node.key | {
while v != 0 && v & 0xf == 0 {
v >>= 4;
layer += 1
}
} | conditional_block | |
lib.rs | grow_after_size: default_branch_factor as u64,
shrink_below_size: 1,
key_order: default_order,
key_layer: default_layer,
_a: std::marker::PhantomData,
// store: InMemoryNodeStore::new(),
};
}
fn insert(&mut self, key: i32, value: i32) -> Result<InsertResult, MastError> {
let key_layer = (self.key_layer)(&key, self.branch_factor);
let target_layer = std::cmp::min(key_layer, self.height);
let distance = self.height - target_layer;
let root = load_mut(&mut self.root_link)?;
let res = root.insert(key, value, distance, self.key_order)?;
match res {
InsertResult::Inserted => self.size += 1,
_ => return Ok(res),
};
if self.size > self.grow_after_size
&& root.can_grow(self.height, self.key_layer, self.branch_factor)
{
self.root_link = root
.grow(self.height, self.key_layer, self.branch_factor)
.unwrap();
self.height += 1;
self.shrink_below_size *= self.branch_factor as u64;
self.grow_after_size *= self.branch_factor as u64;
};
Ok(res)
}
fn get(&self, key: &i32) -> Result<Option<&i32>, MastError> {
let mut distance =
self.height - std::cmp::min((self.key_layer)(key, self.branch_factor), self.height);
if distance < 0 { panic!("goo") };
let mut node = load(&self.root_link)?;
loop {
let (equal, i) = get_index_for_key(key, &node.key, self.key_order);
if distance == 0 {
if equal {
return Ok(Some(&node.value[i]));
} else {
return Ok(None);
}
} else {
distance -= 1
}
match node.link[i] {
None => return Ok(None),
Some(ref link) => node = load(link)?,
}
}
}
}
fn load(link: &Link) -> Result<&Node, MastError> {
match link {
// Link::Empty => Ok(Cow::Borrowed(&Node::empty()).to_mut()),
Link::MutableNode(ref node, _) => Ok(node),
Link::SharedNode(ref rc) => Ok(rc),
Link::Stored(_) => unimplemented!("Link::Stored"),
}
}
fn load_mut(link: &mut Link) -> Result<&mut Node, MastError> {
match link {
// Link::Empty => Ok(Cow::Borrowed(&Node::empty()).to_mut()),
Link::MutableNode(ref mut node, _) => Ok(node),
Link::SharedNode(ref mut rc) => {
let mutable = Rc::make_mut(rc).to_owned();
*link = Link::MutableNode(mutable, Some(rc.clone()));
if let Link::MutableNode(ref mut scopey, _) = link {
Ok(scopey)
} else {
panic!("asdf")
}
}
Link::Stored(_) => unimplemented!("Link::Stored"),
}
}
// struct NodeAndSlot<'a>(&'a mut Node<'a>, usize);
/*
struct FindOptions<'a> {
mast: &'a mut Mast<'a>,
target_layer: u8,
current_height: u8,
create_missing_nodes: bool,
node_path: Vec<&'a mut Node>,
link_path: Vec<usize>,
}
*/
impl Node {
fn new(branch_factor: usize) -> Node {
let mut link = Vec::with_capacity(branch_factor + 1);
link.push(None);
Node {
key: Vec::with_capacity(branch_factor),
value: Vec::with_capacity(branch_factor),
link,
dirty: false,
}
}
/*
fn follow(
&'a mut self,
index: usize,
create_ok: bool,
m: &'a mut Mast<'a>,
) -> std::result::Result<&'a mut Node<'a>, std::io::Error> {
if let Some(ref mut links) = self.link {
return Ok(m.load(&mut links[index])?);
} else if !create_ok {
return Ok(self);
}
return Ok(&mut Node::empty());
}*/
fn insert(
&mut self,
key: i32,
value: i32,
distance: u8,
key_order: fn(&i32, &i32) -> i8,
) -> Result<InsertResult, MastError> {
let (equal, i) = get_index_for_key(&key, &self.key, key_order);
if distance != 0 {
let mut z = self.link.get_mut(i).unwrap();
let child = match &mut z {
Some(ref mut link) => load_mut(link)?,
None => {
*z = Some(Link::MutableNode(Node::new(self.key.capacity()), None));
match &mut z {
Some(ref mut link) => load_mut(link)?,
None => panic!("can't load just-set link"),
}
} | };
return Ok(res);
}
if equal {
if value == self.value[i] {
return Ok(InsertResult::NoChange);
}
self.value[i] = value;
self.dirty = true;
return Ok(InsertResult::Updated);
}
let (left_link, right_link) = match self.link.get_mut(i).unwrap() {
Some(ref mut link) => {
let child = load_mut(link)?;
split(child, &key, key_order)?
}
None => (None, None),
};
self.key.insert(i, key);
self.value.insert(i, value);
self.link[i] = right_link;
self.link.insert(i, left_link);
self.dirty = true;
return Ok(InsertResult::Inserted);
}
fn can_grow(
&self,
current_height: u8,
key_layer: fn(&i32, u16) -> u8,
branch_factor: u16,
) -> bool {
for key in &self.key {
if key_layer(key, branch_factor) > current_height {
return true;
}
}
return false;
}
fn grow(
&mut self,
current_height: u8,
key_layer: fn(&i32, u16) -> u8,
branch_factor: u16,
) -> Option<Link> {
let mut new_parent = Node::new(self.key.capacity());
if !self.is_empty() {
for i in 0..self.key.len() {
let key = &self.key[i];
let layer = key_layer(key, branch_factor);
if layer <= current_height {
continue;
}
let new_left = self.extract(i);
new_parent.key.push(self.key[0]);
new_parent.value.push(self.value[0]);
new_parent.link.insert(new_parent.link.len() - 1, new_left);
}
}
let new_right = self.extract(self.key.len());
*new_parent.link.last_mut().unwrap() = new_right;
return new_parent.to_link();
}
fn extract(&mut self, end: usize) -> Option<Link> {
let mut node = Node::new(self.key.capacity());
node.key = self.key.drain(..end).collect();
node.key.reserve(self.key.capacity());
node.value = self.value.drain(..end).collect();
node.value.reserve(self.key.capacity());
node.link = self.link.drain(..=end).collect();
node.link.reserve(self.key.capacity() + 1);
self.link.insert(0, None);
return node.to_link();
}
fn to_link(self) -> Option<Link> {
if self.is_empty() {
return None;
}
return Some(Link::MutableNode(self, None));
}
fn is_empty(&self) -> bool {
return self.key.len() == 0
&& self.value.len() == 0
&& self.link.len() == 1
&& self.link[0].is_none();
}
}
#[derive(Debug)]
enum InsertResult {
Updated,
Inserted,
NoChange,
}
fn split(
node: &mut Node,
key: &i32,
key_order: fn(&i32, &i32) -> i8,
) -> Result<(Option<Link>, Option<Link>), MastError> {
let (equal, i) = get_index_for_key(key, &node.key, key_order);
if equal {
panic!("split not expecting existing key")
}
let mut left_node = Node::new(node.key.capacity());
let mut right_node = Node::new(node.key.capacity());
let (mut left, mut right) = node.key.split_at(i);
left_node.key.extend_from_slice(left);
right_node.key.extend_from_slice(right);
let (mut left, mut right) = node.value.split_at(i);
left_node.value.extend_from_slice(left);
right_node.value.extend_from_slice(left);
let (mut left, mut right) = node.link.split_at(i + 1);
left_node.link.remove(0);
left_node.link.extend | };
let res = child.insert(key, value, distance - 1, key_order)?;
match res {
InsertResult::NoChange => (),
_ => self.dirty = true, | random_line_split |
lib.rs | (v: &i32, branch_factor: u16) -> u8 {
let mut layer = 0;
let mut v = *v;
if branch_factor == 16 {
while v != 0 && v & 0xf == 0 {
v >>= 4;
layer += 1
}
} else {
while v != 0 && v % branch_factor as i32 == 0 {
v /= branch_factor as i32;
layer += 1;
}
}
return layer;
}
impl<'a> Mast<'a> {
pub fn newInMemory() -> Mast<'a> {
return Mast {
size: 0,
height: 0,
root_link: Link::MutableNode(Node::new(default_branch_factor as usize), None),
branch_factor: default_branch_factor,
grow_after_size: default_branch_factor as u64,
shrink_below_size: 1,
key_order: default_order,
key_layer: default_layer,
_a: std::marker::PhantomData,
// store: InMemoryNodeStore::new(),
};
}
fn insert(&mut self, key: i32, value: i32) -> Result<InsertResult, MastError> {
let key_layer = (self.key_layer)(&key, self.branch_factor);
let target_layer = std::cmp::min(key_layer, self.height);
let distance = self.height - target_layer;
let root = load_mut(&mut self.root_link)?;
let res = root.insert(key, value, distance, self.key_order)?;
match res {
InsertResult::Inserted => self.size += 1,
_ => return Ok(res),
};
if self.size > self.grow_after_size
&& root.can_grow(self.height, self.key_layer, self.branch_factor)
{
self.root_link = root
.grow(self.height, self.key_layer, self.branch_factor)
.unwrap();
self.height += 1;
self.shrink_below_size *= self.branch_factor as u64;
self.grow_after_size *= self.branch_factor as u64;
};
Ok(res)
}
fn get(&self, key: &i32) -> Result<Option<&i32>, MastError> {
let mut distance =
self.height - std::cmp::min((self.key_layer)(key, self.branch_factor), self.height);
if distance < 0 { panic!("goo") };
let mut node = load(&self.root_link)?;
loop {
let (equal, i) = get_index_for_key(key, &node.key, self.key_order);
if distance == 0 {
if equal {
return Ok(Some(&node.value[i]));
} else {
return Ok(None);
}
} else {
distance -= 1
}
match node.link[i] {
None => return Ok(None),
Some(ref link) => node = load(link)?,
}
}
}
}
fn load(link: &Link) -> Result<&Node, MastError> {
match link {
// Link::Empty => Ok(Cow::Borrowed(&Node::empty()).to_mut()),
Link::MutableNode(ref node, _) => Ok(node),
Link::SharedNode(ref rc) => Ok(rc),
Link::Stored(_) => unimplemented!("Link::Stored"),
}
}
fn load_mut(link: &mut Link) -> Result<&mut Node, MastError> {
match link {
// Link::Empty => Ok(Cow::Borrowed(&Node::empty()).to_mut()),
Link::MutableNode(ref mut node, _) => Ok(node),
Link::SharedNode(ref mut rc) => {
let mutable = Rc::make_mut(rc).to_owned();
*link = Link::MutableNode(mutable, Some(rc.clone()));
if let Link::MutableNode(ref mut scopey, _) = link {
Ok(scopey)
} else {
panic!("asdf")
}
}
Link::Stored(_) => unimplemented!("Link::Stored"),
}
}
// struct NodeAndSlot<'a>(&'a mut Node<'a>, usize);
/*
struct FindOptions<'a> {
mast: &'a mut Mast<'a>,
target_layer: u8,
current_height: u8,
create_missing_nodes: bool,
node_path: Vec<&'a mut Node>,
link_path: Vec<usize>,
}
*/
impl Node {
fn new(branch_factor: usize) -> Node {
let mut link = Vec::with_capacity(branch_factor + 1);
link.push(None);
Node {
key: Vec::with_capacity(branch_factor),
value: Vec::with_capacity(branch_factor),
link,
dirty: false,
}
}
/*
fn follow(
&'a mut self,
index: usize,
create_ok: bool,
m: &'a mut Mast<'a>,
) -> std::result::Result<&'a mut Node<'a>, std::io::Error> {
if let Some(ref mut links) = self.link {
return Ok(m.load(&mut links[index])?);
} else if !create_ok {
return Ok(self);
}
return Ok(&mut Node::empty());
}*/
fn insert(
&mut self,
key: i32,
value: i32,
distance: u8,
key_order: fn(&i32, &i32) -> i8,
) -> Result<InsertResult, MastError> {
let (equal, i) = get_index_for_key(&key, &self.key, key_order);
if distance != 0 {
let mut z = self.link.get_mut(i).unwrap();
let child = match &mut z {
Some(ref mut link) => load_mut(link)?,
None => {
*z = Some(Link::MutableNode(Node::new(self.key.capacity()), None));
match &mut z {
Some(ref mut link) => load_mut(link)?,
None => panic!("can't load just-set link"),
}
}
};
let res = child.insert(key, value, distance - 1, key_order)?;
match res {
InsertResult::NoChange => (),
_ => self.dirty = true,
};
return Ok(res);
}
if equal {
if value == self.value[i] {
return Ok(InsertResult::NoChange);
}
self.value[i] = value;
self.dirty = true;
return Ok(InsertResult::Updated);
}
let (left_link, right_link) = match self.link.get_mut(i).unwrap() {
Some(ref mut link) => {
let child = load_mut(link)?;
split(child, &key, key_order)?
}
None => (None, None),
};
self.key.insert(i, key);
self.value.insert(i, value);
self.link[i] = right_link;
self.link.insert(i, left_link);
self.dirty = true;
return Ok(InsertResult::Inserted);
}
fn can_grow(
&self,
current_height: u8,
key_layer: fn(&i32, u16) -> u8,
branch_factor: u16,
) -> bool {
for key in &self.key {
if key_layer(key, branch_factor) > current_height {
return true;
}
}
return false;
}
fn grow(
&mut self,
current_height: u8,
key_layer: fn(&i32, u16) -> u8,
branch_factor: u16,
) -> Option<Link> {
let mut new_parent = Node::new(self.key.capacity());
if !self.is_empty() {
for i in 0..self.key.len() {
let key = &self.key[i];
let layer = key_layer(key, branch_factor);
if layer <= current_height {
continue;
}
let new_left = self.extract(i);
new_parent.key.push(self.key[0]);
new_parent.value.push(self.value[0]);
new_parent.link.insert(new_parent.link.len() - 1, new_left);
}
}
let new_right = self.extract(self.key.len());
*new_parent.link.last_mut().unwrap() = new_right;
return new_parent.to_link();
}
fn extract(&mut self, end: usize) -> Option<Link> {
let mut node = Node::new(self.key.capacity());
node.key = self.key.drain(..end).collect();
node.key.reserve(self.key.capacity());
node.value = self.value.drain(..end).collect();
node.value.reserve(self.key.capacity());
node.link = self.link.drain(..=end).collect();
node.link.reserve(self.key.capacity() + 1);
self.link.insert(0, None);
return node.to_link();
}
fn to_link(self) -> Option<Link> {
if self.is_empty() {
return None;
}
return Some(Link::MutableNode(self, None));
}
fn is_empty(&self) -> bool {
return self.key.len() == 0
&& self.value.len() == 0
&& self.link.len() == 1
&& self.link[0].is_none();
}
}
#[derive(Debug)]
enum InsertResult {
Updated,
Inserted,
NoChange,
}
fn split(
node: &mut Node,
key: &i32,
key_order: fn(&i | default_layer | identifier_name |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.