code
stringlengths
101
5.91M
def register_methods(root_module):
    """Attach per-class method bindings for every wrapped type in this module.

    Each wrapped C++ type is looked up by its fully qualified name in
    ``root_module`` and handed to the matching ``register_*_methods`` helper
    defined elsewhere in this file.  This looks like pybindgen-generated
    binding code for the ns-3 'uan' module -- keep edits mechanical.
    """
    # --- core value / container / utility types ---
    register_Ns3Address_methods(root_module, root_module['ns3::Address'])
    register_Ns3AttributeConstructionList_methods(root_module, root_module['ns3::AttributeConstructionList'])
    register_Ns3AttributeConstructionListItem_methods(root_module, root_module['ns3::AttributeConstructionList::Item'])
    register_Ns3Buffer_methods(root_module, root_module['ns3::Buffer'])
    register_Ns3BufferIterator_methods(root_module, root_module['ns3::Buffer::Iterator'])
    register_Ns3ByteTagIterator_methods(root_module, root_module['ns3::ByteTagIterator'])
    register_Ns3ByteTagIteratorItem_methods(root_module, root_module['ns3::ByteTagIterator::Item'])
    register_Ns3ByteTagList_methods(root_module, root_module['ns3::ByteTagList'])
    register_Ns3ByteTagListIterator_methods(root_module, root_module['ns3::ByteTagList::Iterator'])
    register_Ns3ByteTagListIteratorItem_methods(root_module, root_module['ns3::ByteTagList::Iterator::Item'])
    register_Ns3CallbackBase_methods(root_module, root_module['ns3::CallbackBase'])
    # --- DefaultDeleter template instantiations ---
    register_Ns3DefaultDeleter__Ns3AttributeAccessor_methods(root_module, root_module['ns3::DefaultDeleter< ns3::AttributeAccessor >'])
    register_Ns3DefaultDeleter__Ns3AttributeChecker_methods(root_module, root_module['ns3::DefaultDeleter< ns3::AttributeChecker >'])
    register_Ns3DefaultDeleter__Ns3AttributeValue_methods(root_module, root_module['ns3::DefaultDeleter< ns3::AttributeValue >'])
    register_Ns3DefaultDeleter__Ns3CallbackImplBase_methods(root_module, root_module['ns3::DefaultDeleter< ns3::CallbackImplBase >'])
    register_Ns3DefaultDeleter__Ns3EventImpl_methods(root_module, root_module['ns3::DefaultDeleter< ns3::EventImpl >'])
    register_Ns3DefaultDeleter__Ns3HashImplementation_methods(root_module, root_module['ns3::DefaultDeleter< ns3::Hash::Implementation >'])
    register_Ns3DefaultDeleter__Ns3NixVector_methods(root_module, root_module['ns3::DefaultDeleter< ns3::NixVector >'])
    register_Ns3DefaultDeleter__Ns3Packet_methods(root_module, root_module['ns3::DefaultDeleter< ns3::Packet >'])
    register_Ns3DefaultDeleter__Ns3TraceSourceAccessor_methods(root_module, root_module['ns3::DefaultDeleter< ns3::TraceSourceAccessor >'])
    # --- helpers, addresses, containers ---
    register_Ns3DeviceEnergyModelContainer_methods(root_module, root_module['ns3::DeviceEnergyModelContainer'])
    register_Ns3DeviceEnergyModelHelper_methods(root_module, root_module['ns3::DeviceEnergyModelHelper'])
    register_Ns3EnergySourceHelper_methods(root_module, root_module['ns3::EnergySourceHelper'])
    register_Ns3EventId_methods(root_module, root_module['ns3::EventId'])
    register_Ns3Hasher_methods(root_module, root_module['ns3::Hasher'])
    register_Ns3Ipv4Address_methods(root_module, root_module['ns3::Ipv4Address'])
    register_Ns3Ipv4Mask_methods(root_module, root_module['ns3::Ipv4Mask'])
    register_Ns3Ipv6Address_methods(root_module, root_module['ns3::Ipv6Address'])
    register_Ns3Ipv6Prefix_methods(root_module, root_module['ns3::Ipv6Prefix'])
    register_Ns3Mac48Address_methods(root_module, root_module['ns3::Mac48Address'])
    register_Ns3Mac8Address_methods(root_module, root_module['ns3::Mac8Address'])
    register_Ns3NetDeviceContainer_methods(root_module, root_module['ns3::NetDeviceContainer'])
    register_Ns3NodeContainer_methods(root_module, root_module['ns3::NodeContainer'])
    register_Ns3ObjectBase_methods(root_module, root_module['ns3::ObjectBase'])
    register_Ns3ObjectDeleter_methods(root_module, root_module['ns3::ObjectDeleter'])
    register_Ns3ObjectFactory_methods(root_module, root_module['ns3::ObjectFactory'])
    register_Ns3PacketMetadata_methods(root_module, root_module['ns3::PacketMetadata'])
    register_Ns3PacketMetadataItem_methods(root_module, root_module['ns3::PacketMetadata::Item'])
    register_Ns3PacketMetadataItemIterator_methods(root_module, root_module['ns3::PacketMetadata::ItemIterator'])
    register_Ns3PacketTagIterator_methods(root_module, root_module['ns3::PacketTagIterator'])
    register_Ns3PacketTagIteratorItem_methods(root_module, root_module['ns3::PacketTagIterator::Item'])
    register_Ns3PacketTagList_methods(root_module, root_module['ns3::PacketTagList'])
    register_Ns3PacketTagListTagData_methods(root_module, root_module['ns3::PacketTagList::TagData'])
    register_Ns3Reservation_methods(root_module, root_module['ns3::Reservation'])
    register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
    register_Ns3Simulator_methods(root_module, root_module['ns3::Simulator'])
    register_Ns3Tag_methods(root_module, root_module['ns3::Tag'])
    register_Ns3TagBuffer_methods(root_module, root_module['ns3::TagBuffer'])
    register_Ns3Tap_methods(root_module, root_module['ns3::Tap'])
    register_Ns3TimeWithUnit_methods(root_module, root_module['ns3::TimeWithUnit'])
    register_Ns3TracedValue__Double_methods(root_module, root_module['ns3::TracedValue< double >'])
    register_Ns3TypeId_methods(root_module, root_module['ns3::TypeId'])
    register_Ns3TypeIdAttributeInformation_methods(root_module, root_module['ns3::TypeId::AttributeInformation'])
    register_Ns3TypeIdTraceSourceInformation_methods(root_module, root_module['ns3::TypeId::TraceSourceInformation'])
    # --- UAN module value types and helpers ---
    register_Ns3UanHelper_methods(root_module, root_module['ns3::UanHelper'])
    register_Ns3UanModesList_methods(root_module, root_module['ns3::UanModesList'])
    register_Ns3UanPacketArrival_methods(root_module, root_module['ns3::UanPacketArrival'])
    register_Ns3UanPdp_methods(root_module, root_module['ns3::UanPdp'])
    register_Ns3UanPhyListener_methods(root_module, root_module['ns3::UanPhyListener'])
    register_Ns3UanProtocolBits_methods(root_module, root_module['ns3::UanProtocolBits'])
    register_Ns3UanTxMode_methods(root_module, root_module['ns3::UanTxMode'])
    register_Ns3UanTxModeFactory_methods(root_module, root_module['ns3::UanTxModeFactory'])
    register_Ns3Vector2D_methods(root_module, root_module['ns3::Vector2D'])
    register_Ns3Vector3D_methods(root_module, root_module['ns3::Vector3D'])
    register_Ns3Empty_methods(root_module, root_module['ns3::empty'])
    register_Ns3Int64x64_t_methods(root_module, root_module['ns3::int64x64_t'])
    register_Ns3AcousticModemEnergyModelHelper_methods(root_module, root_module['ns3::AcousticModemEnergyModelHelper'])
    register_Ns3Chunk_methods(root_module, root_module['ns3::Chunk'])
    register_Ns3Header_methods(root_module, root_module['ns3::Header'])
    register_Ns3Object_methods(root_module, root_module['ns3::Object'])
    register_Ns3ObjectAggregateIterator_methods(root_module, root_module['ns3::Object::AggregateIterator'])
    register_Ns3RandomVariableStream_methods(root_module, root_module['ns3::RandomVariableStream'])
    register_Ns3SequentialRandomVariable_methods(root_module, root_module['ns3::SequentialRandomVariable'])
    # --- SimpleRefCount template instantiations ---
    register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
    register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
    register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
    register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
    register_Ns3SimpleRefCount__Ns3EventImpl_Ns3Empty_Ns3DefaultDeleter__lt__ns3EventImpl__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >'])
    register_Ns3SimpleRefCount__Ns3HashImplementation_Ns3Empty_Ns3DefaultDeleter__lt__ns3HashImplementation__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >'])
    register_Ns3SimpleRefCount__Ns3NixVector_Ns3Empty_Ns3DefaultDeleter__lt__ns3NixVector__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >'])
    register_Ns3SimpleRefCount__Ns3Packet_Ns3Empty_Ns3DefaultDeleter__lt__ns3Packet__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >'])
    register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
    register_Ns3Time_methods(root_module, root_module['ns3::Time'])
    register_Ns3TraceSourceAccessor_methods(root_module, root_module['ns3::TraceSourceAccessor'])
    register_Ns3Trailer_methods(root_module, root_module['ns3::Trailer'])
    register_Ns3TriangularRandomVariable_methods(root_module, root_module['ns3::TriangularRandomVariable'])
    # --- UAN headers, MAC, PHY, propagation ---
    register_Ns3UanHeaderCommon_methods(root_module, root_module['ns3::UanHeaderCommon'])
    register_Ns3UanHeaderRcAck_methods(root_module, root_module['ns3::UanHeaderRcAck'])
    register_Ns3UanHeaderRcCts_methods(root_module, root_module['ns3::UanHeaderRcCts'])
    register_Ns3UanHeaderRcCtsGlobal_methods(root_module, root_module['ns3::UanHeaderRcCtsGlobal'])
    register_Ns3UanHeaderRcData_methods(root_module, root_module['ns3::UanHeaderRcData'])
    register_Ns3UanHeaderRcRts_methods(root_module, root_module['ns3::UanHeaderRcRts'])
    register_Ns3UanMac_methods(root_module, root_module['ns3::UanMac'])
    register_Ns3UanMacAloha_methods(root_module, root_module['ns3::UanMacAloha'])
    register_Ns3UanMacCw_methods(root_module, root_module['ns3::UanMacCw'])
    register_Ns3UanMacRc_methods(root_module, root_module['ns3::UanMacRc'])
    register_Ns3UanMacRcGw_methods(root_module, root_module['ns3::UanMacRcGw'])
    register_Ns3UanNoiseModel_methods(root_module, root_module['ns3::UanNoiseModel'])
    register_Ns3UanNoiseModelDefault_methods(root_module, root_module['ns3::UanNoiseModelDefault'])
    register_Ns3UanPhy_methods(root_module, root_module['ns3::UanPhy'])
    register_Ns3UanPhyCalcSinr_methods(root_module, root_module['ns3::UanPhyCalcSinr'])
    register_Ns3UanPhyCalcSinrDefault_methods(root_module, root_module['ns3::UanPhyCalcSinrDefault'])
    register_Ns3UanPhyCalcSinrDual_methods(root_module, root_module['ns3::UanPhyCalcSinrDual'])
    register_Ns3UanPhyCalcSinrFhFsk_methods(root_module, root_module['ns3::UanPhyCalcSinrFhFsk'])
    register_Ns3UanPhyDual_methods(root_module, root_module['ns3::UanPhyDual'])
    register_Ns3UanPhyGen_methods(root_module, root_module['ns3::UanPhyGen'])
    register_Ns3UanPhyPer_methods(root_module, root_module['ns3::UanPhyPer'])
    register_Ns3UanPhyPerCommonModes_methods(root_module, root_module['ns3::UanPhyPerCommonModes'])
    register_Ns3UanPhyPerGenDefault_methods(root_module, root_module['ns3::UanPhyPerGenDefault'])
    register_Ns3UanPhyPerUmodem_methods(root_module, root_module['ns3::UanPhyPerUmodem'])
    register_Ns3UanPropModel_methods(root_module, root_module['ns3::UanPropModel'])
    register_Ns3UanPropModelIdeal_methods(root_module, root_module['ns3::UanPropModelIdeal'])
    register_Ns3UanPropModelThorp_methods(root_module, root_module['ns3::UanPropModelThorp'])
    register_Ns3UanTransducer_methods(root_module, root_module['ns3::UanTransducer'])
    register_Ns3UanTransducerHd_methods(root_module, root_module['ns3::UanTransducerHd'])
    # --- random variables ---
    register_Ns3UniformRandomVariable_methods(root_module, root_module['ns3::UniformRandomVariable'])
    register_Ns3WeibullRandomVariable_methods(root_module, root_module['ns3::WeibullRandomVariable'])
    register_Ns3ZetaRandomVariable_methods(root_module, root_module['ns3::ZetaRandomVariable'])
    register_Ns3ZipfRandomVariable_methods(root_module, root_module['ns3::ZipfRandomVariable'])
    # --- attribute system, checkers and values ---
    register_Ns3AttributeAccessor_methods(root_module, root_module['ns3::AttributeAccessor'])
    register_Ns3AttributeChecker_methods(root_module, root_module['ns3::AttributeChecker'])
    register_Ns3AttributeValue_methods(root_module, root_module['ns3::AttributeValue'])
    register_Ns3BooleanChecker_methods(root_module, root_module['ns3::BooleanChecker'])
    register_Ns3BooleanValue_methods(root_module, root_module['ns3::BooleanValue'])
    register_Ns3CallbackChecker_methods(root_module, root_module['ns3::CallbackChecker'])
    register_Ns3CallbackImplBase_methods(root_module, root_module['ns3::CallbackImplBase'])
    register_Ns3CallbackValue_methods(root_module, root_module['ns3::CallbackValue'])
    register_Ns3Channel_methods(root_module, root_module['ns3::Channel'])
    register_Ns3ConstantRandomVariable_methods(root_module, root_module['ns3::ConstantRandomVariable'])
    register_Ns3DeterministicRandomVariable_methods(root_module, root_module['ns3::DeterministicRandomVariable'])
    register_Ns3DeviceEnergyModel_methods(root_module, root_module['ns3::DeviceEnergyModel'])
    register_Ns3DoubleValue_methods(root_module, root_module['ns3::DoubleValue'])
    register_Ns3EmpiricalRandomVariable_methods(root_module, root_module['ns3::EmpiricalRandomVariable'])
    register_Ns3EmptyAttributeAccessor_methods(root_module, root_module['ns3::EmptyAttributeAccessor'])
    register_Ns3EmptyAttributeChecker_methods(root_module, root_module['ns3::EmptyAttributeChecker'])
    register_Ns3EmptyAttributeValue_methods(root_module, root_module['ns3::EmptyAttributeValue'])
    register_Ns3EnergyHarvester_methods(root_module, root_module['ns3::EnergyHarvester'])
    register_Ns3EnergySource_methods(root_module, root_module['ns3::EnergySource'])
    register_Ns3EnergySourceContainer_methods(root_module, root_module['ns3::EnergySourceContainer'])
    register_Ns3EnumChecker_methods(root_module, root_module['ns3::EnumChecker'])
    register_Ns3EnumValue_methods(root_module, root_module['ns3::EnumValue'])
    register_Ns3ErlangRandomVariable_methods(root_module, root_module['ns3::ErlangRandomVariable'])
    register_Ns3EventImpl_methods(root_module, root_module['ns3::EventImpl'])
    register_Ns3ExponentialRandomVariable_methods(root_module, root_module['ns3::ExponentialRandomVariable'])
    register_Ns3GammaRandomVariable_methods(root_module, root_module['ns3::GammaRandomVariable'])
    register_Ns3IntegerValue_methods(root_module, root_module['ns3::IntegerValue'])
    register_Ns3Ipv4AddressChecker_methods(root_module, root_module['ns3::Ipv4AddressChecker'])
    register_Ns3Ipv4AddressValue_methods(root_module, root_module['ns3::Ipv4AddressValue'])
    register_Ns3Ipv4MaskChecker_methods(root_module, root_module['ns3::Ipv4MaskChecker'])
    register_Ns3Ipv4MaskValue_methods(root_module, root_module['ns3::Ipv4MaskValue'])
    register_Ns3Ipv6AddressChecker_methods(root_module, root_module['ns3::Ipv6AddressChecker'])
    register_Ns3Ipv6AddressValue_methods(root_module, root_module['ns3::Ipv6AddressValue'])
    register_Ns3Ipv6PrefixChecker_methods(root_module, root_module['ns3::Ipv6PrefixChecker'])
    register_Ns3Ipv6PrefixValue_methods(root_module, root_module['ns3::Ipv6PrefixValue'])
    register_Ns3LogNormalRandomVariable_methods(root_module, root_module['ns3::LogNormalRandomVariable'])
    register_Ns3Mac48AddressChecker_methods(root_module, root_module['ns3::Mac48AddressChecker'])
    register_Ns3Mac48AddressValue_methods(root_module, root_module['ns3::Mac48AddressValue'])
    register_Ns3MobilityModel_methods(root_module, root_module['ns3::MobilityModel'])
    register_Ns3NetDevice_methods(root_module, root_module['ns3::NetDevice'])
    register_Ns3NixVector_methods(root_module, root_module['ns3::NixVector'])
    register_Ns3Node_methods(root_module, root_module['ns3::Node'])
    register_Ns3NormalRandomVariable_methods(root_module, root_module['ns3::NormalRandomVariable'])
    register_Ns3ObjectFactoryChecker_methods(root_module, root_module['ns3::ObjectFactoryChecker'])
    register_Ns3ObjectFactoryValue_methods(root_module, root_module['ns3::ObjectFactoryValue'])
    register_Ns3Packet_methods(root_module, root_module['ns3::Packet'])
    register_Ns3ParetoRandomVariable_methods(root_module, root_module['ns3::ParetoRandomVariable'])
    register_Ns3PointerChecker_methods(root_module, root_module['ns3::PointerChecker'])
    register_Ns3PointerValue_methods(root_module, root_module['ns3::PointerValue'])
    register_Ns3TimeValue_methods(root_module, root_module['ns3::TimeValue'])
    register_Ns3TypeIdChecker_methods(root_module, root_module['ns3::TypeIdChecker'])
    register_Ns3TypeIdValue_methods(root_module, root_module['ns3::TypeIdValue'])
    register_Ns3UanChannel_methods(root_module, root_module['ns3::UanChannel'])
    register_Ns3UanModesListChecker_methods(root_module, root_module['ns3::UanModesListChecker'])
    register_Ns3UanModesListValue_methods(root_module, root_module['ns3::UanModesListValue'])
    register_Ns3UanNetDevice_methods(root_module, root_module['ns3::UanNetDevice'])
    register_Ns3UintegerValue_methods(root_module, root_module['ns3::UintegerValue'])
    register_Ns3Vector2DChecker_methods(root_module, root_module['ns3::Vector2DChecker'])
    register_Ns3Vector2DValue_methods(root_module, root_module['ns3::Vector2DValue'])
    register_Ns3Vector3DChecker_methods(root_module, root_module['ns3::Vector3DChecker'])
    register_Ns3Vector3DValue_methods(root_module, root_module['ns3::Vector3DValue'])
    register_Ns3AcousticModemEnergyModel_methods(root_module, root_module['ns3::AcousticModemEnergyModel'])
    register_Ns3AddressChecker_methods(root_module, root_module['ns3::AddressChecker'])
    register_Ns3AddressValue_methods(root_module, root_module['ns3::AddressValue'])
    # --- CallbackImpl template instantiations ---
    register_Ns3CallbackImpl__Bool_Ns3Ptr__lt__ns3NetDevice__gt___Ns3Ptr__lt__const_ns3Packet__gt___Unsigned_short_Const_ns3Address___amp___Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< bool, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<const ns3::Packet>, unsigned short, const ns3::Address &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
    register_Ns3CallbackImpl__Ns3ObjectBase___star___Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
    register_Ns3CallbackImpl__Void_Double_Double_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< void, double, double, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
    register_Ns3CallbackImpl__Void_Int_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< void, int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
    register_Ns3CallbackImpl__Void_Ns3Ptr__lt__const_ns3MobilityModel__gt___Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< void, ns3::Ptr<const ns3::MobilityModel>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
    register_Ns3CallbackImpl__Void_Ns3Ptr__lt__const_ns3Packet__gt___Double_Ns3UanTxMode_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< void, ns3::Ptr<const ns3::Packet>, double, ns3::UanTxMode, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
    register_Ns3CallbackImpl__Void_Ns3Ptr__lt__const_ns3Packet__gt___Ns3Mac8Address_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< void, ns3::Ptr<const ns3::Packet>, ns3::Mac8Address, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
    register_Ns3CallbackImpl__Void_Ns3Ptr__lt__const_ns3Packet__gt___Ns3UanTxMode_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< void, ns3::Ptr<const ns3::Packet>, ns3::UanTxMode, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
    register_Ns3CallbackImpl__Void_Ns3Ptr__lt__const_ns3Packet__gt___Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< void, ns3::Ptr<const ns3::Packet>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
    register_Ns3CallbackImpl__Void_Ns3Ptr__lt__const_ns3Packet__gt___Unsigned_int_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< void, ns3::Ptr<const ns3::Packet>, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
    register_Ns3CallbackImpl__Void_Ns3Ptr__lt__const_ns3Packet__gt___Unsigned_short_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< void, ns3::Ptr<const ns3::Packet>, unsigned short, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
    register_Ns3CallbackImpl__Void_Ns3Ptr__lt__ns3NetDevice__gt___Ns3Ptr__lt__const_ns3Packet__gt___Unsigned_short_Const_ns3Address___amp___Const_ns3Address___amp___Ns3NetDevicePacketType_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< void, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<const ns3::Packet>, unsigned short, const ns3::Address &, const ns3::Address &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >'])
    register_Ns3CallbackImpl__Void_Ns3Ptr__lt__ns3NetDevice__gt___Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< void, ns3::Ptr<ns3::NetDevice>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
    register_Ns3CallbackImpl__Void_Ns3Ptr__lt__ns3Packet__gt___Double_Ns3UanTxMode_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< void, ns3::Ptr<ns3::Packet>, double, ns3::UanTxMode, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
    register_Ns3CallbackImpl__Void_Ns3Ptr__lt__ns3Packet__gt___Double_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< void, ns3::Ptr<ns3::Packet>, double, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
    register_Ns3CallbackImpl__Void_Ns3Ptr__lt__ns3Packet__gt___Unsigned_short_Const_ns3Mac8Address___amp___Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< void, ns3::Ptr<ns3::Packet>, unsigned short, const ns3::Mac8Address &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
    register_Ns3CallbackImpl__Void_Ns3Time_Ns3Time_Unsigned_int_Unsigned_int_Double_Unsigned_int_Double_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< void, ns3::Time, ns3::Time, unsigned int, unsigned int, double, unsigned int, double, ns3::empty, ns3::empty >'])
    register_Ns3CallbackImpl__Void_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
    # --- hash implementations ---
    register_Ns3HashImplementation_methods(root_module, root_module['ns3::Hash::Implementation'])
    register_Ns3HashFunctionFnv1a_methods(root_module, root_module['ns3::Hash::Function::Fnv1a'])
    register_Ns3HashFunctionHash32_methods(root_module, root_module['ns3::Hash::Function::Hash32'])
    register_Ns3HashFunctionHash64_methods(root_module, root_module['ns3::Hash::Function::Hash64'])
    register_Ns3HashFunctionMurmur3_methods(root_module, root_module['ns3::Hash::Function::Murmur3'])
    return
def get_model_space(num_layers):
    """Build a ModelSpace where every layer picks one of two Dense states.

    Each of the ``num_layers`` layers offers a 5-unit Dense layer with
    either a 'relu' or a 'tanh' activation.
    """
    space = ModelSpace()
    for layer_idx in range(num_layers):
        # Fresh State objects per layer, mirroring the original construction.
        candidates = [
            State('Dense', units=5, activation='relu'),
            State('Dense', units=5, activation='tanh'),
        ]
        space.add_layer(layer_idx, candidates)
    return space
def gaussbern_rbm_tuple(var, dx=50, dh=10, n=sample_size):
    """Return (model density, perturbed data source) for a Gauss-Bernoulli RBM.

    The model ``p`` is a GaussBernRBM with +/-1 weights; the data source draws
    from the same RBM whose weight matrix is perturbed by Gaussian noise of
    variance ``var``.  Parameters are drawn under a fixed seed so the problem
    is reproducible.

    NOTE(review): ``n`` is accepted (defaulting to the module-level
    ``sample_size``) but is not used in this body -- presumably consumed by
    callers; confirm before removing.
    """
    with util.NumpySeedContext(seed=1000):
        # Draw order matters for reproducibility: weights, visible bias,
        # hidden bias, then the perturbation noise.
        W = (np.random.randint(0, 2, (dx, dh)) * 2) - 1.0
        bias_v = np.random.randn(dx)
        bias_h = np.random.randn(dh)
        p = density.GaussBernRBM(W, bias_v, bias_h)
        W_noisy = W + (np.random.randn(dx, dh) * np.sqrt(var))
        ds = data.DSGaussBernRBM(W_noisy, bias_v, bias_h, burnin=50)
    return (p, ds)
class DistributionLoss(loss._Loss):
    """Cross entropy between two distributions given as logits.

    The target ``real_output`` is turned into probabilities with softmax and
    the prediction ``model_output`` into log-probabilities with log-softmax;
    the loss is the batch-averaged cross entropy ``-sum_c p_c * log q_c``.
    """

    def forward(self, model_output, real_output):
        # Always average over the batch (kept as a local flag, matching the
        # original implementation).
        size_average = True
        log_q = F.log_softmax(model_output, dim=1)
        p = F.softmax(real_output, dim=1)
        del model_output, real_output  # drop references to the raw logits
        # (B, 1, C) x (B, C, 1) -> (B, 1, 1): per-sample dot product p . log q
        per_sample = -torch.bmm(p.unsqueeze(1), log_q.unsqueeze(2))
        return per_sample.mean() if size_average else per_sample.sum()
class TestQuaternion(unittest.TestCase):
    # Unit tests for the project's Quaternion class: construction from a
    # rotation vector, Hamilton products, rotation matrices, inverses, and
    # long-run numerical stability.  (Reformatted from a collapsed paste;
    # code tokens unchanged.)

    def setUp(self):
        # No shared fixtures; each test builds its own quaternions.
        pass

    def test_quaternion_from_rotation(self):
        # A rotation vector phi maps to the unit quaternion
        # [cos(|phi|/2), sin(|phi|/2) * phi / |phi|].
        phi = np.random.rand(3)
        phi_norm = np.linalg.norm(phi)
        theta = Quaternion.from_rotation(phi)
        self.assertAlmostEqual(theta.entries[0], np.cos((phi_norm / 2.0)))
        self.assertAlmostEqual(theta.entries[1], ((np.sin((phi_norm / 2.0)) * phi[0]) / phi_norm))
        self.assertAlmostEqual(theta.entries[2], ((np.sin((phi_norm / 2.0)) * phi[1]) / phi_norm))
        self.assertAlmostEqual(theta.entries[3], ((np.sin((phi_norm / 2.0)) * phi[2]) / phi_norm))

    def test_quaternion_rot_matrix_det_one(self):
        # The rotation matrix of any unit quaternion must have determinant +1.
        for _ in range(10):
            theta = np.random.normal(0.0, 1.0, 4)
            theta = Quaternion((theta / np.linalg.norm(theta)))
            R = theta.rotation_matrix()
            self.assertAlmostEqual(np.linalg.det(R), 1.0)

    def test_multiply_quaternions(self):
        # Build theta1 = [s, p1, p2, p3] as a random unit quaternion by
        # sampling each component within the range that keeps the norm <= 1.
        s = ((2 * random.random()) - 1.0)
        p1 = (((2.0 - (2 * np.abs(s))) * random.random()) - (1.0 - np.abs(s)))
        p2 = ((((2.0 - (2.0 * np.abs(s))) - (2.0 * np.abs(p1))) * random.random()) - ((1.0 - np.abs(s)) - np.abs(p1)))
        p3 = np.sqrt((((1.0 - (s ** 2)) - (p1 ** 2)) - (p2 ** 2)))
        theta1 = Quaternion(np.array([s, p1, p2, p3]))
        # NOTE(review): q1/q2/q3 below reuse s/p1/p2 in their bounds instead
        # of t/q1/q2 -- looks like a copy-paste leftover from theta1.  The
        # product-formula assertions hold for arbitrary quaternions, so the
        # test still passes, but theta2 is not sampled as intended.
        t = ((2 * random.random()) - 1.0)
        q1 = (((2.0 - (2 * np.abs(s))) * random.random()) - (1.0 - np.abs(s)))
        q2 = ((((2.0 - (2.0 * np.abs(s))) - (2.0 * np.abs(p1))) * random.random()) - ((1.0 - np.abs(s)) - np.abs(p1)))
        q3 = np.sqrt((((1.0 - (s ** 2)) - (p1 ** 2)) - (p2 ** 2)))
        theta2 = Quaternion(np.array([t, q1, q2, q3]))
        product = (theta1 * theta2)
        # Hamilton product components, written out explicitly.
        self.assertAlmostEqual(product.s, ((((t * s) - (p1 * q1)) - (p2 * q2)) - (p3 * q3)))
        self.assertAlmostEqual(product.entries[1], ((((s * q1) + (t * p1)) + (p2 * q3)) - (p3 * q2)))
        self.assertAlmostEqual(product.entries[2], ((((s * q2) + (t * p2)) + (p3 * q1)) - (p1 * q3)))
        self.assertAlmostEqual(product.entries[3], ((((s * q3) + (t * p3)) + (p1 * q2)) - (p2 * q1)))

    def test_quaternion_rotation_matrix(self):
        # Spot-check entries of the rotation matrix against the standard
        # quaternion-to-matrix formulas.
        s = ((2 * random.random()) - 1.0)
        p1 = (((2.0 - (2 * np.abs(s))) * random.random()) - (1.0 - np.abs(s)))
        p2 = ((((2.0 - (2.0 * np.abs(s))) - (2.0 * np.abs(p1))) * random.random()) - ((1.0 - np.abs(s)) - np.abs(p1)))
        p3 = np.sqrt((((1.0 - (s ** 2)) - (p1 ** 2)) - (p2 ** 2)))
        theta = Quaternion(np.array([s, p1, p2, p3]))
        R = theta.rotation_matrix()
        self.assertAlmostEqual(R[0][0], (2.0 * (((theta.s ** 2) + (theta.p[0] ** 2)) - 0.5)))
        self.assertAlmostEqual(R[0][1], (2.0 * ((theta.p[0] * theta.p[1]) - (theta.s * theta.p[2]))))
        self.assertAlmostEqual(R[1][0], (2.0 * ((theta.p[0] * theta.p[1]) + (theta.s * theta.p[2]))))
        self.assertAlmostEqual(R[1][1], (2.0 * (((theta.s ** 2) + (theta.p[1] ** 2)) - 0.5)))
        self.assertAlmostEqual(R[2][2], (2.0 * (((theta.s ** 2) + (theta.p[2] ** 2)) - 0.5)))
        self.assertAlmostEqual(R[2][0], (2.0 * ((theta.p[0] * theta.p[2]) - (theta.s * theta.p[1]))))

    def test_rot_matrix_against_rodriguez(self):
        # Compare the quaternion rotation matrix to the Rodrigues formula
        # R = I + sin(m) * K + (1 - cos(m)) * K^2 for axis phi, angle m.
        phi = np.random.normal(0.0, 1.0, 3)
        phi = (phi / np.linalg.norm(phi))
        magnitude = np.random.uniform(0.0, np.pi)
        theta = Quaternion.from_rotation((phi * magnitude))
        R = theta.rotation_matrix()
        # Skew-symmetric cross-product matrix of the (unit) axis.
        omega = np.array([[0.0, ((- 1.0) * phi[2]), phi[1]], [phi[2], 0.0, ((- 1.0) * phi[0])], [((- 1.0) * phi[1]), phi[0], 0.0]])
        R_rodriguez = ((np.identity(3) + (np.sin(magnitude) * omega)) + (np.inner(omega, omega.T) * (1.0 - np.cos(magnitude))))
        for j in range(3):
            for k in range(3):
                self.assertAlmostEqual(R[(j, k)], R_rodriguez[(j, k)])

    def test_quaternion_inverse(self):
        # theta * theta^-1 must be the identity quaternion [1, 0, 0, 0].
        s = ((2 * random.random()) - 1.0)
        p1 = (((2.0 - (2 * np.abs(s))) * random.random()) - (1.0 - np.abs(s)))
        p2 = ((((2.0 - (2.0 * np.abs(s))) - (2.0 * np.abs(p1))) * random.random()) - ((1.0 - np.abs(s)) - np.abs(p1)))
        p3 = np.sqrt((((1.0 - (s ** 2)) - (p1 ** 2)) - (p2 ** 2)))
        theta = Quaternion(np.array([s, p1, p2, p3]))
        theta_inv = theta.inverse()
        identity = (theta * theta_inv)
        self.assertAlmostEqual(identity.s, 1.0)
        self.assertAlmostEqual(identity.p[0], 0.0)
        self.assertAlmostEqual(identity.p[1], 0.0)
        self.assertAlmostEqual(identity.p[2], 0.0)

    def test_quaternion_rotation_angle(self):
        # Round trip: quaternion -> rotation vector -> quaternion.
        s = ((2 * random.random()) - 1.0)
        p1 = (((2.0 - (2 * np.abs(s))) * random.random()) - (1.0 - np.abs(s)))
        p2 = ((((2.0 - (2.0 * np.abs(s))) - (2.0 * np.abs(p1))) * random.random()) - ((1.0 - np.abs(s)) - np.abs(p1)))
        p3 = np.sqrt((((1.0 - (s ** 2)) - (p1 ** 2)) - (p2 ** 2)))
        theta = Quaternion(np.array([s, p1, p2, p3]))
        rotation_angle = theta.rotation_angle()
        phi = Quaternion.from_rotation(rotation_angle)
        self.assertAlmostEqual(phi.s, theta.s)
        self.assertAlmostEqual(phi.p[0], theta.p[0])
        self.assertAlmostEqual(phi.p[1], theta.p[1])
        self.assertAlmostEqual(phi.p[2], theta.p[2])

    def test_quaternion_stability(self):
        # Compose 100k random unit quaternions and check the norm never
        # drifts from 1.  NOTE(review): long-running by design (~seconds).
        orientation = Quaternion([1.0, 0.0, 0.0, 0.0])
        max_err = 0.0
        for k in range(100000):
            theta = np.random.normal(0.0, 1.0, 4)
            theta = Quaternion((theta / np.linalg.norm(theta)))
            orientation = (theta * orientation)
            norm_err = abs((((orientation.s ** 2) + np.dot(orientation.p, orientation.p)) - 1.0))
            if (norm_err > max_err):
                max_err = norm_err
        print('max_err is ', max_err)
        self.assertAlmostEqual(max_err, 0.0)

    def test_rot_matrix_stability(self):
        # Compose 100k Rodrigues rotation matrices and check rows stay unit
        # and mutually orthogonal.  NOTE(review): long-running by design.
        max_norm_err = 0.0
        max_orthogonal_err = 0.0
        R = np.identity(3)
        for k in range(100000):
            phi = np.random.normal(0.0, 1.0, 3)
            phi = (phi / np.linalg.norm(phi))
            magnitude = np.random.uniform(0.0, np.pi)
            omega = np.array([[0.0, ((- 1.0) * phi[2]), phi[1]], [phi[2], 0.0, ((- 1.0) * phi[0])], [((- 1.0) * phi[1]), phi[0], 0.0]])
            R_increment = ((np.identity(3) + (np.sin(magnitude) * omega)) + (np.inner(omega, omega.T) * (1.0 - np.cos(magnitude))))
            R = np.inner(R_increment, R.T)
            norm_err = max([abs((np.linalg.norm(R[0]) - 1.0)), abs((np.linalg.norm(R[1]) - 1.0)), abs((np.linalg.norm(R[2]) - 1.0))])
            if (norm_err > max_norm_err):
                max_norm_err = norm_err
            orthogonal_err = max([abs(np.inner(R[0], R[1])), abs(np.inner(R[0], R[2])), abs(np.inner(R[1], R[2]))])
            if (orthogonal_err > max_orthogonal_err):
                max_orthogonal_err = orthogonal_err
        print('max_norm_err is ', max_norm_err)
        print('max_orthogonal_err is ', max_orthogonal_err)
        self.assertAlmostEqual(max_norm_err, 0.0)
        self.assertAlmostEqual(max_orthogonal_err, 0.0)
def get_pretrained_url(model: str, tag: str):
    """Return the pretrained-weights URL for ``(model, tag)``, or '' if unknown.

    The model lookup in the module-level ``_PRETRAINED`` registry is exact;
    the tag lookup is case-insensitive.
    """
    if model not in _PRETRAINED:
        return ''
    tags = _PRETRAINED[model]
    normalized = tag.lower()
    return tags[normalized] if normalized in tags else ''
class LatentModel(Model):
    """Dynamics model with a Gaussian latent variable.

    The base network maps flattened (observations + actions) to a
    next-observation distribution; a GaussianLatentObject owns the latent
    posterior and its loss.  Parameters are exposed in two groups
    (base / latent) so they can be optimized separately.
    """

    def _init_params_to_attrs(self, params):
        # Ensemble size and whether the net predicts its own sigma head.
        self._num_nets = params.num_nets
        self._is_probabilistic = params.is_probabilistic
        self._net = params.network.to_module_list(as_sequential=True).to(self.device)
        self._latent_params = params.latent_object
        # Scales the fixed output sigma used when the net is deterministic.
        self._deterministic_sigma_multiplier = float(params.deterministic_sigma_multiplier)
        assert (self._deterministic_sigma_multiplier > 0)
        self._all_input_keys = (self._env_spec.observation_names + self._env_spec.action_names)

    def _init_setup(self):
        # Fall back to zero-mean / unit-sigma stats when no training
        # dataset is attached; stats are broadcast to the obs shape.
        if (self._dataset_train is not None):
            output_stats = self._dataset_train.get_output_stats()
        else:
            output_stats = AttrDict({'mu': np.array([0]), 'sigma': np.array([1])})
        output_stats.leaf_modify((lambda arr: np.broadcast_to(arr, self._env_spec.names_to_shapes.obs)))
        logger.debug(('[Latent Model] obs stats: mu = %s, sigma = %s' % (output_stats.mu, output_stats.sigma)))
        self.output_stats = output_stats
        # Frozen parameter so the default sigma follows the module's device.
        self._default_sigma_obs = nn.Parameter(torch.from_numpy(output_stats.sigma.astype(float)).to(self.device), requires_grad=False)
        self._latent_obj = GaussianLatentObject(self._latent_params, self._env_spec, self._dataset_train)
        # Subclasses / setup code must have provided these hooks.
        assert hasattr(self, '_preproc_fn')
        assert hasattr(self, '_postproc_fn')
        assert hasattr(self, '_loss_fn')

    def latent_parameters(self):
        # Parameters of the latent object only (for a separate optimizer).
        return self._latent_obj.parameters()

    def base_parameters(self):
        # Parameters of the dynamics network only.
        return self._net.parameters()

    def get_latent_mu_logsig(self):
        return self._latent_obj.get_latent_mu_logsig()

    def get_online_latent_mu_logsig(self):
        return self._latent_obj.get_online_latent_mu_logsig()

    def reset_latent_model(self):
        # Re-initializes the online latent estimate.
        self._latent_obj.reset_online_latent_mu_logsig()

    def latent_loss(self, inputs, outputs, i=0, writer=None, writer_prefix=''):
        # Delegates to the latent object, handing it our forward pass.
        return self._latent_obj.loss(inputs, outputs, self._forward, i=i, writer=writer, writer_prefix=writer_prefix)

    def base_loss(self, inputs, outputs, i=0, writer=None, writer_prefix=''):
        model_outputs = self(inputs)
        # Periodic debug dump of shapes and prediction error every 1000 steps.
        if ((i > 0) and ((i % 1000) == 0)):
            logger.debug('')
            diff = to_numpy((model_outputs.next_obs - outputs.next_obs))
            logger.debug('Mu0: {}'.format(self._latent_obj.mu_0.data))
            logger.debug('Mu1: {}'.format(self._latent_obj.mu_1.data))
            logger.debug('SHAPES: Obs {}, Act {}, Next Obs {}, Pred Next Obs {}'.format(inputs.obs.shape, inputs.act.shape, outputs.next_obs.shape, model_outputs.next_obs.shape))
            logger.debug('VALS : Obs {}, Act {}, Next Obs {}, Pred Next Obs {}'.format(to_numpy(inputs.obs[0]), to_numpy(inputs.act[0]), to_numpy(outputs.next_obs[0]), to_numpy(model_outputs.next_obs[0])))
            logger.debug('PRED ERROR: {}'.format(diff[(0, 0)]))
            logger.debug('SCALED ERROR: {}'.format((diff[(0, 0)] / self.output_stats.sigma_delta)))
            logger.debug('')
        loss = self._loss_fn(inputs, outputs, model_outputs)
        if writer:
            writer.add_scalar((writer_prefix + 'base_loss'), loss.item(), i)
        return loss

    def warm_start(self, model, observation, goal):
        # Not supported by this model.
        raise NotImplementedError

    def _forward(self, inputs):
        # Run the base net: flatten every obs/act input to (batch, -1),
        # concatenate, and reshape the output per ensemble member.
        inputs = self._preproc_fn(inputs)
        arrays = []
        for key in (self._env_spec.observation_names + self._env_spec.action_names):
            arrays.append(inputs[key].view(inputs[key].shape[0], (- 1)).float().to(self.device))
        torch_in = torch.cat(arrays, dim=1)
        batch_size = torch_in.shape[0]
        torch_out = self._net(torch_in).view(batch_size, self._num_nets, (- 1))
        if self._is_probabilistic:
            # Net output packs [mu | sigma] along the feature axis.
            (torch_mu, torch_sigma) = torch.chunk(torch_out, 2, dim=2)
        else:
            # Deterministic net: constant sigma from dataset stats.
            torch_mu = torch_out
            torch_sigma = (torch.ones_like(torch_mu) * (self._default_sigma_obs.float() * self._deterministic_sigma_multiplier))
        outputs = AttrDict({'next_obs': torch_mu, 'next_obs_sigma': torch_sigma})
        return self._postproc_fn(inputs, outputs)

    def forward(self, inputs, obs_lowd=None):
        inputs = inputs.copy()
        # Sample the latent posterior when a (non-empty) latent slot exists.
        if (inputs.latent.numel() > 0):
            dist = self._latent_obj(inputs)
            inputs.latent = dist.sample
        return self._forward(inputs)
def to_planetoid(dataset):
    """Convert an OGB-style node-classification dataset to Planetoid pieces.

    Returns (x, tx, allx, y, ty, ally, graph, split_idx): CSR feature
    matrices for the train/test/all node sets, matching one-hot label
    matrices, and an adjacency dict (with a self-loop per node) keyed by
    node index.  NOTE(review): valid_idx is computed but unused — confirm
    whether the validation split is intentionally dropped.
    """
    split_idx = dataset.get_idx_split('random', 0.25)
    (train_idx, valid_idx, test_idx) = (split_idx['train'], split_idx['valid'], split_idx['test'])
    (graph, label) = dataset[0]
    label = torch.squeeze(label)
    print('generate x')
    x = graph['node_feat'][train_idx].numpy()
    x = sp.csr_matrix(x)
    tx = graph['node_feat'][test_idx].numpy()
    tx = sp.csr_matrix(tx)
    allx = graph['node_feat'].numpy()
    allx = sp.csr_matrix(allx)
    y = F.one_hot(label[train_idx]).numpy()
    ty = F.one_hot(label[test_idx]).numpy()
    ally = F.one_hot(label).numpy()
    # Rows of edge_index.T are (src, dst) pairs.
    edge_index = graph['edge_index'].T
    graph = defaultdict(list)
    # Self-loop for every node, then one directed entry per edge.
    for i in range(0, label.shape[0]):
        graph[i].append(i)
    for (start_edge, end_edge) in edge_index:
        graph[start_edge.item()].append(end_edge.item())
    return (x, tx, allx, y, ty, ally, graph, split_idx)
def get_frames(l):
    """Build one Plotly Scatter trace per packet, truncated to the first
    *l* points (used as animation frames).

    NOTE(review): reads the module-level globals xs, ys, ints and the
    interaction_* lookup dicts; only the truncation length is a
    parameter — confirm those globals are initialized before calling.
    """
    frames = []
    for i in range(len(xs)):
        # Marker text/opacity/color are all keyed off the interaction code
        # ints[i][j]; styling lists cover the full trace, only x/y are cut.
        frames.append(go.Scatter(x=xs[i].tolist()[0:l], y=ys[i].tolist()[0:l], mode='markers+lines', name=('Packet ' + str((i + 1))), showlegend=False, hovertemplate=(('<b>X</b>: %{x}' + '<br><b>Y</b>: %{y}<br>') + '<b>Last Interaction: %{text}</b>'), text=[interaction_from_num.get(ints[i][j]) for j in range(len(xs[i]))], line=dict(color='darkslategrey'), marker=dict(opacity=[interaction_opacity_from_num.get(ints[i][j]) for j in range(len(xs[i]))], color=[interaction_color_from_num.get(ints[i][j]) for j in range(len(xs[i]))])))
    return frames
def main():
    """Fit SpectralReduction on the two-moons dataset and visualize it."""
    (x_train, x_test, y_train, y_test) = load_data('twomoons')
    # Use the full dataset (train + test) for unsupervised reduction.
    X = torch.cat([x_train, x_test])
    if (y_train is not None):
        y = torch.cat([y_train, y_test])
    else:
        y = None
    spectralreduction = SpectralReduction(n_components=2, should_use_ae=False, should_use_siamese=False, spectral_batch_size=712, spectral_epochs=40, spectral_is_local_scale=False, spectral_n_nbg=8, spectral_scale_k=2, spectral_lr=0.01, spectral_hiddens=[128, 128, 2])
    X_new = spectralreduction.fit_transform(X)
    # Labels (if any) are only used to color the visualization.
    spectralreduction.visualize(X_new, y, n_components=1)
def get_opt(args=None):
    """Parse command-line options for running the TOM model.

    Parameters
    ----------
    args : list[str] | None
        Argument list to parse.  None (the default) keeps the previous
        behavior of reading ``sys.argv[1:]``; passing an explicit list
        makes the parser testable and reusable from other entry points.

    Returns
    -------
    argparse.Namespace
        The parsed options.
    """
    parser = argparse.ArgumentParser(description='Run TOM model')
    parser.add_argument('--checkpoint', '-c', type=str, default='../result/TOM/gen_epoch_50.pth', help='checkpoint to load')
    parser.add_argument('--data_root', '-d', type=str, default='data', help='path to data root directory')
    parser.add_argument('--out_dir', '-o', type=str, default='../result', help='path to result directory')
    parser.add_argument('--name', '-n', type=str, default='TOM', help='model name')
    parser.add_argument('--batch_size', '-b', type=int, default=16, help='batch size')
    parser.add_argument('--n_worker', '-w', type=int, default=16, help='number of workers')
    parser.add_argument('--gpu_id', '-g', type=str, default='0', help='GPU ID')
    parser.add_argument('--log_freq', type=int, default=100, help='log frequency')
    parser.add_argument('--fine_width', type=int, default=192)
    parser.add_argument('--fine_height', type=int, default=256)
    parser.add_argument('--radius', type=int, default=5)
    parser.add_argument('--grid_size', type=int, default=5, help='hyperparameter for the network')
    opt = parser.parse_args(args)
    return opt
def test_loop_unrolling():
    """Check that an unrolled loop applies the in-place increment n times."""
    def called(A):
        # ndarray += is in-place, so the caller's buffer is mutated.
        A += 1
    n = 5
    # NOTE(review): `program` looks like it should carry a @dace.program
    # decorator (the test name and dace.unroll suggest compiled-loop
    # unrolling) — confirm against the original test file.
    def program(A):
        for i in dace.unroll(range(n)):
            called(A)
    A = np.random.rand(20)
    expected = (A + n)
    program(A)
    assert np.allclose(A, expected)
class SpatialAugmenterBase(dptaugmenterbase.AugmenterBase):
    """Base class for spatial augmenters.

    Adds no behavior of its own; it exists to group spatial augmenters
    and forwards *keyword* to AugmenterBase.
    """

    def __init__(self, keyword):
        super().__init__(keyword=keyword)
def _unmangle(method_name):
    """Strip the suffix that the module-level _re_name_and_sha pattern
    captures away from *method_name*.

    Returns the first capture group on a match, otherwise the name
    unchanged.
    """
    match = _re_name_and_sha.match(method_name)
    return match.group(1) if match else method_name
class TestMinMaxStep(unittest.TestCase):
    """Verify that min_step/max_step solver options change the number of
    function evaluations (f.nfe) taken on a simple linear ODE."""

    def test_min_max_step(self):
        with warnings.catch_warnings():
            # Solvers may warn when clamped steps hurt accuracy; ignore.
            warnings.simplefilter('ignore')
            for device in DEVICES:
                for min_step in (0, 2):
                    for max_step in (float('inf'), 5):
                        for (method, options) in [('dopri5', {}), ('scipy_solver', {'solver': 'LSODA'})]:
                            options['min_step'] = min_step
                            options['max_step'] = max_step
                            (f, y0, t_points, sol) = construct_problem(device=device, ode='linear')
                            torchdiffeq.odeint(f, y0, t_points, method=method, options=options)
                            if (min_step > 0):
                                # A large minimum step forces few, coarse steps.
                                self.assertLess(f.nfe, 50)
                            else:
                                # Unconstrained adaptive stepping evaluates often.
                                self.assertGreater(f.nfe, 100)
def desolve_laplace(de, dvar, ics=None, ivar=None):
    """Solve an ODE via Maxima's Laplace-transform ``desolve``.

    de    : the equation (a relation, or an expression implicitly == 0).
    dvar  : dependent variable declared as ``function('y')(x)``; may also
            be a [dvar, ivar] pair.
    ics   : optional initial conditions — presumably
            [x0, y(x0), y'(x0), ...]; confirm against the Sage docs.
    ivar  : independent variable; inferred when there is exactly one
            candidate among de's variables.
    """
    # Normalize f == g to f - g (implicitly == 0).
    if (isinstance(de, Expression) and de.is_relational()):
        de = (de.lhs() - de.rhs())
    if (isinstance(dvar, Expression) and dvar.is_symbol()):
        raise ValueError("You have to declare dependent variable as a function evaluated at the independent variable, eg. y=function('y')(x)")
    if isinstance(dvar, list):
        (dvar, ivar) = dvar
    elif (ivar is None):
        ivars = de.variables()
        ivars = [t for t in ivars if (t is not dvar)]
        if (len(ivars) != 1):
            raise ValueError('Unable to determine independent variable, please specify.')
        ivar = ivars[0]
    dvar_str = str(dvar)

    def sanitize_var(exprs):
        # Drop the derivative tick Maxima prints before the variable name.
        return exprs.replace(("'" + dvar_str), dvar_str)
    de0 = de._maxima_()
    P = de0.parent()
    # Insert the _SAGE_VAR_ prefix Maxima uses for Sage-owned symbols,
    # right after the function name's opening parenthesis.
    i = dvar_str.find('(')
    dvar_str = ((dvar_str[:(i + 1)] + '_SAGE_VAR_') + dvar_str[(i + 1):])
    cmd = sanitize_var((((('desolve(' + de0.str()) + ',') + dvar_str) + ')'))
    soln = P(cmd).rhs()
    if (str(soln).strip() == 'false'):
        raise NotImplementedError('Maxima was unable to solve this ODE.')
    soln = soln.sage()
    if (ics is not None):
        d = len(ics)
        # Substitute each initial condition: the i-th derivative at
        # ivar = ics[0] equals ics[i+1].  Built via eval so the derivative
        # call can be spelled with the runtime variable name.
        for i in range((d - 1)):
            soln = eval((('soln.substitute(diff(dvar,ivar,i)(' + str(ivar)) + '=ics[0])==ics[i+1])'))
    return soln
def IntVector(prefix, sz, ctx=None):
    """Create a list of *sz* integer constants named '<prefix>__<index>'.

    The context is resolved through _get_ctx, so ctx=None falls back to
    the default context.
    """
    resolved_ctx = _get_ctx(ctx)
    names = ('%s__%s' % (prefix, idx) for idx in range(sz))
    return [Int(name, resolved_ctx) for name in names]
def np_ify(tensor_or_other):
    """Convert a ptu.TorchVariable to a numpy array; pass anything else
    through unchanged."""
    if not isinstance(tensor_or_other, ptu.TorchVariable):
        return tensor_or_other
    return ptu.get_numpy(tensor_or_other)
def substitute(graph: common.Graph, substitutions_list: List[common.BaseSubstitution]) -> common.Graph:
    """Apply each substitution to every node its matcher selects.

    Substitutions run in list order.  For each substitution, matching
    happens once against the graph as it stands when that substitution's
    turn arrives; each application may return a rewritten graph.
    """
    for sub in substitutions_list:
        for matched_node in graph.filter(sub.matcher_instance):
            graph = sub.substitute(graph, matched_node)
    return graph
def create_dataloader(logger):
    """Build the training (and optional evaluation) KITTI RCNN dataloaders.

    All hyperparameters come from the module-level ``args`` and ``cfg``;
    only the logger is passed in.  Returns (train_loader, test_loader)
    where test_loader is None unless args.train_with_eval is set.
    """
    DATA_PATH = os.path.join('../', 'data')
    train_set = KittiRCNNDataset(root_dir=DATA_PATH, npoints=cfg.RPN.NUM_POINTS, split=cfg.TRAIN.SPLIT, mode='TRAIN', logger=logger, classes=cfg.CLASSES, rcnn_training_roi_dir=args.rcnn_training_roi_dir, rcnn_training_feature_dir=args.rcnn_training_feature_dir, gt_database_dir=args.gt_database)
    train_loader = DataLoader(train_set, batch_size=args.batch_size, pin_memory=True, num_workers=args.workers, shuffle=True, collate_fn=train_set.collate_batch, drop_last=True)
    if args.train_with_eval:
        test_set = KittiRCNNDataset(root_dir=DATA_PATH, npoints=cfg.RPN.NUM_POINTS, split=cfg.TRAIN.VAL_SPLIT, mode='EVAL', logger=logger, classes=cfg.CLASSES, rcnn_eval_roi_dir=args.rcnn_eval_roi_dir, rcnn_eval_feature_dir=args.rcnn_eval_feature_dir)
        # batch_size=1 for per-sample evaluation.  NOTE(review):
        # shuffle=True is unusual for an eval loader — confirm intent.
        test_loader = DataLoader(test_set, batch_size=1, shuffle=True, pin_memory=True, num_workers=args.workers, collate_fn=test_set.collate_batch)
    else:
        test_loader = None
    return (train_loader, test_loader)
class Imputer(nn.Module):
    """Fully-connected imputation network for 28x28 (MNIST-shaped) inputs.

    forward() flattens the input, runs it through four linear stages with
    ReLU activations, and reshapes the sigmoid output back to
    (N, 1, 28, 28).

    Bug fix: the original __init__ defined fc2..fc4 but forward() also
    calls self.fc1, which was never created — every forward pass raised
    AttributeError.  fc1 is now defined as the 784 -> arch[0] input
    layer, matching the flattened image size and fc2's input width.
    """

    def __init__(self, arch=(784, 784)):
        super().__init__()
        # Input layer: flattened 28*28 image -> arch[0] (was missing).
        self.fc1 = nn.Linear(784, arch[0])
        self.fc2 = nn.Linear(arch[0], arch[1])
        self.fc3 = nn.Linear(arch[1], arch[0])
        self.fc4 = nn.Linear(arch[0], 784)
        # Squash logits to [0, 1] and restore the image layout.
        self.transform = (lambda x: torch.sigmoid(x).view((- 1), 1, 28, 28))

    def forward(self, input, data, mask):
        # NOTE(review): data and mask are accepted but unused here —
        # presumably consumed by callers or subclasses; confirm.
        net = input.view(input.size(0), (- 1))
        net = F.relu(self.fc1(net))
        net = F.relu(self.fc2(net))
        net = F.relu(self.fc3(net))
        net = self.fc4(net)
        net = self.transform(net)
        return net
class GUDUDrp1smat(SpectralMatrix):
    """Tri-diagonal spectral matrix between two UD bases.

    assemble() returns the matrix as a {diagonal-offset: values} dict
    (offsets 0, +1, -1) — the banded format SpectralMatrix subclasses
    use.  The entries are closed-form functions of the mode index k;
    presumably coefficient identities for the UD basis — confirm against
    the accompanying derivation.
    """

    def assemble(self, method):
        (test, trial) = (self.testfunction, self.trialfunction)
        assert isinstance(test[0], UD)
        assert isinstance(trial[0], UD)
        # One entry per retained mode; off-diagonals are one shorter.
        k = np.arange((test[0].N - 1))
        d = {0: (((- 2) * (k + 1)) * (((k - 1) / ((2 * k) + 1)) + ((k + 3) / ((2 * k) + 3)))), 1: ((((- 2) * (k[1:] + 1)) * (k[1:] + 2)) / ((2 * k[1:]) + 1)), (- 1): ((((- 2) * k[:(- 1)]) * (k[:(- 1)] + 1)) / ((2 * k[:(- 1)]) + 3))}
        return d
def BFS(adj_lists: dict, s):
    """Breadth-first search over a weighted adjacency-list graph.

    Parameters
    ----------
    adj_lists : dict
        Maps each vertex to an iterable of (neighbor, edge_data) pairs.
    s
        Start vertex.

    Returns
    -------
    dict
        prev[v] = (parent, edge_data) on the BFS tree for every vertex
        reachable from s; prev[s] = (None, None).

    Uses collections.deque rather than queue.Queue: the frontier is only
    touched by this single thread, so queue.Queue's locking machinery is
    pure overhead here.
    """
    from collections import deque

    prev = {s: (None, None)}
    frontier = deque([s])
    while frontier:
        v = frontier.popleft()
        for (u, d) in adj_lists[v]:
            # First time we see u fixes its BFS parent and edge data.
            if (u not in prev):
                prev[u] = (v, d)
                frontier.append(u)
    return prev
class SupportsCovarianceBetweenPoints(SupportsPredictJoint, Protocol):
    """Protocol for models that can report the covariance between their
    predictions at two batches of query points."""

    def covariance_between_points(self, query_points_1: TensorType, query_points_2: TensorType) -> TensorType:
        """Return the covariance between predictions at the two batches.

        Protocol member: concrete models must override; this body only
        raises NotImplementedError.
        """
        raise NotImplementedError
def experimental_warning(issue_number, message, stacklevel=4):
    """Emit *message* as a FutureWarning via the module-level warning()
    helper, tagged with the tracking *issue_number*.

    stacklevel defaults to 4 — presumably so the warning is attributed to
    the user's call site rather than these wrapper frames; confirm the
    expected depth against warning()'s implementation.
    """
    warning(issue_number, message, FutureWarning, stacklevel)
class Token():
    """A lexical token: a type tag, the matched text, and its position.

    The position is stored but deliberately left out of the printed form.
    """

    def __init__(self, type, value, pos):
        # `type` shadows the builtin; kept for interface compatibility.
        self.type = type
        self.value = value
        self.pos = pos

    def __str__(self):
        return 'Token(\'{}\', "{}")'.format(self.type, self.value)

    def __repr__(self):
        return str(self)
_module()


class PanopticFPN(TwoStagePanopticSegmentor):
    """Implementation of `Panoptic Feature Pyramid Networks
    <https://arxiv.org/abs/1901.02446>`_.

    Thin subclass: every argument is forwarded verbatim to
    TwoStagePanopticSegmentor; this class only names the configuration.
    """

    # NOTE(review): the bare `_module()` call above looks like a mangled
    # registry decorator (e.g. @MODELS.register_module()) — confirm
    # against the original source.
    def __init__(self, backbone, neck=None, rpn_head=None, roi_head=None, train_cfg=None, test_cfg=None, pretrained=None, init_cfg=None, semantic_head=None, panoptic_fusion_head=None):
        super(PanopticFPN, self).__init__(backbone=backbone, neck=neck, rpn_head=rpn_head, roi_head=roi_head, train_cfg=train_cfg, test_cfg=test_cfg, pretrained=pretrained, init_cfg=init_cfg, semantic_head=semantic_head, panoptic_fusion_head=panoptic_fusion_head)
def append_df(df, df_to_append, ignore_index=True):
    """Concatenate *df_to_append* onto *df* and return the new frame.

    DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
    pd.concat is the supported equivalent and produces the same result.
    Neither input frame is mutated.

    Parameters
    ----------
    df, df_to_append : pandas.DataFrame
        Frames to stack vertically, in that order.
    ignore_index : bool
        When True (default) the result gets a fresh RangeIndex.
    """
    import pandas as pd

    return pd.concat([df, df_to_append], ignore_index=ignore_index)
class LamaGI(Dataset, PRNGMixin):
    """Inpainting dataset: each item is a target image/mask plus the
    remaining images/masks of the example as reference sources."""

    def __init__(self, **kwargs):
        # clean_prob defaults to one over the number of references.
        self.clean_prob = kwargs.pop('clean_prob', (1.0 / kwargs['n_references']))
        # Fill any masking options the caller omitted from the defaults.
        for k in default_mask_config:
            if (not (k in kwargs)):
                kwargs[k] = default_mask_config[k]
        self.base_data = make_default_train_dataset(**kwargs)

    def __len__(self):
        return len(self.base_data)

    def __getitem__(self, i):
        example = self.base_data[i]
        # NOTE(review): T is computed but unused — confirm it can go.
        T = example['images'].shape[0]
        # Element 0 is the target; the rest act as reference sources.
        image = example['images'][0]
        mask = example['masks'][0]
        srcs = example['images'][1:]
        srcs_masks = example['masks'][1:]
        return {'image': image, 'mask': mask, 'srcs': srcs, 'srcs_masks': srcs_masks}
def retinanet_resnet50_fpn(pretrained=False, progress=True, num_classes=91, pretrained_backbone=True, trainable_backbone_layers=None, **kwargs):
    """Build a RetinaNet detector with a ResNet-50 FPN backbone.

    Parameters mirror the torchvision constructor: *pretrained* loads the
    COCO checkpoint, *num_classes*=2 additionally slices the COCO
    classification head down to (background, person) channels.

    Bug fix: the hard-coded person-head index list ended with ``728, 789``.
    COCO logits are laid out as 91 classes per anchor, so the pairs must
    be (91*k, 91*k + 1) for each of the 9 anchors — the last pair is
    (728, 729), and 789 silently selected the wrong channel.  The indices
    are now generated instead of hand-written.
    """
    trainable_backbone_layers = _validate_trainable_layers((pretrained or pretrained_backbone), trainable_backbone_layers, 5, 3)
    print(f'trainable_backbone_layers: {trainable_backbone_layers}')
    if pretrained:
        # Backbone weights are included in the full checkpoint.
        pretrained_backbone = False
    backbone = resnet_fpn_backbone('resnet50', pretrained_backbone, returned_layers=[2, 3, 4], extra_blocks=LastLevelP6P7(256, 256), trainable_layers=trainable_backbone_layers)
    model = RetinaNet(backbone, num_classes, **kwargs)
    if pretrained:
        state_dict = load_state_dict_from_url(model_urls['retinanet_resnet50_fpn_coco'], progress=progress)
        if (num_classes == 2):
            print('Load person detection weights from COCO.')
            # Keep the (background, person) logits of each of the 9
            # anchors: channels 91*k and 91*k + 1.
            idx = [(91 * k) + off for k in range(9) for off in (0, 1)]
            state_dict['head.classification_head.cls_logits.weight'] = state_dict['head.classification_head.cls_logits.weight'][idx]
            state_dict['head.classification_head.cls_logits.bias'] = state_dict['head.classification_head.cls_logits.bias'][idx]
        model.load_state_dict(state_dict)
        overwrite_eps(model, 0.0)
    return model
def group_by_schema(data):
    """Bucket examples by their schema's database id.

    Returns a defaultdict(list) mapping db_id -> examples, preserving the
    input order within each bucket (missing keys yield empty lists).
    """
    grouped = collections.defaultdict(list)
    for ex in data:
        grouped[ex.schema.db_id].append(ex)
    return grouped
def add_block_rc(ctx: LeanGenContext):
    """Emit the range-check proof step for the current block, if any.

    No-op when the context has no rc_steps or the step yields no proof
    lines; otherwise appends the lines to the main proof and records the
    newest rc rewrite for later use.
    """
    if (ctx.rc_steps is None):
        return
    proof_lines = ctx.rc_steps.add_block_step(called_rc_builtin=ctx.func.rc, block_suffix=ctx.block_name_suffix, ap_rewrites=ctx.ap_rewrites)
    if (len(proof_lines) == 0):
        return
    for line in proof_lines:
        ctx.add_main(line)
    # Track the rewrite produced by this block step (the most recent one).
    ctx.rewrites.append(ctx.rc_steps.rc_rw[(- 1)])
def get_model_v1(IMAGE_SIZE, NUM_CLASSES, EMB_SIZE, EFF_VER, order=0, weight_path=None):
    """Build an EfficientNet-backed embedding model with a cosine head.

    Pipeline: EfficientNet-b<EFF_VER> features -> global average pool ->
    swish Dense embedding of EMB_SIZE -> cosine-similarity logits over
    NUM_CLASSES.  *order* only disambiguates the backbone layer name so
    several instances can coexist; *weight_path* optionally loads weights.
    """

    class ArcMarginProduct_v2(tf.keras.layers.Layer):
        # Cosine head: normalized features x normalized class weights.

        def __init__(self, num_classes):
            super(ArcMarginProduct_v2, self).__init__()
            self.num_classes = num_classes

        def build(self, input_shape):
            # NOTE(review): add_variable is the deprecated alias of
            # add_weight in tf.keras — consider migrating.
            self.w = self.add_variable('weights', shape=[int(input_shape[(- 1)]), self.num_classes])

        def call(self, input):
            cosine = tf.matmul(tf.nn.l2_normalize(input, axis=1), tf.nn.l2_normalize(self.w, axis=0))
            return cosine

    def getefn():
        # Backbone constructor is looked up in the module-level EFNS table.
        pretrained_model = EFNS[EFF_VER](weights=None, include_top=False, input_shape=[*IMAGE_SIZE, 3])
        pretrained_model.trainable = True
        return pretrained_model

    def ArcFaceResNet():
        x = inputs = tf.keras.Input([*IMAGE_SIZE, 3])
        x = getefn()(x)
        x = L.GlobalAveragePooling2D()(x)
        x = L.Dense(EMB_SIZE, activation='swish')(x)
        target = ArcMarginProduct_v2(NUM_CLASSES)(x)
        model = tf.keras.Model(inputs, target)
        # Suffix the backbone layer name with *order* to keep names unique.
        model.get_layer(('efficientnet-b' + str(EFF_VER)))._name = (('efficientnet-b' + str(EFF_VER)) + str(order))
        return model
    model = ArcFaceResNet()
    model.summary()
    if (weight_path is not None):
        model.load_weights(weight_path)
    return model
class ProbabilityDictionary(object):
    """Wraps a {segmentation-string: score} dict with top-k / export helpers.

    Keys are space-separated segmentations; "characters" always means the
    key with spaces stripped.  NOTE(review): the bare `dictionary: dict`
    annotation suggests this was originally a @dataclass (decorator
    possibly lost in extraction) — as written, instances must have
    `dictionary` assigned elsewhere; confirm.
    """
    dictionary: dict

    def get_segmentations(self, astype='dict', gold_array=None):
        """Return the best segmentation per character sequence.

        astype='dict' maps characters -> best segmentation; astype='list'
        returns the best segmentations, aligned to *gold_array*'s order
        when one is supplied (unmatched entries become the string 'nan'
        via str() of the merge's NaN fill).
        """
        top_1 = self.get_top_k(k=1)
        if (gold_array and (astype == 'list')):
            # Align predictions to the gold order by joining on the
            # whitespace-stripped character sequence.
            gold_df = pd.DataFrame([{'gold': x, 'characters': x.replace(' ', '')} for x in gold_array])
            seg_df = pd.DataFrame([{'segmentation': x, 'characters': x.replace(' ', '')} for x in top_1])
            output_df = pd.merge(gold_df, seg_df, how='left', on='characters')
            output_series = output_df['segmentation'].values.tolist()
            output_series = [str(x) for x in output_series]
            return output_series
        if (astype == 'dict'):
            return {k.replace(' ', ''): k for (k, v) in top_1.items()}
        elif (astype == 'list'):
            return list(top_1.keys())

    def get_top_k(self, k=2, characters_field='characters', segmentation_field='segmentation', score_field='score', return_dataframe=False, fill=False):
        """Keep the k lowest-scoring segmentations per character sequence.

        ascending=True means "top" = smallest score — presumably scores
        are costs / negative log-probabilities; confirm.  fill=True (only
        with return_dataframe=True) pads short groups by repeating rows
        so every group has exactly k entries.
        """
        df = self.to_dataframe(characters_field=characters_field, segmentation_field=segmentation_field, score_field=score_field)
        df = df.sort_values(by=score_field, ascending=True).groupby(characters_field).head(k)
        if ((fill == False) and (return_dataframe == True)):
            return df
        elif ((fill == True) and (return_dataframe == True)):
            # Repeat each row (k - group_length + 1) times so groups of
            # size < k are padded up to k rows.
            df['group_length'] = df.groupby(characters_field)[segmentation_field].transform(len)
            df['group_length'] = (((df['group_length'] * (- 1)) + k) + 1)
            len_array = df['group_length'].values
            df = df.drop(columns=['group_length'])
            records = np.array(df.to_dict('records'))
            cloned_records = list(np.repeat(records, len_array))
            df = pd.DataFrame(cloned_records)
            return df
        elif ((fill == False) and (return_dataframe == False)):
            keys = df[segmentation_field].values
            values = df[score_field].values
            # Note: this comprehension's k shadows the parameter k.
            output = {k: v for (k, v) in list(zip(keys, values))}
            return output
        elif ((fill == True) and (return_dataframe == False)):
            raise NotImplementedError

    def to_dataframe(self, characters_field='characters', segmentation_field='segmentation', score_field='score'):
        """One row per dictionary entry, sorted by characters then score."""
        df = [{characters_field: key.replace(' ', ''), segmentation_field: key, score_field: value} for (key, value) in self.dictionary.items()]
        df = pd.DataFrame(df)
        df = df.sort_values(by=[characters_field, score_field])
        return df

    def to_csv(self, filename, characters_field='characters', segmentation_field='segmentation', score_field='score'):
        """Write the sorted dataframe form of the dictionary to *filename*."""
        df = self.to_dataframe(characters_field=characters_field, segmentation_field=segmentation_field, score_field=score_field)
        df.to_csv(filename)

    def to_json(self, filepath):
        """Dump the raw dictionary as JSON to *filepath*."""
        with open(filepath, 'w') as f:
            json.dump(self.dictionary, f)
_with_checks([LocalClassifierPerParentNode()])


def test_sklearn_compatible_estimator(estimator, check):
    # NOTE(review): the bare `_with_checks(...)` call above looks like a
    # mangled @parametrize_with_checks decorator (sklearn estimator
    # checks) — confirm against the original test module.  Each generated
    # case simply runs one sklearn compatibility check on the estimator.
    check(estimator)
class Pose3(object):
    """Generated 3D rigid transform: quaternion rotation + translation.

    Storage layout (7 floats): data[0:4] = quaternion (x, y, z, w order as
    consumed by Rot3.from_storage — confirm), data[4:7] = translation.
    The _tmp-heavy method bodies are machine-generated (symforce-style);
    do not hand-edit the arithmetic.

    NOTE(review): several methods look like they lost decorators in
    extraction — storage_dim/tangent_dim take no self (likely
    @staticmethod) and from_storage/identity/from_tangent take cls
    (likely @classmethod); retract's `self.tangent_dim()` call would pass
    self to a no-arg function as written.  Also __mul__ is defined three
    times (only the last binds) and __eq__ is defined without __hash__
    (instances become unhashable).  Confirm against the generator output.
    """
    __slots__ = ['data']

    def __repr__(self):
        return '<{} {}>'.format(self.__class__.__name__, self.data)

    def __init__(self, R=None, t=None):
        # Defaults: identity rotation, zero translation.
        rotation = (R if (R is not None) else Rot3())
        if (t is None):
            t = [0.0, 0.0, 0.0]
        if isinstance(t, numpy.ndarray):
            # Accept row/column vectors by flattening to shape (3,).
            if (t.shape in [(3, 1), (1, 3)]):
                t = t.flatten()
            elif (t.shape != (3,)):
                raise IndexError('Expected t to be a vector of length 3; instead had shape {}'.format(t.shape))
        elif (len(t) != 3):
            raise IndexError('Expected t to be a sequence of length 3, was instead length {}.'.format(len(t)))
        if (not isinstance(rotation, Rot3)):
            raise ValueError('arg R has type {}; type {} expected'.format(type(R), Rot3))
        self.data = (rotation.to_storage() + list(t))

    def R(self):
        # Alias for rotation().
        return self.rotation()

    def t(self):
        # Alias for position().
        return self.position()

    def rotation(self):
        # Rebuild the Rot3 from the first four storage slots.
        _self = self.data
        _res = ([0.0] * 4)
        _res[0] = _self[0]
        _res[1] = _self[1]
        _res[2] = _self[2]
        _res[3] = _self[3]
        return Rot3.from_storage(_res)

    def position(self):
        # Translation lives in storage slots 4..6.
        _self = self.data
        _res = numpy.zeros(3)
        _res[0] = _self[4]
        _res[1] = _self[5]
        _res[2] = _self[6]
        return _res

    def compose_with_point(self, right):
        """Apply this transform to a 3D point: R * right + t (generated)."""
        _self = self.data
        if (right.shape == (3,)):
            right = right.reshape((3, 1))
        elif (right.shape != (3, 1)):
            raise IndexError('right is expected to have shape (3, 1) or (3,); instead had shape {}'.format(right.shape))
        _tmp0 = (2 * _self[2])
        _tmp1 = (_self[3] * _tmp0)
        _tmp2 = ((2 * _self[0]) * _self[1])
        _tmp3 = ((- 2) * (_self[1] ** 2))
        _tmp4 = (1 - (2 * (_self[2] ** 2)))
        _tmp5 = (_self[0] * _tmp0)
        _tmp6 = (2 * _self[3])
        _tmp7 = (_self[1] * _tmp6)
        _tmp8 = ((- 2) * (_self[0] ** 2))
        _tmp9 = (_self[0] * _tmp6)
        _tmp10 = (_self[1] * _tmp0)
        _res = numpy.zeros(3)
        _res[0] = (((_self[4] + (right[(0, 0)] * (_tmp3 + _tmp4))) + (right[(1, 0)] * ((- _tmp1) + _tmp2))) + (right[(2, 0)] * (_tmp5 + _tmp7)))
        _res[1] = (((_self[5] + (right[(0, 0)] * (_tmp1 + _tmp2))) + (right[(1, 0)] * (_tmp4 + _tmp8))) + (right[(2, 0)] * (_tmp10 - _tmp9)))
        _res[2] = (((_self[6] + (right[(0, 0)] * (_tmp5 - _tmp7))) + (right[(1, 0)] * (_tmp10 + _tmp9))) + (right[(2, 0)] * ((_tmp3 + _tmp8) + 1)))
        return _res

    def inverse_compose(self, point):
        """Apply the inverse transform to a point: R^T * (point - t) (generated)."""
        _self = self.data
        if (point.shape == (3,)):
            point = point.reshape((3, 1))
        elif (point.shape != (3, 1)):
            raise IndexError('point is expected to have shape (3, 1) or (3,); instead had shape {}'.format(point.shape))
        _tmp0 = (2 * _self[2])
        _tmp1 = (_self[3] * _tmp0)
        _tmp2 = ((2 * _self[0]) * _self[1])
        _tmp3 = (_tmp1 + _tmp2)
        _tmp4 = ((- 2) * (_self[1] ** 2))
        _tmp5 = (1 - (2 * (_self[2] ** 2)))
        _tmp6 = (_tmp4 + _tmp5)
        _tmp7 = (_self[0] * _tmp0)
        _tmp8 = (2 * _self[3])
        _tmp9 = (_self[1] * _tmp8)
        _tmp10 = (_tmp7 - _tmp9)
        _tmp11 = ((- _tmp1) + _tmp2)
        _tmp12 = ((- 2) * (_self[0] ** 2))
        _tmp13 = (_tmp12 + _tmp5)
        _tmp14 = (_self[0] * _tmp8)
        _tmp15 = (_self[1] * _tmp0)
        _tmp16 = (_tmp14 + _tmp15)
        _tmp17 = ((_tmp12 + _tmp4) + 1)
        _tmp18 = (_tmp7 + _tmp9)
        _tmp19 = ((- _tmp14) + _tmp15)
        _res = numpy.zeros(3)
        _res[0] = (((((((- _self[4]) * _tmp6) - (_self[5] * _tmp3)) - (_self[6] * _tmp10)) + (_tmp10 * point[(2, 0)])) + (_tmp3 * point[(1, 0)])) + (_tmp6 * point[(0, 0)]))
        _res[1] = (((((((- _self[4]) * _tmp11) - (_self[5] * _tmp13)) - (_self[6] * _tmp16)) + (_tmp11 * point[(0, 0)])) + (_tmp13 * point[(1, 0)])) + (_tmp16 * point[(2, 0)]))
        _res[2] = (((((((- _self[4]) * _tmp18) - (_self[5] * _tmp19)) - (_self[6] * _tmp17)) + (_tmp17 * point[(2, 0)])) + (_tmp18 * point[(0, 0)])) + (_tmp19 * point[(1, 0)]))
        return _res

    def to_homogenous_matrix(self):
        """Return the 4x4 homogeneous matrix [[R, t], [0, 1]] (generated)."""
        _self = self.data
        _tmp0 = ((- 2) * (_self[1] ** 2))
        _tmp1 = (1 - (2 * (_self[2] ** 2)))
        _tmp2 = ((2 * _self[2]) * _self[3])
        _tmp3 = (2 * _self[1])
        _tmp4 = (_self[0] * _tmp3)
        _tmp5 = (2 * _self[0])
        _tmp6 = (_self[2] * _tmp5)
        _tmp7 = (_self[3] * _tmp3)
        _tmp8 = ((- 2) * (_self[0] ** 2))
        _tmp9 = (_self[3] * _tmp5)
        _tmp10 = (_self[2] * _tmp3)
        _res = numpy.zeros((4, 4))
        _res[(0, 0)] = (_tmp0 + _tmp1)
        _res[(1, 0)] = (_tmp2 + _tmp4)
        _res[(2, 0)] = (_tmp6 - _tmp7)
        _res[(3, 0)] = 0
        _res[(0, 1)] = ((- _tmp2) + _tmp4)
        _res[(1, 1)] = (_tmp1 + _tmp8)
        _res[(2, 1)] = (_tmp10 + _tmp9)
        _res[(3, 1)] = 0
        _res[(0, 2)] = (_tmp6 + _tmp7)
        _res[(1, 2)] = (_tmp10 - _tmp9)
        _res[(2, 2)] = ((_tmp0 + _tmp8) + 1)
        _res[(3, 2)] = 0
        _res[(0, 3)] = _self[4]
        _res[(1, 3)] = _self[5]
        _res[(2, 3)] = _self[6]
        _res[(3, 3)] = 1
        return _res

    def storage_dim():
        # Quaternion (4) + translation (3).  See class NOTE re decorator.
        return 7

    def to_storage(self):
        # Copy, so callers cannot mutate our data through the result.
        return list(self.data)

    def from_storage(cls, vec):
        # See class NOTE: likely a @classmethod.  Builds without __init__.
        instance = cls.__new__(cls)
        if isinstance(vec, list):
            instance.data = vec
        else:
            instance.data = list(vec)
        if (len(vec) != cls.storage_dim()):
            raise ValueError('{} has storage dim {}, got {}.'.format(cls.__name__, cls.storage_dim(), len(vec)))
        return instance

    def identity(cls):
        return ops.GroupOps.identity()

    def inverse(self):
        return ops.GroupOps.inverse(self)

    def compose(self, b):
        return ops.GroupOps.compose(self, b)

    def between(self, b):
        # between(a, b) — presumably a.inverse() * b; confirm in GroupOps.
        return ops.GroupOps.between(self, b)

    def tangent_dim():
        # 3 rotation + 3 translation DOF.  See class NOTE re decorator.
        return 6

    def from_tangent(cls, vec, epsilon=1e-08):
        if (len(vec) != cls.tangent_dim()):
            raise ValueError('Vector dimension ({}) not equal to tangent space dimension ({}).'.format(len(vec), cls.tangent_dim()))
        return ops.LieGroupOps.from_tangent(vec, epsilon)

    def to_tangent(self, epsilon=1e-08):
        return ops.LieGroupOps.to_tangent(self, epsilon)

    def retract(self, vec, epsilon=1e-08):
        # See class NOTE: self.tangent_dim() would pass self as written.
        if (len(vec) != self.tangent_dim()):
            raise ValueError('Vector dimension ({}) not equal to tangent space dimension ({}).'.format(len(vec), self.tangent_dim()))
        return ops.LieGroupOps.retract(self, vec, epsilon)

    def local_coordinates(self, b, epsilon=1e-08):
        return ops.LieGroupOps.local_coordinates(self, b, epsilon)

    def interpolate(self, b, alpha, epsilon=1e-08):
        return ops.LieGroupOps.interpolate(self, b, alpha, epsilon)

    def __eq__(self, other):
        # Exact storage equality; never equal to non-Pose3 values.
        if isinstance(other, Pose3):
            return (self.data == other.data)
        else:
            return False

    # The next two __mul__ definitions are dead code: each is immediately
    # replaced by the following definition (only the last one binds).
    def __mul__(self, other):
        pass

    def __mul__(self, other):
        pass

    def __mul__(self, other):
        # Pose * Pose composes; Pose * point applies the transform and
        # preserves the point's original shape.
        if isinstance(other, Pose3):
            return self.compose(other)
        elif (isinstance(other, numpy.ndarray) and hasattr(self, 'compose_with_point')):
            return getattr(self, 'compose_with_point')(other).reshape(other.shape)
        else:
            raise NotImplementedError('Cannot compose {} with {}.'.format(type(self), type(other)))
class TwoAlgebraicForms(SeveralAlgebraicForms):
    """A pair of algebraic forms, with a named accessor for each member."""

    def first(self):
        """Return the first of the two stored forms."""
        return self._forms[0]

    def second(self):
        """Return the second of the two stored forms."""
        return self._forms[1]
def _get_module_class(mod, clsname): if (mod is None): return None return getattr(mod, clsname)
_params(node_type=dace.libraries.standard.nodes.Reduce, name='pure')


class ReverseReduce(BackwardImplementation):
    """Backward (autodiff) implementation for dace Reduce nodes.

    Only sum-reductions are supported: the gradient of a sum is a
    broadcast of the output gradient back over the reduced axes.
    NOTE(review): the bare `_params(...)` call above looks like a mangled
    registration decorator (e.g. @autoregister_params) — confirm.
    """

    def backward_can_be_applied(node: Node, state: SDFGState, sdfg: SDFG) -> bool:
        # Applicable only when the write-conflict resolution is a sum.
        reduction_type = detect_reduction_type(node.wcr)
        if (reduction_type is not dtypes.ReductionType.Sum):
            return False
        return True

    def backward(forward_node: Node, context: BackwardContext, given_gradients: typing.List[typing.Optional[str]], required_gradients: typing.List[typing.Optional[str]]) -> typing.Tuple[(Node, BackwardResult)]:
        reduction_type = detect_reduction_type(forward_node.wcr)
        # A reduce node has exactly one input and one output.
        # NOTE(review): 'recieved' is a typo in these user-facing messages;
        # left untouched here since runtime strings are behavior.
        if (len(given_gradients) != 1):
            raise AutoDiffException('recieved invalid SDFG: reduce node {} should have exactly one output edge'.format(forward_node))
        if (len(required_gradients) != 1):
            raise AutoDiffException('recieved invalid SDFG: reduce node {} should have exactly one input edge'.format(forward_node))
        input_name = next(iter(required_gradients))
        in_desc = in_desc_with_name(forward_node, context.forward_state, context.forward_sdfg, input_name)
        output_name = next(iter(given_gradients))
        out_desc = out_desc_with_name(forward_node, context.forward_state, context.forward_sdfg, output_name)
        # axes=None means "reduce everything".
        all_axes: typing.List[int] = list(range(len(in_desc.shape)))
        reduce_axes: typing.List[int] = (all_axes if (forward_node.axes is None) else forward_node.axes)
        non_reduce_axes: typing.List[int] = [i for i in all_axes if (i not in reduce_axes)]
        result = BackwardResult.empty()
        if (reduction_type is dtypes.ReductionType.Sum):
            # Nested SDFG that copies the output gradient to every input
            # position that contributed to it (broadcast over reduce axes).
            sdfg = SDFG((('_reverse_' + str(reduction_type).replace('.', '_')) + '_'))
            state = sdfg.add_state()
            rev_input_conn_name = 'input_gradient'
            rev_output_conn_name = 'output_gradient'
            result.required_grad_names[output_name] = rev_output_conn_name
            result.given_grad_names[input_name] = rev_input_conn_name
            (_, rev_input_arr) = sdfg.add_array(rev_input_conn_name, shape=out_desc.shape, dtype=out_desc.dtype)
            (_, rev_output_arr) = sdfg.add_array(rev_output_conn_name, shape=in_desc.shape, dtype=in_desc.dtype)
            reduce_all_axes = ((forward_node.axes is None) or (set(range(len(in_desc.shape))) == set(forward_node.axes)))
            # Map over the full input index space; read the gradient at
            # the non-reduced coordinates (scalar '0' when fully reduced).
            state.add_mapped_tasklet((('_distribute_grad_' + str(reduction_type).replace('.', '_')) + '_'), {('i' + str(i)): '0:{}'.format(shape) for (i, shape) in enumerate(in_desc.shape)}, {'__in': Memlet.simple(rev_input_conn_name, ('0' if reduce_all_axes else ','.join((('i' + str(i)) for i in non_reduce_axes))))}, '__out = __in', {'__out': Memlet.simple(rev_output_conn_name, ','.join((('i' + str(i)) for i in all_axes)))}, external_edges=True)
            return (context.backward_state.add_nested_sdfg(sdfg, None, {rev_input_conn_name}, {rev_output_conn_name}), result)
        else:
            raise AutoDiffException("Unsupported reduction type '{}'".format(reduction_type))
class VGG19(nn.Module):
    """VGG-19 classifier adapted to *n_inputs* input channels.

    The stock 3-channel stem conv is replaced with an n_inputs-channel
    one; the rest of the VGG-19 feature stack is reused (randomly
    initialized, pretrained=False) and followed by the usual 3-layer
    classifier head sized for numCls classes.
    """

    def __init__(self, n_inputs=12, numCls=17):
        super().__init__()
        vgg = models.vgg19(pretrained=False)
        # New first conv for n_inputs channels, then VGG's remaining layers.
        self.encoder = nn.Sequential(nn.Conv2d(n_inputs, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)), *vgg.features[1:])
        # 8*8*512 flattened features — assumes 256x256 inputs after VGG's
        # five 2x poolings; TODO confirm expected input resolution.
        self.classifier = nn.Sequential(nn.Linear(((8 * 8) * 512), 4096, bias=True), nn.ReLU(inplace=True), nn.Dropout(), nn.Linear(4096, 4096, bias=True), nn.ReLU(inplace=True), nn.Dropout(), nn.Linear(4096, numCls, bias=True))
        # Project-local initializers (defined elsewhere in this module).
        self.apply(weights_init_kaiming)
        self.apply(fc_init_weights)

    def forward(self, x):
        x = self.encoder(x)
        x = x.view(x.size(0), (- 1))
        logits = self.classifier(x)
        return logits
def _info(obj, output=sys.stdout):
    """Print layout and flag diagnostics for a numpy array to *output*.

    Reports class, shape, strides, item size, flags, the raw data
    pointer, the dtype's byte order (mapped to 'big'/'little', with
    native/'not applicable' falling back to the host order), whether a
    byteswap would be needed to match the host, and the dtype itself.
    """
    # extra/tic/bp are formatting hooks kept trivial in this variant.
    extra = ''
    tic = ''
    bp = (lambda x: x)
    cls = getattr(obj, '__class__', type(obj))
    nm = getattr(cls, '__name__', cls)
    strides = obj.strides
    endian = obj.dtype.byteorder
    print('class: ', nm, file=output)
    print('shape: ', obj.shape, file=output)
    print('strides: ', strides, file=output)
    print('itemsize: ', obj.itemsize, file=output)
    print('aligned: ', bp(obj.flags.aligned), file=output)
    print('contiguous: ', bp(obj.flags.contiguous), file=output)
    print('fortran: ', obj.flags.fortran, file=output)
    print(('data pointer: %s%s' % (hex(obj.ctypes._as_parameter_.value), extra)), file=output)
    print('byteorder: ', end=' ', file=output)
    if (endian in ['|', '=']):
        # '|' (not applicable) or '=' (native): report the host's order.
        print(('%s%s%s' % (tic, sys.byteorder, tic)), file=output)
        byteswap = False
    elif (endian == '>'):
        print(('%sbig%s' % (tic, tic)), file=output)
        byteswap = (sys.byteorder != 'big')
    else:
        print(('%slittle%s' % (tic, tic)), file=output)
        byteswap = (sys.byteorder != 'little')
    print('byteswap: ', bp(byteswap), file=output)
    print(('type: %s' % obj.dtype), file=output)
class Attention(tf.keras.layers.Layer):
    """Attention over encoder outputs with selectable scoring.

    score_mode: 'general' (learned projection of the query), 'dot'
    (plain dot product), or 'concat'.  NOTE(review): the 'concat' branch
    is unimplemented — taking it leaves attn_weights unbound and raises
    NameError downstream; confirm whether it should be supported.
    """

    def __init__(self, encoder_dim, decoder_dim, score_mode='general'):
        super().__init__()
        self.encoder_dim = encoder_dim
        self.decoder_dim = decoder_dim
        self.score_mode = score_mode
        # Swaps the time and feature axes so keys can be matmul'd.
        self.permuate_1_2 = tf.keras.layers.Permute((2, 1))
        if (self.score_mode == 'general'):
            self.attn = tf.keras.layers.Dense(self.encoder_dim, use_bias=False)
        self.output_layer = tf.keras.layers.Dense(self.decoder_dim)

    def score(self, query, key, mask, dec_len):
        """Masked softmax attention weights, shape (batch, dec_len, enc_len)."""
        if (self.score_mode == 'general'):
            attn_weights = tf.matmul(self.attn(query), self.permuate_1_2(key))
        elif (self.score_mode == 'concat'):
            # Unimplemented — see class NOTE.
            pass
        elif (self.score_mode == 'dot'):
            attn_weights = tf.matmul(query, self.permuate_1_2(key))
        # Broadcast the encoder padding mask over decoder positions and
        # push masked logits to a large negative value before softmax.
        mask = tf.repeat(tf.expand_dims(mask, 1), repeats=dec_len, axis=1)
        attn_weights = mask_fill(attn_weights, mask, (- 1e+20))
        attn_weights = tf.nn.softmax(attn_weights, axis=2)
        return attn_weights

    def call(self, decoder_output, encoder_output, enc_mask, dec_len):
        attn_weights = self.score(decoder_output, encoder_output, enc_mask, dec_len)
        # Weighted sum of encoder states, then combine with decoder state.
        context_embed = tf.matmul(attn_weights, encoder_output)
        attn_output = tf.tanh(self.output_layer(tf.concat([context_embed, decoder_output], axis=(- 1))))
        return attn_output
def example():
    """Drive the ObjectSelectorWrapper on a picking task with a scripted
    action sequence (50 episodes), with visualization enabled.

    Actions are [mode, a, b] triples consumed by ObjectSelectorWrapper —
    the exact semantics of each slot live in the wrapper; confirm there.
    """
    task = generate_task(task_generator_id='picking')
    env = CausalWorld(task=task, enable_visualization=True)
    env = ObjectSelectorWrapper(env)
    for _ in range(50):
        obs = env.reset()
        for i in range(70):
            (obs, reward, done, info) = env.step([0, 1, 0])
        for i in range(20):
            (obs, reward, done, info) = env.step([0, 0, 1])
        for i in range(50):
            (obs, reward, done, info) = env.step([0, 5, 0])
        for i in range(20):
            (obs, reward, done, info) = env.step([0, 0, 1])
        for i in range(50):
            (obs, reward, done, info) = env.step([0, 2, 0])
    env.close()
def main():
    """CLI dispatcher: resolve sys.argv[1] to a class in the s3prl.problem
    package and run it with the remaining arguments.

    Improvement: the original bare ``except:`` swallowed every exception
    (including KeyboardInterrupt/SystemExit) and printed the usage text.
    Only a missing argument (IndexError) or an unknown problem name
    (AttributeError) should trigger usage; anything else is a real bug
    and now propagates.
    """
    try:
        cls = getattr(problem, sys.argv[1])
    except (IndexError, AttributeError):
        available_problems = [name for name in dir(problem) if ((not name.startswith('_')) and isinstance(getattr(problem, name), type))]
        print(traceback.format_exc())
        # NOTE(review): the original triple-quoted usage text was
        # whitespace-mangled in extraction; line breaks reconstructed.
        print(f'''Usage:
1. s3prl-main [PROBLEM] -h
2. python3 -m s3prl.main [PROBLEM] -h
3. python3 s3prl/main.py [PROBLEM] -h

PROBLEM should be an available class name in the s3prl.problem package.
Available options: {', '.join(available_problems)}''')
        exit(0)
    cls().main(sys.argv[2:])
class ResnetBlockGroupNormShallowConv1d(nn.Module):
    """Shallow pre-activation residual block on 1-D features:
    GroupNorm -> activation -> (optional dropout) -> 1x1 grouped conv,
    plus an identity or 1x1 projection shortcut.

    NOTE(review): with only one conv, ``size_h`` is the residual branch's
    output width, so it must match ``size_out`` for the final addition —
    confirm callers keep them equal.
    """

    def __init__(self, size_in, groups, gn_groups=4, size_out=None, size_h=None, dropout_prob=0.0, leaky=False):
        super().__init__()
        # Default output/hidden widths from the input width.
        if (size_out is None):
            size_out = size_in
        if (size_h is None):
            size_h = min(size_in, size_out)
        if (dropout_prob > 0.0):
            self.dropout = nn.Dropout(dropout_prob, inplace=True)
        else:
            self.dropout = None
        self.size_in = size_in
        self.size_h = size_h
        self.size_out = size_out
        # Project-local GroupNorm1d; (groups * gn_groups) normalization groups.
        self.gn_0 = GroupNorm1d((groups * gn_groups), size_in)
        self.fc_0 = nn.Conv1d(size_in, size_h, 1, groups=groups, bias=False)
        if (not leaky):
            self.actvn = nn.ReLU()
        else:
            self.actvn = nn.LeakyReLU(0.1)
        # Identity shortcut when widths match, else a 1x1 grouped projection.
        if (size_in == size_out):
            self.shortcut = None
        else:
            self.shortcut = nn.Conv1d(size_in, size_out, 1, bias=False, groups=groups)

    def forward(self, x):
        # Pre-activation order: dx = conv(dropout?(act(norm(x)))).
        if (self.dropout is not None):
            dx = self.fc_0(self.dropout(self.actvn(self.gn_0(x))))
        else:
            dx = self.fc_0(self.actvn(self.gn_0(x)))
        if (self.shortcut is not None):
            x_s = self.shortcut(x)
        else:
            x_s = x
        return (x_s + dx)
def _c(alf, bet, i, j):
    """Entry (i, j) of a banded recurrence-coefficient matrix for Jacobi-type
    polynomials with parameters ``alf``/``bet`` (exact sympy rationals).

    The ``delta(i, j - 1)`` / ``delta(i, j + 1)`` factors place the two terms
    on the super-/sub-diagonal; the extra diagonal term only appears when
    alf != bet.  NOTE(review): assumed to follow the standard three-term
    Jacobi recurrence — verify against the module's reference.
    """
    # Off-diagonal contributions (super- and sub-diagonal, via delta()).
    f = ((((((sp.S(2) * ((i + alf) + 1)) * ((i + bet) + sp.S(1))) * (((i + alf) + bet) + sp.S(2))) / (((((sp.S(2) * i) + alf) + bet) + sp.S(2)) * ((((sp.S(2) * i) + alf) + bet) + sp.S(3)))) * delta(i, (j - 1))) - (((((sp.S(2) * (i - sp.S(1))) * i) * ((i + alf) + bet)) / (((((sp.S(2) * i) + alf) + bet) - sp.S(1)) * (((sp.S(2) * i) + alf) + bet))) * delta(i, (j + 1))))
    # Diagonal contribution vanishes in the symmetric (alf == bet) case.
    if (alf != bet):
        f += (((((sp.S(2) * i) * (alf - bet)) * (((i + alf) + bet) + sp.S(1))) / ((((sp.S(2) * i) + alf) + bet) * ((((sp.S(2) * i) + alf) + bet) + sp.S(2)))) * delta(i, j))
    return f
def local_cat_all_gather(tensors):
    """All-gather ``tensors`` across the local (per-node) process group and
    concatenate the gathered copies along dim 0.

    The ``ones_like`` buffers are just pre-allocated receive slots; their
    initial contents are overwritten by ``all_gather``.
    """
    tensors_gather = [torch.ones_like(tensors) for _ in range(get_local_size())]
    # Synchronous gather restricted to this node's process group.
    torch.distributed.all_gather(tensors_gather, tensors, async_op=False, group=_LOCAL_PROCESS_GROUP)
    output = torch.cat(tensors_gather, dim=0)
    return output
def is_relative_local_path(path: str):
    """Return True when *path* is a plain relative filesystem path:
    it carries no URL scheme separator and is not absolute."""
    decoded = os.fsdecode(path)
    if '://' in decoded:
        return False
    return not os.path.isabs(decoded)
def extract_times(filename):
    """Read step/time metadata from a mesh file and compute per-step deltas.

    Returns ``(steps, times, nts, dts)``; ``dts`` are forward differences of
    ``times`` with a trailing 0 so ``len(dts) == len(times)``.
    """
    io = MeshIO.any_from_filename(filename)
    (steps, times, nts) = io.read_times()
    # to_end=0 pads the last step, whose "next" time is unknown.
    dts = nm.ediff1d(times, to_end=0)
    return (steps, times, nts, dts)
def W_spiky_gradient(R, h):
    """Gradient of the spiky SPH smoothing kernel for displacement vector R
    and support radius h: -45/(pi*h^6) * (h-r)^2 * R/|R| inside the support,
    zero at the origin (avoids division by r) and outside the support.
    """
    r = R.norm()
    res = ti.Vector([0.0, 0.0, 0.0])
    if (r == 0.0):
        # Gradient is singular at r == 0; return zero by convention.
        res = ti.Vector([0.0, 0.0, 0.0])
    elif (r <= h):
        h3 = ((h * h) * h)
        h6 = (h3 * h3)
        h_r = (h - r)
        # -45/(pi h^6) * (h - r)^2, directed along R/|R|.
        res = (((((- 45.0) / (pi * h6)) * h_r) * h_r) * (R / r))
    else:
        res = ti.Vector([0.0, 0.0, 0.0])
    return res
def check_folders(*folders):
    """Return True iff every given path exists on disk (vacuously True
    for no arguments)."""
    return all(os.path.exists(folder) for folder in folders)
def convert_checks(ctx: click.core.Context, param: click.core.Parameter, value: tuple[list[str]]) -> list[str]:
    """Click callback: flatten the tuple of string lists collected from a
    repeatable option into one flat list."""
    flattened: list[str] = []
    for chunk in value:
        flattened.extend(chunk)
    return flattened
class Speech2TextFeatureExtractor(SequenceFeatureExtractor):
    """Speech2Text feature extractor: Kaldi-style log-mel filter banks with
    optional per-utterance cepstral mean/variance normalization (CMVN)."""

    model_input_names = ['input_features', 'attention_mask']

    def __init__(self, feature_size=80, sampling_rate=16000, num_mel_bins=80, padding_value=0.0, do_ceptral_normalize=True, normalize_means=True, normalize_vars=True, **kwargs):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.num_mel_bins = num_mel_bins
        self.do_ceptral_normalize = do_ceptral_normalize
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.return_attention_mask = True

    def _extract_fbank_features(self, waveform: np.ndarray) -> np.ndarray:
        """Compute Kaldi fbank features for a single 1-D waveform."""
        # Kaldi expects 16-bit-scaled PCM values.
        waveform = (waveform * (2 ** 15))
        waveform = torch.from_numpy(waveform).unsqueeze(0)
        features = ta_kaldi.fbank(waveform, num_mel_bins=self.num_mel_bins, sample_frequency=self.sampling_rate)
        return features.numpy()

    # BUG FIX: this was a plain function inside the class but is invoked as
    # ``self.utterance_cmvn(x, n, means, vars, pad)`` with five positional
    # arguments — without @staticmethod, ``self`` would bind to ``x`` and
    # shift every argument.  Declared static, as in upstream transformers.
    @staticmethod
    def utterance_cmvn(x: np.ndarray, input_length: int, normalize_means: Optional[bool] = True, normalize_vars: Optional[bool] = True, padding_value: float = 0.0) -> np.ndarray:
        """Mean/variance-normalize ``x`` using only its first ``input_length``
        frames, then restore the padding region to ``padding_value``."""
        if normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)
        if (input_length < x.shape[0]):
            x[input_length:] = padding_value
        x = x.astype(np.float32)
        return x

    def normalize(self, input_features: List[np.ndarray], attention_mask: Optional[np.ndarray] = None) -> List[np.ndarray]:
        """Apply per-utterance CMVN to each feature matrix, using the
        attention mask (when given) to find the unpadded length."""
        lengths = (attention_mask.sum((- 1)) if (attention_mask is not None) else [x.shape[0] for x in input_features])
        return [self.utterance_cmvn(x, n, self.normalize_means, self.normalize_vars, self.padding_value) for (x, n) in zip(input_features, lengths)]

    def __call__(self, raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], padding: Union[bool, str, PaddingStrategy] = False, max_length: Optional[int] = None, truncation: bool = False, pad_to_multiple_of: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, sampling_rate: Optional[int] = None, return_attention_mask: Optional[bool] = None, **kwargs) -> BatchFeature:
        """Featurize raw speech: extract fbanks, pad to a batch, optionally
        CMVN-normalize, and convert to the requested tensor type.

        Raises ValueError when ``sampling_rate`` disagrees with the one this
        extractor was configured with.
        """
        if (sampling_rate is not None):
            if (sampling_rate != self.sampling_rate):
                raise ValueError(f'The model corresponding to this feature extractor: {self} was trained using a sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with {self.sampling_rate} and not {sampling_rate}.')
        else:
            logger.warning('It is strongly recommended to pass the `sampling_rate` argument to this function. Failing to do so can result in silent errors that might be hard to debug.')
        # A batch is a list/tuple whose first element is itself array-like.
        is_batched = bool((isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], np.ndarray) or isinstance(raw_speech[0], (tuple, list)))))
        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif ((not is_batched) and (not isinstance(raw_speech, np.ndarray))):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif (isinstance(raw_speech, np.ndarray) and (raw_speech.dtype is np.dtype(np.float64))):
            raw_speech = raw_speech.astype(np.float32)
        # Normalize to a batch (list) of waveforms.
        if (not is_batched):
            raw_speech = [raw_speech]
        features = [self._extract_fbank_features(waveform) for waveform in raw_speech]
        encoded_inputs = BatchFeature({'input_features': features})
        padded_inputs = self.pad(encoded_inputs, padding=padding, max_length=max_length, truncation=truncation, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, **kwargs)
        # self.pad may return plain lists; coerce back to float32 arrays.
        input_features = padded_inputs.get('input_features')
        if isinstance(input_features[0], list):
            padded_inputs['input_features'] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
        attention_mask = padded_inputs.get('attention_mask')
        if (attention_mask is not None):
            padded_inputs['attention_mask'] = [np.asarray(array, dtype=np.int32) for array in attention_mask]
        if self.do_ceptral_normalize:
            # Only trust the mask for lengths when padding was actually applied.
            attention_mask = (np.array(attention_mask, dtype=np.int32) if (self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD) else None)
            padded_inputs['input_features'] = self.normalize(padded_inputs['input_features'], attention_mask=attention_mask)
        if (return_tensors is not None):
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
        return padded_inputs
class ConvEncoder(nn.Module):
    """Convolutional VAE encoder producing either samples (``encode``) or
    samples plus log q(z|x) (``encode_logprob``), with an optional IAF
    posterior flow when ``flow_depth > 0``.
    """

    def __init__(self, latent_size, flow_depth=2, logprob=False):
        super().__init__()
        # Select the forward behaviour once at construction time.
        if logprob:
            self.encode_func = self.encode_logprob
        else:
            self.encode_func = self.encode
        dim = 64
        # Conv stack maps a 3-channel image down to a latent_size vector.
        self.ls = nn.Sequential(nn.Conv2d(3, dim, 5, 2, 2), nn.LeakyReLU(0.2), conv_ln_lrelu(dim, (dim * 2)), conv_ln_lrelu((dim * 2), (dim * 4)), conv_ln_lrelu((dim * 4), (dim * 8)), nn.Conv2d((dim * 8), latent_size, 4))
        if (flow_depth > 0):
            # IAF posterior; the third fc chunk becomes the flow context.
            hidden_size = (latent_size * 2)
            flow_layers = [flow.InverseAutoregressiveFlow(latent_size, hidden_size, latent_size) for _ in range(flow_depth)]
            flow_layers.append(flow.Reverse(latent_size))
            self.q_z_flow = flow.FlowSequential(*flow_layers)
            self.enc_chunk = 3
        else:
            self.q_z_flow = None
            self.enc_chunk = 2
        # fc emits enc_chunk slices: mu, logvar[, flow context].
        fc_out_size = (latent_size * self.enc_chunk)
        self.fc = nn.Sequential(nn.Linear(latent_size, fc_out_size), nn.LayerNorm(fc_out_size), nn.LeakyReLU(0.2), nn.Linear(fc_out_size, fc_out_size))

    def forward(self, input, k_samples=5):
        return self.encode_func(input, k_samples)

    def encode_logprob(self, input, k_samples=5):
        """Return (z, log q(z|x)) with k_samples draws per input."""
        x = self.ls(input)
        x = x.view(input.shape[0], (- 1))
        fc_out = self.fc(x).chunk(self.enc_chunk, dim=1)
        (mu, logvar) = fc_out[:2]
        # softplus keeps the scale strictly positive.
        std = F.softplus(logvar)
        qz_x = Normal(mu, std)
        z = qz_x.rsample([k_samples])
        log_q_z = qz_x.log_prob(z)
        if self.q_z_flow:
            (z, log_q_z_flow) = self.q_z_flow(z, context=fc_out[2])
            log_q_z = (log_q_z + log_q_z_flow).sum((- 1))
        else:
            log_q_z = log_q_z.sum((- 1))
        return (z, log_q_z)

    def encode(self, input, _):
        """Return a single reparameterized sample z (second arg ignored,
        kept for signature parity with encode_logprob)."""
        x = self.ls(input)
        x = x.view(input.shape[0], (- 1))
        fc_out = self.fc(x).chunk(self.enc_chunk, dim=1)
        (mu, logvar) = fc_out[:2]
        std = F.softplus(logvar)
        qz_x = Normal(mu, std)
        z = qz_x.rsample()
        if self.q_z_flow:
            (z, _) = self.q_z_flow(z, context=fc_out[2])
        return z
def distinct(seqs):
    """Compute distinct-1/2 diversity metrics over token sequences.

    Returns (intra_dist1, intra_dist2, inter_dist1, inter_dist2):
    intra = per-sequence unique-ngram ratio averaged over the batch,
    inter = unique-ngram ratio pooled over the whole batch. The tiny
    epsilons guard against empty sequences.
    """
    per_seq_d1 = []
    per_seq_d2 = []
    pooled_unigrams = Counter()
    pooled_bigrams = Counter()
    for seq in seqs:
        seq_unigrams = Counter(seq)
        seq_bigrams = Counter(zip(seq, seq[1:]))
        per_seq_d1.append((len(seq_unigrams) + 1e-12) / (len(seq) + 1e-05))
        per_seq_d2.append((len(seq_bigrams) + 1e-12) / (max(0, len(seq) - 1) + 1e-05))
        pooled_unigrams.update(seq_unigrams)
        pooled_bigrams.update(seq_bigrams)
    inter_dist1 = (len(pooled_unigrams) + 1e-12) / (sum(pooled_unigrams.values()) + 1e-05)
    inter_dist2 = (len(pooled_bigrams) + 1e-12) / (sum(pooled_bigrams.values()) + 1e-05)
    return (np.average(per_seq_d1), np.average(per_seq_d2), inter_dist1, inter_dist2)
def dla34(cfg, pretrained=None, **kwargs):
    """Build a DLA-34 backbone (levels [1,1,1,2,2,1], channels 16..512,
    BasicBlock) and optionally load pretrained 'dla34' weights."""
    model = DLA(cfg, [1, 1, 1, 2, 2, 1], [16, 32, 64, 128, 256, 512], block=BasicBlock, **kwargs)
    if (pretrained is not None):
        model.load_pretrained_model(pretrained, 'dla34')
    return model
# Declarative tool spec: delete an Evernote notebook by id.
# (No docstring on purpose — some tool frameworks surface the docstring as
# the description; `summary` already serves that role.)
class EvernoteManagerDeleteNotebook(VirtualFunctionTool):
    name = 'EvernoteManagerDeleteNotebook'
    summary = 'Delete a notebook by its unique identifier.'
    # One required string argument identifying the notebook to delete.
    parameters: List[ArgParameter] = [{'name': 'notebook_id', 'type': 'string', 'description': 'The unique identifier of the notebook.', 'required': True}]
    # Boolean success flag.
    returns: List[ArgReturn] = [{'name': 'success', 'type': 'boolean', 'description': 'Whether the deletion was successful.'}]
    # Raised when the notebook id is unknown.
    exceptions: List[ArgException] = [{'name': 'NotFoundException', 'description': 'The notebook_id does not exist.'}]
def add_csc_loss2(model):
    """Attach CSC (class-specific context) losses for the mask branch and the
    CPG map to a Caffe2/Detectron WSL model; returns the merged dict of
    loss gradients.
    """
    loss_gradients = {}
    if cfg.WSL.MASK_SOFTMAX:
        # Split off the first channel before scaling.
        # NOTE(review): Split outputs 'mask_fc_up_split' but CPGScale reads
        # 'mask_fc8_up_split' (extra '8') — verify the blob names match in
        # the net, otherwise this branch references a missing blob.
        model.net.Split(['mask_fc8_up'], ['mask_fc_up_split', 'mask_fc_up_split_notuse'], split=[1, (model.num_classes - 1)], axis=1)
        model.net.CPGScale(['mask_fc8_up_split', 'labels_oh', 'cls_prob'], 'mask_fc8_up_scale', tau=cfg.WSL.CPG_TAU)
    else:
        model.net.CPGScale(['mask_fc8_up', 'labels_oh', 'cls_prob'], 'mask_fc8_up_scale', tau=cfg.WSL.CPG_TAU)
    # Mask-branch CSC loss, down-weighted to 0.01.
    loss_gradients_back = wsl_head.add_csc_loss(model, 'mask_fc8_up_scale', 'cls_prob', prefix='mask_', loss_weight=0.01)
    loss_gradients.update(loss_gradients_back)
    # CPG-map CSC loss at default weight.
    loss_gradients_csc = wsl_head.add_csc_loss(model, 'cpg', 'cls_prob', prefix='')
    loss_gradients.update(loss_gradients_csc)
    return loss_gradients
class ResNet(nn.Module):
    """CIFAR-style ResNet (3x3 stride-1 stem, 10 classes by default), else
    following the standard torchvision layout: four stages, optional
    dilation in place of stride, Kaiming init, optional zero-init of each
    block's last BN so residual branches start as identity.
    """

    def __init__(self, block, layers, num_classes=10, zero_init_residual=False, groups=1, width_per_group=64, replace_stride_with_dilation=None, norm_layer=None):
        super(ResNet, self).__init__()
        if (norm_layer is None):
            norm_layer = nn.BatchNorm2d
        self._norm_layer = norm_layer
        self.inplanes = 64
        self.dilation = 1
        if (replace_stride_with_dilation is None):
            # One flag per downsampling stage (layer2..layer4).
            replace_stride_with_dilation = [False, False, False]
        if (len(replace_stride_with_dilation) != 3):
            raise ValueError('replace_stride_with_dilation should be None or a 3-element tuple, got {}'.format(replace_stride_with_dilation))
        self.groups = groups
        self.base_width = width_per_group
        # 3x3/stride-1 stem — the ImageNet variant uses 7x7/stride-2.
        self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = norm_layer(self.inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2, dilate=replace_stride_with_dilation[0])
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2, dilate=replace_stride_with_dilation[1])
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2, dilate=replace_stride_with_dilation[2])
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear((512 * block.expansion), num_classes)
        # Kaiming init for convs; unit/zero affine for norm layers.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        if zero_init_residual:
            # Zero the final BN of each residual branch -> identity at init.
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)
                elif isinstance(m, BasicBlock):
                    nn.init.constant_(m.bn2.weight, 0)

    def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
        """Stack ``blocks`` residual blocks; first may downsample/project."""
        norm_layer = self._norm_layer
        downsample = None
        previous_dilation = self.dilation
        if dilate:
            # Trade the stride for dilation to keep spatial resolution.
            self.dilation *= stride
            stride = 1
        if ((stride != 1) or (self.inplanes != (planes * block.expansion))):
            downsample = nn.Sequential(conv1x1(self.inplanes, (planes * block.expansion), stride), norm_layer((planes * block.expansion)))
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample, self.groups, self.base_width, previous_dilation, norm_layer))
        self.inplanes = (planes * block.expansion)
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes, groups=self.groups, base_width=self.base_width, dilation=self.dilation, norm_layer=norm_layer))
        return nn.Sequential(*layers)

    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        # Flatten (N, C, 1, 1) -> (N, C) for the classifier head.
        x = x.reshape(x.size(0), (- 1))
        x = self.fc(x)
        return x
class PAM(_SelfAttentionBlock):
    """Position Attention Module (DANet-style): spatial self-attention over
    the input, blended back via a learnable, zero-initialized scale so the
    module starts as the identity mapping.
    """

    def __init__(self, in_channels, channels):
        super(PAM, self).__init__(key_in_channels=in_channels, query_in_channels=in_channels, channels=channels, out_channels=in_channels, share_key_query=False, query_downsample=None, key_downsample=None, key_query_num_convs=1, key_query_norm=False, value_out_num_convs=1, value_out_norm=False, matmul_norm=False, with_out=False, conv_cfg=None, norm_cfg=None, act_cfg=None)
        # gamma starts at 0 -> residual branch is initially disabled.
        self.gamma = Scale(0)

    def forward(self, x):
        # Self-attention: query and key both come from x.
        out = super(PAM, self).forward(x, x)
        out = (self.gamma(out) + x)
        return out
def getScores(directory, baselineType='WEIGHTED'):
    """Plot CASC F1 scores against gamma for motif results under *directory*.

    Loads per-gamma motif pickles, scores them against the ground truth, and
    shows an F1-vs-gamma matplotlib figure. Side effects only (prints, plot).

    NOTE(review): ``baselines`` and ``labels`` are computed but never used in
    the plot — possibly leftovers from a removed baseline curve.
    """
    cascscores = []
    trueResult = getTrueResult()
    baselineScore = getBaselineScore(baselineType)
    print(baselineType, baselineScore)
    baselines = [baselineScore for _ in range(len(GAMMAS))]
    labels = [float(v) for v in GAMMAS]
    for g in GAMMAS:
        motifName = ('%s/%s/motifs.pkl' % (directory, g))
        motifRanksName = ('%s/%s/motifRanked.pkl' % (directory, g))
        motifResult = getMotifResult(motifName, motifRanksName)
        cascscores.append(f1_score(trueResult, motifResult))
    plt.figure(1, figsize=(7, 4))
    print(cascscores)
    plt.plot(GAMMAS, cascscores, '-bv', label='CASC')
    plt.xlabel('$\\gamma$')
    plt.ylim(ymin=0.2, ymax=1)
    plt.ylabel('F1 Score')
    plt.legend(loc='lower center', ncol=2, fancybox=False, edgecolor='black')
    plt.show()
class AdetCheckpointer(DetectionCheckpointer):
    """Checkpointer that additionally understands Caffe2-era ``.pkl`` weight
    files and enables name-matching heuristics for known legacy backbones."""

    def _load_file(self, filename):
        """Load a checkpoint; .pkl files are treated as pickled Caffe2/D2
        weights, everything else goes through the parent loader."""
        if filename.endswith('.pkl'):
            with PathManager.open(filename, 'rb') as f:
                data = pickle.load(f, encoding='latin1')
            if (('model' in data) and ('__author__' in data)):
                # Already a Detectron2-style checkpoint: use as-is.
                self.logger.info("Reading a file from '{}'".format(data['__author__']))
                return data
            else:
                # Raw Caffe2 blob dump: strip momentum/bookkeeping blobs and
                # mark for heuristic name matching.
                if ('blobs' in data):
                    data = data['blobs']
                data = {k: v for (k, v) in data.items() if (not k.endswith('_momentum'))}
                if ('weight_order' in data):
                    del data['weight_order']
                return {'model': data, '__author__': 'Caffe2', 'matching_heuristics': True}
        loaded = super()._load_file(filename)
        if ('model' not in loaded):
            loaded = {'model': loaded}
        # Known third-party releases (LPF / DLA) need heuristic key matching.
        basename = os.path.basename(filename).lower()
        if (('lpf' in basename) or ('dla' in basename)):
            loaded['matching_heuristics'] = True
        return loaded
# FIX: the '@' was missing from the route registration, leaving
# ``_window.route(...)`` as a dead statement; restored as a decorator.
@_window.route('/chat', methods=['GET'])
def run_client():
    """Serve the chat page: generate a greeting, mint a new conversation id,
    and bootstrap its dialogue context before rendering the template."""
    say_hi = orchestrator.policy_layer.response.greeting()
    cid = str(uuid.uuid4())
    dmgr.get_or_create_ctx(cid, orchestrator.policy_layer.state_manager.entity_manager.entity_config, orchestrator.policy_layer.state_manager.task_config, orchestrator.policy_layer.bot_config)
    return render_template('chat.html', greetings=say_hi, cid=cid)
class CellularBasis(CombinatorialFreeModule):
    """The cellular basis of a cellular algebra ``A``, indexed by triples
    (cell, s, t) with s, t in the cell module indices of the cell; coercions
    to/from ``A`` are registered on construction."""

    def __init__(self, A, to_algebra=None, from_algebra=None, **kwargs):
        self._algebra = A
        I = [(la, s, t) for la in A.cell_poset() for s in A.cell_module_indices(la) for t in A.cell_module_indices(la)]
        prefix = kwargs.pop('prefix', 'C')
        cat = Algebras(A.category().base_ring()).FiniteDimensional().WithBasis().Cellular()
        CombinatorialFreeModule.__init__(self, A.base_ring(), I, prefix=prefix, bracket=False, category=cat, **kwargs)
        # Default conversion maps come from the algebra itself.
        if (from_algebra is None):
            from_algebra = A._to_cellular_element
        if (to_algebra is None):
            to_algebra = A._from_cellular_index
        # Build whichever direction is implemented; invert for the other.
        if (from_algebra is not NotImplemented):
            to_cellular = A.module_morphism(from_algebra, codomain=self, category=cat)
        if (to_algebra is NotImplemented):
            from_cellular = (~ to_cellular)
        else:
            from_cellular = self.module_morphism(to_algebra, codomain=A, category=cat)
            if (from_algebra is NotImplemented):
                to_cellular = (~ from_cellular)
        to_cellular.register_as_coercion()
        from_cellular.register_as_coercion()

    def _repr_(self):
        return 'Cellular basis of {}'.format(self._algebra)

    def _latex_term(self, x):
        """LaTeX for basis element C^{la}_{(s,t)}; falls back to str() when
        latex() produces an unrenderable \\text{\\textt...} wrapper."""
        from sage.misc.latex import latex
        la = x[0]
        m = (x[1], x[2])
        sla = latex(la)
        if (sla.find('\\text{\\textt') != (- 1)):
            sla = str(la)
        sm = latex(m)
        if (sm.find('\\text{\\textt') != (- 1)):
            sm = str(m)
        return ('C^{%s}_{%s}' % (sla, sm))

    def cellular_basis_of(self):
        """The algebra this is the cellular basis of."""
        return self._algebra

    def cell_poset(self):
        return self._algebra.cell_poset()

    def cell_module_indices(self, la):
        return self._algebra.cell_module_indices(la)

    def cellular_basis(self):
        # This basis is already cellular.
        return self

    # FIX: the source contained stray ``_method`` tokens (a syntax error) —
    # residue of stripped ``@cached_method`` decorators; restored.
    @cached_method
    def one(self):
        return self(self._algebra.one())

    @cached_method
    def product_on_basis(self, x, y):
        # Multiply by round-tripping through the algebra's own product.
        A = self._algebra
        return self((A(self.monomial(x)) * A(self.monomial(y))))
# FIX: the decorator had been truncated to ``.async_execution``; restored to
# the torch.distributed.rpc marker so the returned Future is awaited by RPC.
@rpc.functions.async_execution
def async_add_nested(to, x, y, z):
    """Asynchronously compute (x + y) on worker *to*, then add z locally via
    a Future continuation; returns the chained Future."""
    return rpc.rpc_async(to, async_add, args=(to, x, y)).then((lambda fut: (fut.wait() + z)))
def load_json_file(file_path):
    """Parse *file_path* as JSON and return the decoded object."""
    with open(file_path, 'r') as handle:
        return json.load(handle)
def validate_gemv_sdfg(csdfg, matrix_shape, x_shape, y_shape):
    """Run a compiled GEMV SDFG on random inputs and assert it matches
    ``np.matmul(A, x)``.

    ``matrix_shape`` is 3-D (tiled): M passed to the SDFG is the product of
    the two leading dims, N the last; a flat y is reshaped back to the tiled
    layout before comparison.
    """
    A = rand_float(matrix_shape)
    x = rand_float(x_shape)
    y = rand_float(y_shape)
    expect = np.matmul(A, x)
    csdfg(A=A, x=x, y=y, M=(matrix_shape[0] * matrix_shape[1]), N=matrix_shape[2])
    if (len(y_shape) == 1):
        # Fold the flat output back into (tiles, tile_rows) for comparison.
        y = np.reshape(y, [matrix_shape[0], matrix_shape[1]])
    assert np.allclose(y, expect)
def test_IndexedArray_NumpyArray():
    """An IndexedArray over float64 content must surface as a 'double'
    column when exported to ROOT RDataFrame."""
    index = ak.index.Index(np.array([2, 2, 0, 1, 4, 5, 4], np.int64))
    content = ak.contents.numpyarray.NumpyArray(np.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5]))
    ak_array_in = ak.contents.indexedarray.IndexedArray(index, content)
    data_frame = ak.to_rdataframe({'x': ak_array_in})
    assert data_frame.GetColumnType('x') == 'double'
def get_bl_example(ann, scene):
    """Build a grounded GQA answer example: strip the leading Yes,/No,, tag
    annotated word spans with box placeholders, and append the short answer.

    Returns (boxes, answer, boxes_seq).
    """
    boxes = []
    boxes_seq = []
    origin_sent = ann['fullAnswer']
    origin_sent = re.sub('(?:^Yes,)|(?:^No,)', '', origin_sent).strip()
    sent = list(origin_sent.split())
    for (span, rids_str) in ann['annotations']['fullAnswer'].items():
        span = tuple(map(int, span.split(':')))
        if (len(span) == 1):
            span = [span[0], (span[0] + 1)]
        # Attach the placeholder to the last word of the annotated span.
        sent[(span[1] - 1)] = f'{sent[(span[1] - 1)]}{BOXES_PLACEHOLDER}'
        rids = rids_str.split(',')
        boxes_idx = add_boxes_by_rids(boxes, rids, scene)
        boxes_seq.append(boxes_idx)
    # BUG FIX: the sentence was split on whitespace, so it must be re-joined
    # with spaces — ''.join() produced run-together text; also separate the
    # trailing short-answer sentence with a space.
    answer = ' '.join(sent)
    answer += f" The answer is {ann['answer']}."
    return (boxes, answer, boxes_seq)
def accuracy(y_true, y_pred, weights=None):
    """Fraction of predictions equal to the truth, optionally weighted
    per-sample via ``weights`` (passed straight to ``np.average``)."""
    return np.average(y_true == y_pred, weights=weights)
def _format(val: Any, output_format: str='standard', errors: str='coarse') -> Any:
    """Normalize a Chinese RIC value into the requested output format.

    Returns a one-element list: [np.nan] for null values, the formatted
    value on success, and on validation failure either the raw value
    (errors='ignore'), np.nan (other non-raise modes), or raises
    ValueError (errors='raise').
    """
    val = str(val)
    result: Any = []
    if (val in NULL_VALUES):
        return [np.nan]
    if (not validate_cn_ric(val)):
        if (errors == 'raise'):
            raise ValueError(f'Unable to parse value {val}')
        error_result = (val if (errors == 'ignore') else np.nan)
        return [error_result]
    # Dispatch on the requested representation of the validated RIC.
    if (output_format == 'compact'):
        result = ([ric.compact(val)] + result)
    elif (output_format == 'standard'):
        result = ([ric.format(val)] + result)
    elif (output_format == 'birthdate'):
        result = ([ric.get_birth_date(val)] + result)
    elif (output_format == 'birthplace'):
        result = ([ric.get_birth_place(val)] + result)
    return result
def assert_schema(target, expected):
    """Assert that *target* matches *expected*: enum schemas compare their
    (order-insensitive) enum values and must contain only that key;
    everything else compares by plain equality."""
    if 'enum' not in expected:
        assert target == expected
        return
    assert len(target) == 1
    assert sorted(target['enum']) == sorted(expected['enum'])
def gemm_distr2(alpha: dc.float64, beta: dc.float64, C: dc.float64[(lNI, lNJ)], A: dc.float64[(lNI, lNKa)], B: dc.float64[(lNKb, lNJ)]):
    """Distributed GEMM (DaCe): C = alpha * (A @ B) + beta * C over local
    tiles; the global output grid is (lNI * Px) x (lNJ * Py) with shared
    contraction dimension NK.
    """
    tmp = distr.MatMult(A, B, ((lNI * Px), (lNJ * Py), NK))
    C[:] = ((alpha * tmp) + (beta * C))
def dump_framework():
    """Print a LaTeX results table (rows: dataset x norm, cols: method)
    from the saved robustness-result JSON files. Output only; no return.

    NOTE(review): ``_p`` is computed but unused — the filename uses
    ``p_list[j]`` instead; confirm which encoding the result files use.
    """
    # Clean accuracy per dataset, shown in the \multirow header cell.
    acc = [91.3, 83.3]
    for (i, dataset) in enumerate(['Yelp', 'SST']):
        for (j, p) in enumerate(['1', '2', '\\infty']):
            if (j == 0):
                print(('\\multirow{3}{*}{%s} & \\multirow{3}{*}{%.2f}' % (dataset, acc[i])), end='')
            else:
                print('& ', end='')
            print((' & $\\ell_%s$ & ' % p), end='')
            for (k, method) in enumerate(['forward', 'backward', 'baf']):
                if (k > 0):
                    print(' & ', end='')
                _p = (p if (p != '\\infty') else '100')
                res = load_result('results/res_model_{}_small_1_{}_{}_1.json'.format(dataset.lower(), method, p_list[j]))
                # Mean per-example runtime for this configuration.
                avg_time = 0
                for example in res['examples']:
                    avg_time += example['time']
                avg_time /= len(res['examples'])
                print(('%.3f & %.3f & %.1f ' % (res['minimum'], res['average'], avg_time)), end='')
            print('\\\\')
        print('\\hline')
class CythonDotParallel(object):
    """Pure-Python stand-in for ``cython.parallel``: sequential semantics
    for parallel(), prange() and threadid()."""

    __all__ = ['parallel', 'prange', 'threadid']

    def parallel(self, num_threads=None):
        # Sequential fallback reuses the module-level ``nogil`` context
        # manager; ``num_threads`` is accepted for API parity but ignored.
        return nogil

    def prange(self, start=0, stop=None, step=1, nogil=False, schedule=None, chunksize=None, num_threads=None):
        """range() with prange's one-argument form: prange(n) -> 0..n-1.
        Scheduling arguments are accepted but have no effect here."""
        if stop is not None:
            return range(start, stop, step)
        return range(0, start, step)

    def threadid(self):
        # Single-threaded emulation: always thread 0.
        return 0
def _mathics_sympysage_symbol(self):
    """Convert a Mathics/SymPy symbol to a Sage symbolic-ring object.

    Mathics user symbols have their namespace prefix stripped; the Mathics
    True/False symbols map to Python booleans; everything else becomes a
    Sage variable.
    """
    from sage.symbolic.ring import SR
    try:
        name = self.name
        # Keep only the part after the backtick of the user namespace.
        if name.startswith('_Mathics_User_'):
            name = name.split('`')[1]
        if (name == mathics._true_symbol()):
            return True
        if (name == mathics._false_symbol()):
            return False
        return SR.var(name)
    except ValueError:
        # SR.var rejects invalid identifiers; fall back to the stringified symbol.
        return SR.var(str(self))
def _get_voc_results_file_template(json_dataset, salt):
    """Return the per-class VOC detection results path template, e.g.
    <devkit>/results/VOC<year>/Main/comp4<salt>_det_<set>_{:s}.txt,
    where '{:s}' is later filled with the class name."""
    info = voc_info(json_dataset)
    filename = 'comp4' + salt + '_det_' + info['image_set'] + '_{:s}.txt'
    return os.path.join(info['devkit_path'], 'results', 'VOC' + info['year'], 'Main', filename)
def test_vi():
    """Total variation of information between a 4-label ground truth and a
    2-way merge segmentation should sum to exactly 1."""
    truth = np.array([1, 2, 3, 4])
    prediction = np.array([1, 1, 8, 8])
    total_vi = np.sum(variation_of_information(truth, prediction))
    assert_equal(total_vi, 1)
def BiggsSmithGraph(embedding=1):
    """Return the Biggs-Smith graph (102 vertices) built from its LCF code.

    ``embedding=1`` lays the vertex orbits out on six circles for a pretty
    drawing; ``embedding=2`` keeps the default LCF embedding. Raises
    ValueError for any other value.
    """
    # LCF shift sequence of the Biggs-Smith graph (applied once over 102 vertices).
    L = [16, 24, (- 38), 17, 34, 48, (- 19), 41, (- 35), 47, (- 20), 34, (- 36), 21, 14, 48, (- 16), (- 36), (- 43), 28, (- 17), 21, 29, (- 43), 46, (- 24), 28, (- 38), (- 14), (- 50), (- 45), 21, 8, 27, (- 21), 20, (- 37), 39, (- 34), (- 44), (- 8), 38, (- 21), 25, 15, (- 34), 18, (- 28), (- 41), 36, 8, (- 29), (- 21), (- 48), (- 28), (- 20), (- 47), 14, (- 8), (- 15), (- 27), 38, 24, (- 48), (- 18), 25, 38, 31, (- 25), 24, (- 46), (- 14), 28, 11, 21, 35, (- 39), 43, 36, (- 38), 14, 50, 43, 36, (- 11), (- 36), (- 24), 45, 8, 19, (- 25), 38, 20, (- 24), (- 14), (- 21), (- 8), 44, (- 31), (- 38), (- 28), 37]
    from sage.graphs.generators.families import LCFGraph
    g = LCFGraph(102, L, 1)
    g.name('Biggs-Smith graph')
    if (embedding == 1):
        # Vertex orbits, each drawn on its own circle.
        orbs = [[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 0], [17, 101, 25, 66, 20, 38, 53, 89, 48, 75, 56, 92, 45, 78, 34, 28, 63], [18, 36, 26, 65, 19, 37, 54, 90, 47, 76, 55, 91, 46, 77, 35, 27, 64], [21, 39, 52, 88, 49, 74, 57, 93, 44, 79, 33, 29, 62, 83, 100, 24, 67], [22, 97, 51, 96, 50, 95, 58, 94, 59, 80, 60, 81, 61, 82, 99, 23, 98], [30, 86, 84, 72, 70, 68, 42, 40, 31, 87, 85, 73, 71, 69, 43, 41, 32]]
        g._circle_embedding(orbs[1], center=((- 0.4), 0), radius=0.2)
        g._circle_embedding(orbs[3], center=(0.4, 0), radius=0.2, shift=4)
        g._circle_embedding(orbs[0], center=((- 0.9), (- 0.5)), radius=0.3, shift=2)
        g._circle_embedding(orbs[2], center=((- 0.9), 0.5), radius=0.3)
        g._circle_embedding(orbs[4], center=(0.9, (- 0.5)), radius=0.3, shift=4)
        g._circle_embedding(orbs[5], center=(0.9, 0.5), radius=0.3, shift=(- 2))
    elif (embedding == 2):
        # Keep the LCF default layout.
        pass
    else:
        raise ValueError('the value of embedding must be 1 or 2')
    return g
def get_parser_and_partitioner(task_name) -> Tuple[(Type[Parser], Type[PartitioningTask])]:
    """Look up the (parser class, partitioner class) pair registered for
    *task_name*; raise ValueError listing the known tasks otherwise."""
    try:
        return REGISTRY[task_name]
    except KeyError:
        raise ValueError(f'unknown task {task_name} available tasks {list(REGISTRY.keys())}')
def test_bad_predict():
    """predict() on the abstract base model must raise NotImplementedError
    (subclasses are expected to override it)."""
    _bm = BaseBoostedRelationalModel()
    with pytest.raises(NotImplementedError):
        _bm.predict('database')
def register_Ns3WifiModeChecker_methods(root_module, cls):
    """Register pybindgen constructors for ns3::WifiModeChecker."""
    # Default constructor.
    cls.add_constructor([])
    # Copy constructor.
    cls.add_constructor([param('ns3::WifiModeChecker const &', 'arg0')])
    return
def deprecated_function(*, removed_in: str, replacement: str) -> Callable:
    """Decorator factory marking a function as deprecated.

    The wrapped function emits a deprecation warning (via
    ``_warn_deprecation``) on every call, naming the release it will be
    removed in and its replacement, then delegates to the original.
    """
    import functools

    def wrapper(func: Callable) -> Callable:
        # FIX: without functools.wraps the wrapper clobbered the wrapped
        # function's __name__/__doc__/__module__, breaking introspection
        # and documentation tooling.
        @functools.wraps(func)
        def inner(*args: Any, **kwargs: Any) -> Any:
            _warn_deprecation(kind='Function', thing=func.__name__, removed_in=removed_in, replacement=replacement)
            return func(*args, **kwargs)
        return inner
    return wrapper
class docTableTypeSub(supermod.docTableType):
    """generateDS subclass hook for docTableType; forwards straight to the
    superclass constructor so custom behaviour can be added here."""

    def __init__(self, rows=None, cols=None, row=None, caption=None):
        supermod.docTableType.__init__(self, rows, cols, row, caption)
class ConformerConvBlock(rf.Module):
    """Conformer convolution module: pointwise conv + GLU gating, depthwise
    conv, normalization, swish, pointwise conv. Feature dim is preserved.
    """

    def __init__(self, out_dim: Dim, *, kernel_size: int, norm: Union[(rf.BatchNorm, Any)]):
        super().__init__()
        self.out_dim = out_dim
        # Doubled width feeds the GLU; rf.gating halves it back to out_dim.
        self.positionwise_conv1 = rf.Linear(out_dim, (2 * out_dim))
        # groups == channel count -> depthwise convolution.
        self.depthwise_conv = rf.Conv1d(out_dim, out_dim, filter_size=kernel_size, groups=out_dim.dimension, padding='same')
        self.positionwise_conv2 = rf.Linear(out_dim, out_dim)
        self.norm = norm

    def __call__(self, inp: Tensor, *, spatial_dim: Dim) -> Tensor:
        """Apply the block along *spatial_dim*; shape is unchanged."""
        x_conv1 = self.positionwise_conv1(inp)
        (x_act, _) = rf.gating(x_conv1)
        (x_depthwise_conv, _) = self.depthwise_conv(x_act, in_spatial_dim=spatial_dim)
        x_normed = self.norm(x_depthwise_conv)
        x_swish = rf.swish(x_normed)
        x_conv2 = self.positionwise_conv2(x_swish)
        return x_conv2
class HGFilter(nn.Module):
    """Stacked-hourglass image filter (PIFu-style): a conv stem, then
    ``opt.num_stack`` hourglass modules with intermediate supervision
    heads; optionally recovers full input resolution at the last stack
    (``opt.recover_dim``).
    """

    def __init__(self, opt):
        super(HGFilter, self).__init__()
        self.num_modules = opt.num_stack
        self.opt = opt
        # 7x7 stride-2 stem: RGB -> 64 channels at half resolution.
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3)
        if (self.opt.norm == 'batch'):
            self.bn1 = nn.BatchNorm2d(64)
        elif (self.opt.norm == 'group'):
            self.bn1 = nn.GroupNorm(32, 64)
        # Second downsampling: strided conv or average pooling (in forward).
        if (self.opt.hg_down == 'conv64'):
            self.conv2 = ConvBlock(64, 64, self.opt.norm)
            self.down_conv2 = nn.Conv2d(64, 128, kernel_size=3, stride=2, padding=1)
        elif (self.opt.hg_down == 'conv128'):
            self.conv2 = ConvBlock(64, 128, self.opt.norm)
            self.down_conv2 = nn.Conv2d(128, 128, kernel_size=3, stride=2, padding=1)
        elif (self.opt.hg_down == 'ave_pool'):
            self.conv2 = ConvBlock(64, 128, self.opt.norm)
        else:
            raise NameError('Unknown Fan Filter setting!')
        self.conv3 = ConvBlock(128, 128, self.opt.norm)
        self.conv4 = ConvBlock(128, 256, self.opt.norm)
        # Per-stack submodules, registered by name: m<i> hourglass,
        # top_m_<i>/conv_last<i>/bn_end<i> feature heads, l<i> output head,
        # bl<i>/al<i> fusion convs feeding the next stack.
        for hg_module in range(self.num_modules):
            self.add_module(('m' + str(hg_module)), HourGlass(1, opt.num_hourglass, 256, self.opt.norm, self.opt.upsample_mode))
            self.add_module(('top_m_' + str(hg_module)), ConvBlock(256, 256, self.opt.norm))
            self.add_module(('conv_last' + str(hg_module)), nn.Conv2d(256, 256, kernel_size=1, stride=1, padding=0))
            if (self.opt.norm == 'batch'):
                self.add_module(('bn_end' + str(hg_module)), nn.BatchNorm2d(256))
            elif (self.opt.norm == 'group'):
                self.add_module(('bn_end' + str(hg_module)), nn.GroupNorm(32, 256))
            self.add_module(('l' + str(hg_module)), nn.Conv2d(256, opt.hourglass_dim, kernel_size=1, stride=1, padding=0))
            if (hg_module < (self.num_modules - 1)):
                self.add_module(('bl' + str(hg_module)), nn.Conv2d(256, 256, kernel_size=1, stride=1, padding=0))
                self.add_module(('al' + str(hg_module)), nn.Conv2d(opt.hourglass_dim, 256, kernel_size=1, stride=1, padding=0))
            # The last stack also gets bl/al when the full-resolution
            # recovery path is enabled.
            if ((hg_module == (self.num_modules - 1)) and self.opt.recover_dim):
                self.add_module(('bl' + str(hg_module)), nn.Conv2d(256, 256, kernel_size=1, stride=1, padding=0))
                self.add_module(('al' + str(hg_module)), nn.Conv2d(opt.hourglass_dim, 256, kernel_size=1, stride=1, padding=0))
        if self.opt.recover_dim:
            # 1x1 convs matching the stem features (64ch) and the raw RGB
            # input (3ch) to 256 channels for the skip additions.
            self.recover_dim_match_fea_1 = nn.Conv2d(64, 256, kernel_size=1, stride=1, padding=0)
            self.recover_dim_conv_1 = ConvBlock(256, 256, self.opt.norm)
            self.recover_dim_match_fea_2 = nn.Conv2d(3, 256, kernel_size=1, stride=1, padding=0)
            self.recover_dim_conv_2 = ConvBlock(256, 256, self.opt.norm)

    def forward(self, x):
        """Return (outputs, tmpx.detach(), normx): per-stack feature maps
        (plus the recovered full-res map when enabled), the detached stem
        features, and the pre-hourglass features."""
        raw_x = x
        x = F.relu(self.bn1(self.conv1(x)), True)
        tmpx = x
        if (self.opt.hg_down == 'ave_pool'):
            x = F.avg_pool2d(self.conv2(x), 2, stride=2)
        elif (self.opt.hg_down in ['conv64', 'conv128']):
            x = self.conv2(x)
            x = self.down_conv2(x)
        else:
            raise NameError('Unknown Fan Filter setting!')
        normx = x
        x = self.conv3(x)
        x = self.conv4(x)
        previous = x
        outputs = []
        for i in range(self.num_modules):
            hg = self._modules[('m' + str(i))](previous)
            ll = hg
            ll = self._modules[('top_m_' + str(i))](ll)
            ll = F.relu(self._modules[('bn_end' + str(i))](self._modules[('conv_last' + str(i))](ll)), True)
            # Intermediate supervision head for this stack.
            tmp_out = self._modules[('l' + str(i))](ll)
            outputs.append(tmp_out)
            if (i < (self.num_modules - 1)):
                # Fuse features + projected prediction into the next stack.
                ll = self._modules[('bl' + str(i))](ll)
                tmp_out_ = self._modules[('al' + str(i))](tmp_out)
                previous = ((previous + ll) + tmp_out_)
            if ((i == (self.num_modules - 1)) and self.opt.recover_dim):
                # Full-resolution recovery: two 2x upsamplings, each with a
                # skip connection (stem features, then raw input).
                ll = self._modules[('bl' + str(i))](ll)
                tmp_out_ = self._modules[('al' + str(i))](tmp_out)
                fea_upsampled = ((previous + ll) + tmp_out_)
                if (self.opt.upsample_mode == 'bicubic'):
                    fea_upsampled = F.interpolate(fea_upsampled, scale_factor=2, mode='bicubic', align_corners=True)
                elif (self.opt.upsample_mode == 'nearest'):
                    fea_upsampled = F.interpolate(fea_upsampled, scale_factor=2, mode='nearest')
                else:
                    print('Error: undefined self.upsample_mode {} when self.opt.recover_dim {}!'.format(self.opt.upsample_mode, self.opt.recover_dim))
                fea_upsampled = (fea_upsampled + self.recover_dim_match_fea_1(tmpx))
                fea_upsampled = self.recover_dim_conv_1(fea_upsampled)
                if (self.opt.upsample_mode == 'bicubic'):
                    fea_upsampled = F.interpolate(fea_upsampled, scale_factor=2, mode='bicubic', align_corners=True)
                elif (self.opt.upsample_mode == 'nearest'):
                    fea_upsampled = F.interpolate(fea_upsampled, scale_factor=2, mode='nearest')
                else:
                    print('Error: undefined self.upsample_mode {} when self.opt.recover_dim {}!'.format(self.opt.upsample_mode, self.opt.recover_dim))
                fea_upsampled = (fea_upsampled + self.recover_dim_match_fea_2(raw_x))
                fea_upsampled = self.recover_dim_conv_2(fea_upsampled)
                outputs.append(fea_upsampled)
        return (outputs, tmpx.detach(), normx)
# FIX: the decorators had been truncated to bare ``.parametrize(...)``
# fragments (a syntax error); restored as pytest parametrization markers.
@pytest.mark.parametrize('ctx, func_name', ctxs)
@pytest.mark.parametrize('seed', [313])
@pytest.mark.parametrize('base_axis, weight_shape', [(1, (12, 2, 3)), (2, (4, 4)), ((- 1), (4, 4)), ((- 2), (12, 3, 4))])
@pytest.mark.parametrize('bias', [True, False])
def test_affine_forward_backward(seed, base_axis, weight_shape, bias, ctx, func_name):
    """Check nnabla's affine forward/backward against the reference
    implementation over several base axes, weight shapes, and bias on/off."""
    from nbla_test_utils import function_tester
    rng = np.random.RandomState(seed)
    # Input data, weights, and optional bias (None when bias is disabled).
    inputs = [rng.randn(2, 3, 4).astype(np.float32)]
    inputs += [rng.randn(*weight_shape).astype(np.float32)]
    if bias:
        inputs += [rng.randn(*weight_shape[1:]).astype(np.float32)]
    else:
        inputs += [None]
    function_tester(rng, F.affine, ref_affine, inputs, func_args=[base_axis], atol_b=0.01, dstep=0.001, ctx=ctx, func_name=func_name)
# FIX: the decorator had been truncated to a bare ``.parametrize(...)``
# fragment (a syntax error); restored as a pytest parametrization marker.
@pytest.mark.parametrize('extensionarray', [False, True])
def test_unionarray(tmp_path, extensionarray):
    """Round-trip UnionArrays through Arrow: plain unions, unions with
    option-type contents, and masked unions (with and without option-type
    contents)."""
    # Case 1: plain union of float64 and int32 contents.
    akarray = ak.contents.UnionArray(ak.index.Index8(np.array([0, 0, 1, 1, 1, 0, 1], dtype=np.int8)), ak.index.Index64(np.array([0, 1, 3, 2, 1, 2, 0], dtype=np.int64)), [ak.contents.NumpyArray(np.array([0.0, 1.1, 2.2]), parameters={'which': 'inner1'}), ak.contents.NumpyArray(np.array([[0], [10], [20], [30]], dtype=np.int32), parameters={'which': 'inner2'})], parameters={'which': 'outer'})
    paarray = akarray.to_arrow(extensionarray=extensionarray)
    arrow_round_trip(akarray, paarray, extensionarray)
    # Case 2: union whose contents are option-typed (Unmasked / ByteMasked).
    akarray = ak.contents.UnionArray(ak.index.Index8(np.array([0, 0, 1, 1, 1, 0, 1], dtype=np.int8)), ak.index.Index64(np.array([0, 1, 3, 2, 1, 2, 0], dtype=np.int64)), [ak.contents.UnmaskedArray(ak.contents.NumpyArray(np.array([0.0, 1.1, 2.2]), parameters={'which': 'inner1'})), ak.contents.ByteMaskedArray(ak.index.Index8(np.array([False, False, True, False]).view(np.int8)), ak.contents.NumpyArray(np.array([[0], [10], [20], [30]], dtype=np.int32), parameters={'which': 'inner2'}), valid_when=False, parameters={'which': 'middle'})], parameters={'which': 'outer'})
    paarray = akarray.to_arrow(extensionarray=extensionarray)
    arrow_round_trip(akarray, paarray, extensionarray)
    # Case 3: byte-masked union with plain contents.
    akarray = ak.contents.ByteMaskedArray.simplified(ak.index.Index8(np.array([False, True, False, True, False, True, True]).view(np.int8)), ak.contents.UnionArray(ak.index.Index8(np.array([0, 0, 1, 1, 1, 0, 1], dtype=np.int8)), ak.index.Index64(np.array([0, 1, 3, 2, 1, 2, 0], dtype=np.int64)), [ak.contents.NumpyArray(np.array([0.0, 1.1, 2.2]), parameters={'which': 'inner1'}), ak.contents.NumpyArray(np.array([[0], [10], [20], [30]], dtype=np.int32), parameters={'which': 'inner2'})], parameters={'which': 'middle'}), valid_when=True, parameters={'which': 'outer'})
    paarray = akarray.to_arrow(extensionarray=extensionarray)
    arrow_round_trip(akarray, paarray, extensionarray)
    # Case 4: byte-masked union whose contents are themselves option-typed.
    akarray = ak.contents.ByteMaskedArray.simplified(ak.index.Index8(np.array([False, True, False, True, False, True, True]).view(np.int8)), ak.contents.UnionArray(ak.index.Index8(np.array([0, 0, 1, 1, 1, 0, 1], dtype=np.int8)), ak.index.Index64(np.array([0, 1, 3, 2, 1, 2, 0], dtype=np.int64)), [ak.contents.UnmaskedArray(ak.contents.NumpyArray(np.array([0.0, 1.1, 2.2]), parameters={'which': 'inner1'})), ak.contents.ByteMaskedArray(ak.index.Index8(np.array([False, False, True, False]).view(np.int8)), ak.contents.NumpyArray(np.array([[0], [10], [20], [30]], dtype=np.int32), parameters={'which': 'inner2'}), valid_when=False, parameters={'which': 'middle1'})], parameters={'which': 'middle2'}), valid_when=True, parameters={'which': 'outer'})
    paarray = akarray.to_arrow(extensionarray=extensionarray)
    arrow_round_trip(akarray, paarray, extensionarray)
def validate(cfg: DictConfig, trainer: Trainer, task: tasks.FairseqTask, epoch_itr, subsets: List[str]) -> List[Optional[float]]:
    """Evaluate the model on the validation set(s) and return the losses.

    Args:
        cfg: full experiment configuration.
        trainer: trainer object driving the model being validated.
        task: task providing datasets and an optional ``post_validate`` hook.
        epoch_itr: training epoch iterator (only its epoch number is used).
        subsets: names of validation subsets to evaluate, in order.

    Returns:
        One ``best_checkpoint_metric`` value per subset, in input order.
    """
    if cfg.dataset.fixed_validation_seed is not None:
        # Fix the seed so successive validations are comparable runs.
        utils.set_torch_seed(cfg.dataset.fixed_validation_seed)

    trainer.begin_valid_epoch(epoch_itr.epoch)
    valid_losses = []
    for subset in subsets:
        logger.info('begin validation on "{}" subset'.format(subset))

        # Initialize the data iterator; validation data is never shuffled.
        itr = trainer.get_valid_iterator(subset).next_epoch_itr(shuffle=False, set_dataset_epoch=False)
        if cfg.common.tpu:
            itr = utils.tpu_data_loader(itr)
        progress = progress_bar.progress_bar(
            itr,
            log_format=cfg.common.log_format,
            log_interval=cfg.common.log_interval,
            epoch=epoch_itr.epoch,
            prefix=f"valid on '{subset}' subset",
            tensorboard_logdir=(cfg.common.tensorboard_logdir if distributed_utils.is_master(cfg.distributed_training) else None),
            default_log_format=('tqdm' if not cfg.common.no_progress_bar else 'simple'),
            wandb_project=(cfg.common.wandb_project if distributed_utils.is_master(cfg.distributed_training) else None),
            wandb_run_name=os.environ.get('WANDB_NAME', os.path.basename(cfg.checkpoint.save_dir)),
        )

        # Create a new root metrics aggregator so validation metrics do not
        # pollute other aggregators (e.g. the training meters).
        with metrics.aggregate(new_root=True) as agg:
            for i, sample in enumerate(progress):
                # BUG FIX: was `i > cfg.dataset.max_valid_steps`, which
                # evaluated max_valid_steps + 1 batches (i is 0-based);
                # `>=` honors the configured limit exactly.
                if cfg.dataset.max_valid_steps is not None and i >= cfg.dataset.max_valid_steps:
                    break
                trainer.valid_step(sample)

        # Log validation stats gathered by the aggregator.
        stats = get_valid_stats(cfg, trainer, agg.get_smoothed_values())
        if hasattr(task, 'post_validate'):
            task.post_validate(trainer.get_model(), stats, agg)
        progress.print(stats, tag=subset, step=trainer.get_num_updates())

        valid_losses.append(stats[cfg.checkpoint.best_checkpoint_metric])
    return valid_losses
class DOMElement(object):
    """A node in a rendered web page's DOM tree, built from a raw dict.

    NOTE(review): accessors such as `tag`, `ref`, `text`, `children` are read
    as plain attributes throughout (e.g. `self.tag == 't'` in __init__), so
    they were presumably decorated with @property in the original source; the
    decorators are not visible in this copy -- confirm against upstream.
    The use of `unicode` / `u''` literals indicates Python 2 code.
    """

    def __init__(self, raw_dom, parent=None, dom_elements=None):
        """Recursively build this element and its subtree.

        Args:
            raw_dom: dict with keys 'tag', 'left', 'top', 'width', 'height',
                'children', plus optional 'ref', 'text', 'value', 'id',
                'classes', 'bgColor', 'fgColor', 'focused', 'tampered',
                'recordingTarget'.
            parent: parent DOMElement, or None for the root.
            dom_elements: optional list; every element constructed in this
                subtree appends itself to it.
        """
        self._parent = parent
        self._tag = raw_dom['tag'].lower()
        self._left = raw_dom['left']
        self._top = raw_dom['top']
        self._width = raw_dom['width']
        self._height = raw_dom['height']
        self._ref = raw_dom.get('ref')
        # Text nodes (tag 't') do not carry a reference id.
        if (self.tag == 't'):
            self._ref = None
        if ('text' in raw_dom):
            self._text = unicode(raw_dom['text'])
        else:
            self._text = None
        self._value = raw_dom.get('value')
        self._id = raw_dom.get('id')
        # Normalize the class attribute: a dict comes from SVG nodes and an
        # empty string means no class; both are replaced by sentinel labels.
        classes = raw_dom.get('classes', 'TEXT_CLASS')
        if isinstance(classes, dict):
            classes = 'SVG_CLASS'
        elif (classes == ''):
            classes = 'NO_CLASS'
        self._classes = classes
        self._bg_color = self._rgba_str_to_floats(raw_dom.get('bgColor'))
        self._fg_color = self._rgba_str_to_floats(raw_dom.get('fgColor'))
        self._focused = raw_dom.get('focused', False)
        self._tampered = raw_dom.get('tampered', False)
        self._targeted = raw_dom.get('recordingTarget', False)
        # Build children recursively, then collapse a subtree consisting only
        # of text nodes into this element's text.
        self._children = []
        for raw_child in raw_dom['children']:
            self._children.append(DOMElement(raw_child, parent=self, dom_elements=dom_elements))
        if (self._children and all(((child.tag == 't') for child in self._children))):
            self._text = u' '.join((child.text for child in self._children))
            self._children = []
        if (dom_elements is not None):
            dom_elements.append(self)

    def __eq__(self, other):
        # Equality is by reference id only; refs are None for text nodes, so
        # two distinct text nodes compare equal -- NOTE(review): confirm intended.
        if (not isinstance(other, DOMElement)):
            return False
        return (self.ref == other.ref)

    def __ne__(self, other):
        return (not self.__eq__(other))

    def __hash__(self):
        # Identity hash (object.__hash__), deliberately NOT derived from the
        # ref-based __eq__ above -- NOTE(review): confirm this is intentional.
        return super(DOMElement, self).__hash__()

    def to_dict(self):
        """Return a flat, JSON-serializable summary (children excluded)."""
        return {'tag': self.tag, 'left': self.left, 'top': self.top, 'width': self.width, 'height': self.height, 'text': self.text, 'value': self.value, 'id': self.id, 'classes': self.classes}

    # --- Simple accessors (presumably @property originally; see class note) ---

    def tag(self):
        return self._tag

    def left(self):
        return self._left

    def top(self):
        return self._top

    def width(self):
        return self._width

    def height(self):
        return self._height

    def right(self):
        # Right edge = left + width.
        return (self._left + self._width)

    def bottom(self):
        # Bottom edge = top + height.
        return (self._top + self._height)

    def ref(self):
        return self._ref

    def text(self):
        return self._text

    def value(self):
        return self._value

    def id(self):
        return self._id

    def classes(self):
        return self._classes

    def bg_color(self):
        return self._bg_color

    def fg_color(self):
        return self._fg_color

    def focused(self):
        return self._focused

    def tampered(self):
        return self._tampered

    def targeted(self):
        return self._targeted

    def is_leaf(self):
        # An element is a leaf iff it carries text (see the collapse in __init__).
        return (self._text is not None)

    def children(self):
        return self._children

    def parent(self):
        return self._parent

    def ancestor_path(self):
        """Return ancestors from (just below) the root down to self.

        The root itself is excluded: the walk stops when `curr.parent` is falsy.
        """
        path = []
        curr = self
        while curr.parent:
            path.append(curr)
            curr = curr.parent
        return list(reversed(path))

    def depth(self):
        # Depth = number of ancestors on the path (root has depth 0).
        return len(self.ancestor_path)

    def __str__(self):
        # Compact one-line description; long text is truncated to 20 chars.
        if self.text:
            text = self.text
            text = ((text[:20] + '...') if (len(text) > 20) else text)
            text_str = ' text={}'.format(repr(text))
        else:
            text_str = ''
        value_str = (' value={}'.format(self.value) if (self.value is not None) else '')
        classes_str = ' classes=[{}]'.format(self.classes)
        num_children = len(self.children)
        children_str = (' children={}'.format(num_children) if (num_children != 0) else '')
        return '[{ref}] {tag} ({left}, {top}){text}{value}{classes}{children}'.format(ref=self.ref, tag=self.tag, left=round(self.left, 2), top=round(self.top, 2), text=text_str, value=value_str, classes=classes_str, children=children_str)
    __repr__ = __str__

    def visualize(self, join=True):
        """Render the subtree as an ASCII tree.

        Args:
            join: if True return a single string, else the list of lines.
        """
        lines = []
        lines.append('- {}'.format(self))
        for (i, child) in enumerate(self.children):
            if isinstance(child, unicode):
                # Raw text child: truncate and quote it.
                child = ((child[:20] + '...') if (len(child) > 20) else child)
                lines.append(' |- "{}"'.format(child))
            else:
                for (j, line) in enumerate(child.visualize(join=False)):
                    # Last child's continuation lines drop the vertical bar.
                    prefix = (' ' if ((i == (len(self.children) - 1)) and j) else ' |')
                    lines.append((prefix + line))
        return ('\n'.join(lines) if join else lines)

    def lca(self, other):
        """Return the lowest common ancestor of self and other.

        Raises:
            ValueError: if the two elements are not in the same DOM tree.
        """
        if (self in other.ancestor_path):
            return self
        elif (other in self.ancestor_path):
            return other
        # Walk both root-to-node paths in lockstep; the last index where they
        # agree is the LCA.
        for (i, (self_ancestor, other_ancestor)) in enumerate(zip(self.ancestor_path, other.ancestor_path)):
            if (self_ancestor != other_ancestor):
                return self.ancestor_path[(i - 1)]
        raise ValueError('{} is not in the same DOM tree as {}\n\nself tree: {}\n\nother tree: {}'.format(self, other, self.visualize(), other.visualize()))

    def diff(self, other_dom):
        """Return elements that differ between this tree and other_dom.

        Trees are compared positionally, child-by-child; a changed ref counts
        both elements, a changed attribute counts only the first tree's
        element, and extra/missing subtrees are reported wholesale.
        """
        def element_diff(first, second, l):
            # One side missing: report the whole remaining subtree.
            if (second is None):
                l.append(first)
                for child in first.children:
                    element_diff(child, None, l)
                return
            elif (first is None):
                l.append(second)
                for child in second.children:
                    element_diff(child, None, l)
                return
            if (first.ref != second.ref):
                l.append(first)
                l.append(second)
            elif ((first.text != second.text) or (first.tampered != second.tampered) or (first.value != second.value) or (first.width != second.width) or (first.height != second.height) or (first.classes != second.classes) or (first.tag != second.tag) or (first.fg_color != second.fg_color) or (first.bg_color != second.bg_color) or (first.is_leaf != second.is_leaf)):
                l.append(first)
            # Pad the shorter child list with None so every child is compared.
            first_children = list(first.children)
            second_children = list(second.children)
            if (len(first_children) < len(second_children)):
                first_children += ([None] * (len(second_children) - len(first_children)))
            elif (len(first_children) > len(second_children)):
                second_children += ([None] * (len(first_children) - len(second_children)))
            for (first_child, second_child) in zip(first_children, second_children):
                element_diff(first_child, second_child, l)
        different_elements = []
        element_diff(self, other_dom, different_elements)
        return different_elements

    def _rgba_str_to_floats(self, rgba):
        """Parse 'rgb(...)' / 'rgba(...)' into an (r, g, b, a) tuple in [0, 1].

        None maps to opaque white. Alpha is taken verbatim (not divided by
        255) -- CSS alpha is already 0..1.
        """
        if (rgba is None):
            return (1.0, 1.0, 1.0, 1.0)
        if ('rgba' in rgba):
            m = re.search('rgba\\(([0-9.]+), ([0-9.]+), ([0-9.]+), ([0-9.]+)\\)', rgba)
            a = float(m.group(4))
        else:
            m = re.search('rgb\\(([0-9.]+), ([0-9.]+), ([0-9.]+)\\)', rgba)
            a = 1.0
        return ((float(m.group(1)) / 255), (float(m.group(2)) / 255), (float(m.group(3)) / 255), a)
def init_sage(controller=None):
    """Put the Sage runtime into doctest mode.

    Side effects only: forces a non-interactive matplotlib backend, enables
    doctest mode, swaps in a deterministic Python RNG, installs a sorted dict
    pretty-printer, silences cysignals, switches the rich-output backend, and
    neutralizes sympy's terminal-width detection.

    Args:
        controller: optional doctest controller; when None the full Jupyter
            kernel environment is imported instead of
            `controller.load_environment()`.
    """
    try:
        # Do not require the matplotlib dependency; importing font_manager
        # also binds the top-level `matplotlib` name used below.
        import matplotlib.font_manager
    except ImportError:
        pass
    else:
        # Non-GUI backend so doctests never open windows.
        matplotlib.use('agg')
    import sage.doctest
    sage.doctest.DOCTEST_MODE = True
    # Use a deterministic Python-2-compatible RNG for reproducible doctests.
    import sage.misc.randstate
    from sage.cpython._py2_random import Random
    sage.misc.randstate.DEFAULT_PYTHON_RANDOM = Random
    # Print dicts with sorted keys so doctest output is stable.
    IPython.lib.pretty.for_type(dict, _sorted_dict_pprinter_factory('{', '}'))
    if (controller is None):
        import sage.repl.ipython_kernel.all_jupyter
    else:
        controller.load_environment()
    try:
        # Close any running pexpect interfaces from a previous session.
        from sage.interfaces.quit import invalidate_all
        invalidate_all()
    except ModuleNotFoundError:
        pass
    # Disable cysignals debug output.
    from cysignals.signals import set_debug_level
    set_debug_level(0)
    # Route rich output through the doctest backend.
    from sage.repl.rich_output import get_display_manager
    dm = get_display_manager()
    from sage.repl.rich_output.backend_doctest import BackendDoctest
    dm.switch_backend(BackendDoctest())
    # Stricter category hash checking during tests.
    from sage.structure.debug_options import debug
    debug.refine_category_hash_check = True
    try:
        # Imported for its side effects on the terminal -- presumably to keep
        # readline state consistent; TODO confirm why the import is needed.
        import readline
    except ModuleNotFoundError:
        pass
    try:
        import sympy
    except ImportError:
        pass
    else:
        # Pretend the terminal has no width so sympy never wraps output.
        from sympy.printing.pretty.stringpict import stringPict
        stringPict.terminal_width = (lambda self: 0)
def get_trans_func(name):
    """Look up a residual-block transformation class by its registry name."""
    trans_funcs = {
        'bottleneck_transform': BottleneckTransform,
        'basic_transform': BasicTransform,
    }
    err_msg = "Transformation function '{}' not supported".format(name)
    assert (name in trans_funcs.keys()), err_msg
    return trans_funcs[name]
def load_label(filepath):
    """Load the label vocabulary CSV and build both lookup directions.

    Args:
        filepath: path to a UTF-8 CSV with columns 'id' and 'grpm'.

    Returns:
        Tuple of (grpm2id, id2grpm) dicts mapping term -> id and id -> term.
    """
    frame = pd.read_csv(filepath, encoding='utf-8')
    grpm2id = dict()
    id2grpm = dict()
    for row_id, row_grpm in zip(frame['id'], frame['grpm']):
        grpm2id[row_grpm] = row_id
        id2grpm[row_id] = row_grpm
    return (grpm2id, id2grpm)
# NOTE(review): the decorator is truncated here -- presumably
# `@flax.struct.dataclass` in the original file; confirm upstream.
.dataclass
class FlaxBaseModelOutputWithPastAndCrossAttentions(ModelOutput):
    """Model output container carrying cached key/values and attentions.

    All fields are optional; which ones are populated depends on the flags
    the calling model was invoked with -- TODO confirm against the model code.
    """
    # Hidden states from the model's last layer (name suggests shape
    # (batch, seq, hidden) -- not verifiable from this file).
    last_hidden_state: jnp.ndarray = None
    # Cached key/value tensors, presumably for fast autoregressive decoding.
    past_key_values: Optional[Tuple[Tuple[jnp.ndarray]]] = None
    # Per-layer hidden states.
    hidden_states: Optional[Tuple[jnp.ndarray]] = None
    # Per-layer self-attention weights.
    attentions: Optional[Tuple[jnp.ndarray]] = None
    # Per-layer cross-attention weights.
    cross_attentions: Optional[Tuple[jnp.ndarray]] = None
def main():
    """Entry point: run the CLI with a shared context, always cleaning up."""
    ctx = CliContext()
    try:
        cli.main(obj=ctx)
    finally:
        # Release context resources even if the CLI raises or calls sys.exit.
        ctx.cleanup()
def process_records(records, metric):
    """Average the requested metric and pass through the per-sample scores.

    Args:
        records: dict of recorded metric lists; must contain
            'sample_wise_metric' for the downstream paired-sample t-test.
        metric: key of the metric list to average.

    Returns:
        Tuple of (mean of records[metric] as a float,
        records['sample_wise_metric']).
    """
    assert ('sample_wise_metric' in records), 'Utterance-wise / sample-wise metric is necessary for proceeding the Paired Sample T-test.'
    scores = torch.FloatTensor(records[metric])
    return (scores.mean().item(), records['sample_wise_metric'])
class StandardVirtualAgentExecutorWithToolkit(AgentExecutorWithToolkit):
    """Agent executor whose tool calls are *simulated* by an LLM.

    Instead of executing tools, a simulator LLM chain predicts each tool's
    observation; a critiquer LLM can then validate and revise the simulated
    observation for up to `max_allowed_steps` rounds.

    NOTE(review): several methods are used as attributes (`self.stop_seqs`,
    `self.generatetion_prefix`, `self.llm_simulator_tool.run`, ...) and
    `from_agent_and_toolkits` calls `cls.create_simulator_prompt(...)`, so
    `@property` / `@classmethod` decorators were presumably stripped from
    this copy -- confirm against upstream.  `generatetion_prefix` is a typo
    kept for interface compatibility.
    """
    # Chain that produces simulated tool observations.
    llm_simulator_chain: LLMChain
    # LLM used for critiquing; defaults to the simulator LLM when None.
    llm_critiquer: Optional[BaseLanguageModel] = None
    # Number of critique rounds requested per observation.
    num_critique_steps: Optional[int] = 0
    # Hard cap on critique iterations (<= 0 disables critiquing entirely).
    max_allowed_steps: Optional[int] = 3
    sim_system_info: Prompt = STD_SIMULATOR_SYSTEM_INFO
    sim_prompt_instruction: Prompt = STD_SIMULATOR_PROMPT
    critique_prompt: Prompt = STD_SIMULATOR_CRITIQUE
    critique_prompt_repeat: Prompt = STD_SIMULATOR_CRITIQUE_REPEAT
    _input_keys: List[str] = ['input']

    def from_agent_and_toolkits(cls, agent: Union[(BaseSingleActionAgent, BaseMultiActionAgent)], toolkits: Sequence[BaseToolkit], llm_simulator: BaseLanguageModel, llm_critiquer: Optional[BaseLanguageModel]=None, num_critique_steps: Optional[int]=0, max_allowed_steps: Optional[int]=3, callback_manager: Optional[BaseCallbackManager]=None, use_chat_format: Optional[bool]=False, **kwargs: Any) -> AgentExecutor:
        """Build an executor from an agent, toolkits and a simulator LLM."""
        tools = agent.get_all_tools(toolkits)
        tool_names = [tool.name for tool in tools]
        if use_chat_format:
            # Chat-format prompts require a chat-capable model.
            assert isinstance(llm_simulator, BaseChatModel)
        simulator_prompt = cls.create_simulator_prompt(use_chat_format=use_chat_format)
        llm_simulator_chain = LLMChain(llm=llm_simulator, prompt=simulator_prompt, callback_manager=callback_manager)
        if (llm_critiquer is None):
            # Fall back to the simulator model for critiquing.
            llm_critiquer = llm_simulator
        return cls(agent=agent, tools=tools, toolkits=toolkits, tool_names=tool_names, llm_simulator_chain=llm_simulator_chain, llm_critiquer=llm_critiquer, num_critique_steps=num_critique_steps, max_allowed_steps=max_allowed_steps, callback_manager=callback_manager, **kwargs)

    def get_var(cls, name):
        """Return the declared default of a pydantic field on this class."""
        return cls.__fields__[name].default

    def create_simulator_prompt(cls, use_chat_format: Optional[bool]=False) -> BasePromptTemplate:
        """Create the simulator prompt (chat or plain-text format)."""
        inputs = dict()
        system_info = cls.get_var('sim_system_info')
        prompt_instruction = cls.get_var('sim_prompt_instruction')
        (system_info, prompt_instruction) = format_multiple_prompts([system_info, prompt_instruction], inputs, include_brackets=[False, True])
        if use_chat_format:
            # System message + human instruction template.
            simulator_system_message = SystemMessage(content=system_info)
            simulator_instruction_message = HumanMessagePromptTemplate.from_template(template=prompt_instruction)
            messages = [simulator_system_message, simulator_instruction_message]
            return ChatPromptTemplate.from_messages(messages=messages)
        else:
            template = '\n\n'.join([system_info, prompt_instruction])
            input_variables = (cls.get_var('_input_keys') + ['simulator_scratchpad'])
            return PromptTemplate(template=template, input_variables=input_variables)

    def _get_current_toolkit_descriptions(self, tool_name: str) -> str:
        """Return the description of the toolkit that contains `tool_name`."""
        # TODO(review): name says "descriptions" (plural) but only the owning
        # toolkit's description is returned -- confirm intended.
        for toolkit in self.toolkits:
            for tool in toolkit.tools:
                if (tool.name == tool_name):
                    return toolkit.create_description(detail_level='low')
        raise ValueError(f'Tool {tool_name} not found in any of the toolkits.')

    def input_keys(self) -> List[str]:
        return self._input_keys

    def generatetion_prefix(self) -> str:
        # [sic] "generatetion" -- typo preserved; used throughout as a prefix
        # for the simulator's streamed/logged output.
        return 'Simulator Thought: '

    def thought_summary_prefix(self) -> str:
        return 'Simulator Log Summary: '

    def stop_seqs(self) -> List[str]:
        # Stop sequences for simulator generations.
        return ['\nThought:', '\n\tThought:', '\nAction:', '\n\tAction:']

    def llm_simulator_tool(self) -> BaseTool:
        """Wrap the simulator in a StructuredTool so it can be `run` like a tool."""
        result = StructuredTool.from_function(func=(lambda callbacks, **kwargs: self._get_simulated_observation(callbacks, **kwargs)), name='llm_simulator', description='Simulate the execution of a tool with a language model', args_schema=SimulatorInputModel)
        return result

    def _fix_observation_text(self, text: str):
        # Ensure the partial output ends with exactly one newline before retrying.
        return (text.rstrip() + '\n')

    def _extract_observation_and_thought(self, llm_output: str) -> Optional[List[str]]:
        """Split simulator output into (observation, thought_summary), or None."""
        regex = f'''{self.thought_summary_prefix}(.*?)[ ]*{self.agent.observation_prefix}[\s]*(.*)'''
        match = re.search(regex, llm_output, re.DOTALL)
        if (not match):
            return None
        thought_summary = match.group(1).strip()
        observation = match.group(2).strip()
        return (observation, thought_summary)

    def _get_simulated_observation(self, callback_manager: CallbackManager, **full_inputs: Any) -> SimulatedObservation:
        """Run the simulator chain (retrying until parseable), then critique."""
        streaming_output = self.llm_simulator_chain.llm.streaming
        if streaming_output:
            print(('\n' + self.generatetion_prefix))
        full_output = self.llm_simulator_chain.predict(**full_inputs, stop=self.stop_seqs)
        parsed_output = self._extract_observation_and_thought(full_output)
        # Keep extending the generation until it contains a parseable
        # thought-summary + observation pair.
        while (parsed_output is None):
            full_output = self._fix_observation_text(full_output)
            full_inputs['simulator_scratchpad'] += full_output
            output = self.llm_simulator_chain.predict(**full_inputs, stop=self.stop_seqs)
            full_output += output
            parsed_output = self._extract_observation_and_thought(full_output)
        # Log only the thought part (everything before the observation prefix).
        log_output = (self.generatetion_prefix + full_output)
        log_output = log_output.split(self.agent.observation_prefix)[0].strip()
        log_output = ('\n' + log_output)
        if ((not streaming_output) and (not log_output.isspace())):
            for handler in callback_manager.handlers:
                getattr(handler, 'on_tool_end')(log_output, verbose=self.verbose)
        sim_observation = SimulatedObservation(observation=parsed_output[0], thought_summary=parsed_output[1], log=full_output)
        observation = self._critique_simulated_observation(callback_manager, sim_observation, full_inputs)
        return observation

    def _construct_simulator_scratchpad(self, intermediate_steps: List[Tuple[(AgentAction, str)]], include_simulator_log: bool=False, include_simulator_thought_summary: bool=True, include_simulator_last_step_only: bool=False):
        """Serialize past (action, observation) steps into the simulator prompt.

        NOTE(review): the triple-quoted f-strings below appear on single lines
        with spaces where the original presumably had newlines -- the literals
        are kept exactly as found; confirm formatting against upstream.
        """
        scratchpad = ''
        for (idx, (action, observation)) in enumerate(intermediate_steps):
            scratchpad += f'''Action: {action.tool} Action Input: {action.tool_input} '''
            if (idx == (len(intermediate_steps) - 1)):
                # Latest step: its observation is what the simulator must produce.
                scratchpad += '\n'
            elif (include_simulator_log and ((not include_simulator_last_step_only) or (idx == (len(intermediate_steps) - 2)))):
                scratchpad += f''' {self.generatetion_prefix}{observation.log} '''
            elif (include_simulator_thought_summary and ((not include_simulator_last_step_only) or (idx == (len(intermediate_steps) - 2)))):
                scratchpad += f''' {self.thought_summary_prefix}{observation.thought_summary} {self.agent.observation_prefix}{observation.observation} '''
            else:
                scratchpad += f''' {self.agent.observation_prefix}{observation.observation} '''
        scratchpad += self.generatetion_prefix
        return scratchpad

    def _create_critiquer_prompt(self, simulator_inputs: Dict[(str, str)], sim_observation: SimulatedObservation, critique_outputs: List[Dict[(str, str)]]) -> BasePromptTemplate:
        """Build the critiquer conversation: simulator prompt + output + rounds."""
        refnames = collect_refnames(dict(sim_prompt=self.sim_prompt_instruction, crit_prompt=self.critique_prompt))
        critique_prompt = format_prompt(self.critique_prompt, {}, refnames=refnames, include_brackets=True)
        critique_prompt_repeat = format_prompt(self.critique_prompt_repeat, {}, refnames=refnames, include_brackets=True)
        simulator_prompt_temp = self.llm_simulator_chain.prompt
        use_chat_format = isinstance(simulator_prompt_temp, ChatPromptTemplate)
        simulator_prompt = simulator_prompt_temp.format_prompt(**simulator_inputs)
        critique_prompt_messages = []
        if use_chat_format:
            # Replay the original simulator conversation.
            critique_prompt_messages += simulator_prompt.messages
        else:
            critique_prompt_messages.append(HumanMessage(content=simulator_prompt))
        simulator_output = sim_observation.log
        critique_prompt_messages.append(AIMessage(content=simulator_output))
        for (idx, crit_dict) in enumerate(critique_outputs):
            # First round uses the full critique prompt, later ones the short repeat.
            prompt = (critique_prompt if (idx == 0) else critique_prompt_repeat)
            prompt = f'''{crit_dict['validation']} {prompt}'''
            critique_prompt_messages.append(HumanMessage(content=prompt))
            if ('critique' in crit_dict):
                critique_prompt_messages.append(AIMessage(content=crit_dict['critique']))
        if (not use_chat_format):
            # Flatten to a single text prompt for completion models.
            critique_prompt_messages = '\n\n'.join([t.content for t in critique_prompt_messages])
        return critique_prompt_messages

    def critique_prefix(self) -> str:
        return 'Critique #{step}:'

    def revised_thought_summary_prefix(self) -> str:
        return 'Revised Simulator Log Summary #{step}:'

    def revised_observation_prefix(self) -> str:
        return 'Revised Observation #{step}:'

    def _extract_revised_observation_and_thought(self, critique_llm_output: str, current_step: int) -> Optional[List[str]]:
        """Parse the critiquer output of round `current_step`, or None."""
        thought_summary_prefix = self.revised_thought_summary_prefix.format(step=current_step)
        observation_prefix = self.revised_observation_prefix.format(step=current_step)
        regex = f'''{thought_summary_prefix}(.*?)[ ]*{observation_prefix}[\s]*(.*)'''
        match = re.search(regex, critique_llm_output, re.DOTALL)
        if (not match):
            return None
        revised_thought_summary = match.group(1).strip()
        revised_observation = match.group(2).strip()
        return (revised_observation, revised_thought_summary)

    def _critique_simulated_observation(self, callback_manager: CallbackManager, sim_observation: SimulatedObservation, simulator_inputs: Dict[(str, Any)]):
        """Iteratively validate and revise a simulated observation.

        Each round JSON-validates the current observation against the tool's
        declared outputs, asks the critiquer for a revision, and stops early
        once validation passes and `num_critique_steps` rounds are done.
        """
        streaming_output = self.llm_critiquer.streaming
        tool_name = simulator_inputs['current_tool']
        tool_mapping = dict(zip(self.tool_names, self.tools))
        tool = tool_mapping[tool_name]

        def get_validation_result(obs):
            # Check the observation is valid JSON matching the tool's spec.
            msg = 'The format of the output matches the specification of the tool.'
            exception = None
            try:
                outputs = json.loads(obs)
            except json.decoder.JSONDecodeError as e:
                msg = f'The output is not a valid JSON object.'
                exception = e
            if (exception is None):
                try:
                    validate_outputs(tool.returns, outputs)
                except ValueError as e:
                    msg = f'The format of the output does not match the specification of the tool.'
                    exception = e
            return (f'Format Validation: {msg}', exception)

        current_obs = sim_observation.observation
        critique_outputs = []
        sep = '\n\n'
        revised_output = None
        if (self.max_allowed_steps <= 0):
            return sim_observation
        for step in range(self.max_allowed_steps):
            step_idx = (step + 1)
            (validation_msg, exception) = get_validation_result(current_obs)
            if (exception is not None):
                validation_msg += f' {exception}'
            elif (step_idx > self.num_critique_steps):
                # Valid output and requested rounds exhausted: stop critiquing.
                break
            critique_outputs.append({'validation': validation_msg})
            critiquer_prompt = self._create_critiquer_prompt(simulator_inputs, sim_observation, critique_outputs)
            if streaming_output:
                print(f''' {validation_msg} ''')
            crit_out = self.llm_critiquer.generate([critiquer_prompt], stop=[self.critique_prefix.format(step=(step_idx + 1)), 'Action:', 'Action Input:'])
            assert (len(crit_out.generations) == 1)
            crit_out = crit_out.generations[0][0].text
            critique_outputs[(- 1)]['critique'] = crit_out
            revised_output = self._extract_revised_observation_and_thought(crit_out, current_step=step_idx)
            # Keep the previous observation if the revision failed to parse.
            current_obs = (revised_output[0] if revised_output else current_obs)
            log_output = (((sep + validation_msg) + '\n') + crit_out)
            if ((not streaming_output) and (not log_output.isspace())):
                for handler in callback_manager.handlers:
                    getattr(handler, 'on_tool_end')(log_output, verbose=self.verbose)
        if (revised_output is None):
            return sim_observation
        # Fold all critique rounds into the observation's log.
        logs = [sim_observation.log]
        for crit_dict in critique_outputs:
            logs.append(((crit_dict['validation'] + '\n') + crit_dict['critique']))
        log_output_with_critique = sep.join(logs)
        critiqued_observation = SimulatedObservation(observation=revised_output[0], thought_summary=revised_output[1], log=log_output_with_critique)
        return critiqued_observation

    def _take_next_step(self, name_to_tool_map: Dict[(str, BaseTool)], color_mapping: Dict[(str, str)], inputs: Dict[(str, str)], intermediate_steps: List[Tuple[(AgentAction, str)]], run_manager: Optional[CallbackManagerForChainRun]=None) -> Union[(AgentFinish, List[Tuple[(AgentAction, str)]])]:
        """Plan the next action(s) and obtain *simulated* observations."""
        output = self.agent.plan(intermediate_steps, **inputs)
        if isinstance(output, AgentFinish):
            return output
        actions: List[AgentAction]
        if isinstance(output, AgentAction):
            actions = [output]
        else:
            actions = output
        result = []
        for agent_action in actions:
            if run_manager:
                run_manager.on_agent_action(agent_action, verbose=self.verbose, color='green')
            if (agent_action.tool in name_to_tool_map):
                tool = name_to_tool_map[agent_action.tool]
                return_direct = tool.return_direct
                color = color_mapping[agent_action.tool]
                tool_run_kwargs = self.agent.tool_run_logging_kwargs()
                # The pending step has no observation yet -- the simulator fills it in.
                empty_observation = ''
                simulator_scratchpad = self._construct_simulator_scratchpad(((intermediate_steps + result) + [(agent_action, empty_observation)]))
                full_inputs = {'simulator_scratchpad': simulator_scratchpad, 'current_tool': agent_action.tool, 'current_tool_description': tool.description, 'toolkit_descriptions': self._get_current_toolkit_descriptions(agent_action.tool), **inputs}
                observation = run_with_input_validation(self.llm_simulator_tool.run, full_inputs, tool, agent_action.tool_input, verbose=self.verbose, color=color, **tool_run_kwargs)
                if isinstance(observation, str):
                    observation = SimulatedObservation(observation=observation, thought_summary='', log=observation)
            else:
                # Unknown tool: report via InvalidTool instead of simulating.
                tool_run_kwargs = self.agent.tool_run_logging_kwargs()
                observation_text = InvalidTool(available_tools=self.tool_names).run(agent_action.tool, verbose=self.verbose, color=None, **tool_run_kwargs)
                observation = SimulatedObservation(observation=observation_text, thought_summary='', log=observation_text)
            result.append((agent_action, observation))
        return result

    async def _atake_next_step(self, name_to_tool_map: Dict[(str, BaseTool)], color_mapping: Dict[(str, str)], inputs: Dict[(str, str)], intermediate_steps: List[Tuple[(AgentAction, str)]]) -> Union[(AgentFinish, List[Tuple[(AgentAction, str)]])]:
        """Async variant of _take_next_step.

        NOTE(review): unlike the sync version this builds the scratchpad via
        `self.agent._construct_scratchpad(..., include_last_observation=False)`
        rather than `_construct_simulator_scratchpad` -- confirm intended.
        """
        output = (await self.agent.aplan(intermediate_steps, **inputs))
        if isinstance(output, AgentFinish):
            return output
        actions: List[AgentAction]
        if isinstance(output, AgentAction):
            actions = [output]
        else:
            actions = output
        result = []
        for agent_action in actions:
            if self.callback_manager.is_async:
                (await self.callback_manager.on_agent_action(agent_action, verbose=self.verbose, color='green'))
            else:
                self.callback_manager.on_agent_action(agent_action, verbose=self.verbose, color='green')
            if (agent_action.tool in name_to_tool_map):
                tool = name_to_tool_map[agent_action.tool]
                return_direct = tool.return_direct
                color = color_mapping[agent_action.tool]
                tool_run_kwargs = self.agent.tool_run_logging_kwargs()
                empty_observation = ''
                simulator_scratchpad = self.agent._construct_scratchpad(((intermediate_steps + result) + [(agent_action, empty_observation)]), include_last_observation=False)
                simulator_scratchpad += self.generatetion_prefix
                full_inputs = {'simulator_scratchpad': simulator_scratchpad, 'current_tool': agent_action.tool, 'current_tool_description': tool.description, 'toolkit_descriptions': self._get_current_toolkit_descriptions(agent_action.tool), **inputs}
                observation = (await run_with_input_validation(self.llm_simulator_tool.arun, full_inputs, tool, agent_action.tool_input, verbose=self.verbose, color=color, **tool_run_kwargs))
                if isinstance(observation, str):
                    observation = SimulatedObservation(observation=observation, thought_summary='', log=observation)
            else:
                tool_run_kwargs = self.agent.tool_run_logging_kwargs()
                observation = (await InvalidTool(available_tools=self.tool_names).arun(agent_action.tool, verbose=self.verbose, color=None, **tool_run_kwargs))
                observation = SimulatedObservation(observation=observation, thought_summary='', log=observation)
            result.append((agent_action, observation))
        return result
def main(_):
    """Train/evaluate the UDA BERT sequence-labeling model with Horovod.

    Builds a TPUEstimator from the FLAGS configuration, then depending on
    `do_train` / `do_eval` runs training, alternating train+eval with
    checkpointing, or checkpoint-by-checkpoint evaluation.  Finally writes
    token-level predictions of the eval set to 'label_test.txt'.
    """
    hvd.init()
    # Non-zero ranks write checkpoints to their own subdirectory.
    FLAGS.model_dir = (FLAGS.model_dir if (hvd.rank() == 0) else os.path.join(FLAGS.model_dir, str(hvd.rank())))
    config = tf.ConfigProto()
    # Pin each process to its local GPU.
    config.gpu_options.visible_device_list = str(hvd.local_rank())
    tf.logging.set_verbosity(tf.logging.INFO)
    processor = raw_data_utils.get_processor(FLAGS.task_name)
    label_list = processor.get_labels()
    bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file, FLAGS.model_dropout)

    tf.gfile.MakeDirs(FLAGS.model_dir)
    # Persist the full flag configuration next to the checkpoints.
    flags_dict = tf.app.flags.FLAGS.flag_values_dict()
    with tf.gfile.Open(os.path.join(FLAGS.model_dir, 'FLAGS.json'), 'w') as ouf:
        json.dump(flags_dict, ouf)
    tf.logging.info('warmup steps {}/{}'.format(FLAGS.num_warmup_steps, FLAGS.num_train_steps))

    save_checkpoints_steps = 500
    tf.logging.info('setting save checkpoints steps to {:d}'.format(save_checkpoints_steps))
    FLAGS.iterations_per_loop = min(save_checkpoints_steps, FLAGS.iterations_per_loop)

    if (FLAGS.use_tpu and FLAGS.tpu_name):
        tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
    else:
        tpu_cluster_resolver = None
    is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2
    run_config = tf.contrib.tpu.RunConfig(cluster=tpu_cluster_resolver, master=FLAGS.master, model_dir=FLAGS.model_dir, save_checkpoints_steps=save_checkpoints_steps, keep_checkpoint_max=1, session_config=config, tpu_config=tf.contrib.tpu.TPUConfig(iterations_per_loop=FLAGS.iterations_per_loop, per_host_input_for_training=is_per_host))

    model_fn = uda.model_fn_builder(bert_config=bert_config, init_checkpoint=FLAGS.init_checkpoint, learning_rate=FLAGS.learning_rate, clip_norm=FLAGS.clip_norm, num_train_steps=FLAGS.num_train_steps, num_warmup_steps=FLAGS.num_warmup_steps, use_tpu=FLAGS.use_tpu, use_one_hot_embeddings=FLAGS.use_one_hot_embeddings, num_labels=len(label_list), unsup_ratio=FLAGS.unsup_ratio, uda_coeff=FLAGS.uda_coeff, tsa=FLAGS.tsa, print_feature=False, print_structure=False)
    estimator = tf.contrib.tpu.TPUEstimator(use_tpu=FLAGS.use_tpu, model_fn=model_fn, config=run_config, params={'model_dir': FLAGS.model_dir}, train_batch_size=FLAGS.train_batch_size, eval_batch_size=FLAGS.eval_batch_size, predict_batch_size=FLAGS.eval_batch_size)

    if FLAGS.do_train:
        tf.logging.info(' >>> sup data dir : {}'.format(FLAGS.sup_train_data_dir))
        if (FLAGS.unsup_ratio > 0):
            tf.logging.info(' >>> unsup data dir : {}'.format(FLAGS.unsup_data_dir))
        train_input_fn = proc_data_utils.training_input_fn_builder(FLAGS.sup_train_data_dir, FLAGS.unsup_data_dir, FLAGS.aug_ops, FLAGS.aug_copy, FLAGS.unsup_ratio)
        train_size = processor.get_train_size(FLAGS.raw_data_dir)
        train_steps = int((train_size / FLAGS.train_batch_size))

    if FLAGS.do_eval:
        tf.logging.info(' >>> dev data dir : {}'.format(FLAGS.eval_data_dir))
        eval_input_fn = proc_data_utils.evaluation_input_fn_builder(FLAGS.eval_data_dir, 'clas')
        eval_size = processor.get_dev_size(FLAGS.raw_data_dir)
        eval_steps = int((eval_size / FLAGS.eval_batch_size))
        # Also evaluate on the supervised training data for monitoring.
        train_eval_input_fn = proc_data_utils.evaluation_input_fn_builder(FLAGS.sup_train_data_dir, 'clas')

    if (FLAGS.do_train and FLAGS.do_eval):
        # Broadcast initial variables from rank 0 to all workers.
        hooks = [hvd.BroadcastGlobalVariablesHook(0)]
        tf.logging.info('***** Running training & evaluation *****')
        tf.logging.info(' Supervised batch size = %d', FLAGS.train_batch_size)
        tf.logging.info(' Unsupervised batch size = %d', (FLAGS.train_batch_size * FLAGS.unsup_ratio))
        tf.logging.info(' training size = %d', train_size)
        tf.logging.info(' training num steps = %d', train_steps)
        tf.logging.info(' evaluation batch size = %d', FLAGS.eval_batch_size)
        tf.logging.info(' dev num steps = %d', eval_steps)
        best_acc = 0
        # Alternate: train for one checkpoint interval, then evaluate.
        for _ in range(0, FLAGS.num_train_steps, save_checkpoints_steps):
            tf.logging.info('*** Running training ***')
            estimator.train(input_fn=train_input_fn, steps=save_checkpoints_steps, hooks=hooks)
            tf.logging.info('*** Running evaluation ***')
            train_result = estimator.evaluate(input_fn=train_eval_input_fn, steps=train_steps)
            tf.logging.info('>> Train Results:')
            for key in train_result.keys():
                tf.logging.info(' %s = %s', key, str(train_result[key]))
                train_result[key] = train_result[key].item()
            dev_result = estimator.evaluate(input_fn=eval_input_fn, steps=eval_steps)
            tf.logging.info('>> Results:')
            for key in dev_result.keys():
                tf.logging.info(' %s = %s', key, str(dev_result[key]))
                dev_result[key] = dev_result[key].item()
            best_acc = max(best_acc, dev_result['eval_precision'])
        tf.logging.info('***** Final evaluation result *****')
        tf.logging.info('Best acc: {:.3f}\n\n'.format(best_acc))
    elif FLAGS.do_train:
        tf.logging.info('***** Running training *****')
        tf.logging.info(' Supervised batch size = %d', FLAGS.train_batch_size)
        tf.logging.info(' Unsupervised batch size = %d', (FLAGS.train_batch_size * FLAGS.unsup_ratio))
        tf.logging.info(' Num steps = %d', FLAGS.num_train_steps)
        estimator.train(input_fn=train_input_fn, max_steps=FLAGS.num_train_steps)
    elif FLAGS.do_eval:
        tf.logging.info('***** Running evaluation *****')
        tf.logging.info(' Base evaluation batch size = %d', FLAGS.eval_batch_size)
        tf.logging.info(' Num steps = %d', eval_steps)
        checkpoint_state = tf.train.get_checkpoint_state(FLAGS.model_dir)
        best_acc = 0
        for ckpt_path in checkpoint_state.all_model_checkpoint_paths:
            if (not tf.gfile.Exists((ckpt_path + '.data-00000-of-00001'))):
                tf.logging.info('Warning: checkpoint {:s} does not exist'.format(ckpt_path))
                continue
            tf.logging.info('Evaluating {:s}'.format(ckpt_path))
            dev_result = estimator.evaluate(input_fn=eval_input_fn, steps=eval_steps, checkpoint_path=ckpt_path)
            tf.logging.info('>> Results:')
            for key in dev_result.keys():
                tf.logging.info(' %s = %s', key, str(dev_result[key]))
                dev_result[key] = dev_result[key].item()
            best_acc = max(best_acc, dev_result['eval_precision'])
        tf.logging.info('***** Final evaluation result *****')
        tf.logging.info('Best acc: {:.3f}\n\n'.format(best_acc))

    # Write token-level predictions for the eval set.
    # NOTE: requires do_eval (eval_input_fn) to be set.
    from utils import tokenization
    tokenizer = tokenization.FullTokenizer(vocab_file=FLAGS.vocab_file, do_lower_case=True)
    id2label = dict(enumerate(label_list))  # equivalent to zip(range(n), labels)
    result = estimator.predict(input_fn=eval_input_fn)
    with open('label_test.txt', 'w') as writer:
        # Loop variable renamed from `re` (was shadowing the `re` module).
        for row in result:
            sentence = row['input_ids']
            gold = row['label_ids']
            prediction = row['predict']
            # Clamp out-of-range label ids (>= 34 -- presumably the number of
            # valid labels; TODO confirm) to 0 before the id2label lookup.
            for (idx, label_id) in enumerate(gold):
                if (label_id >= 34):
                    gold[idx] = 0
            # BUG FIX: this loop iterated `prediction` but zeroed
            # `gold[gold_index]`, so out-of-range prediction ids were never
            # clamped and would KeyError in the id2label lookup below.
            for (idx, label_id) in enumerate(prediction):
                if (label_id >= 34):
                    prediction[idx] = 0
            # Build and write this example's lines immediately instead of
            # accumulating one ever-growing string (was O(n^2)).
            output_line = ''
            for (w, gold_label, label) in zip(tokenizer.convert_ids_to_tokens([int(s) for s in sentence]), [id2label[id] for id in gold], [id2label[id] for id in prediction]):
                if (w == '[PAD]'):
                    continue
                output_line = ((((((output_line + w) + ' ') + gold_label) + ' ') + label) + '\n')
            output_line += '\n'
            writer.write(output_line)
def save_figure_to_numpy(fig):
    """Convert a drawn matplotlib figure to a CHW uint8 RGB array.

    Args:
        fig: a matplotlib Figure whose canvas has already been drawn.

    Returns:
        np.ndarray of shape (3, height, width), dtype uint8.
    """
    # np.frombuffer replaces the deprecated np.fromstring for raw byte input;
    # .copy() keeps the result writable (frombuffer views are read-only),
    # matching the old fromstring behavior.
    data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8).copy()
    # Canvas reports (width, height); numpy wants (rows, cols) = (h, w).
    data = data.reshape((fig.canvas.get_width_height()[::(- 1)] + (3,)))
    # HWC -> CHW.
    data = np.transpose(data, (2, 0, 1))
    return data
def find_tested_models(test_file):
    """Extract the model class names listed in a test file's `all_model_classes`.

    Args:
        test_file: filename of the test module, relative to PATH_TO_TESTS.

    Returns:
        List of model class names, or None when no `all_model_classes`
        assignment is found in the file.
    """
    path = os.path.join(PATH_TO_TESTS, test_file)
    with open(path, 'r', encoding='utf-8', newline='\n') as handle:
        content = handle.read()
    # Match both the nested-tuple and the flat-tuple assignment forms.
    matches = re.findall(r'all_model_classes\s+=\s+\(\s*\(([^\)]*)\)', content)
    matches += re.findall(r'all_model_classes\s+=\s+\(([^\)]*)\)', content)
    if not matches:
        # No assignment found: fall through to None, as callers expect.
        return None
    return [
        name
        for entry in matches
        for name in (part.strip() for part in entry.split(','))
        if name
    ]