idx int64 0 251k | question stringlengths 53 3.53k | target stringlengths 5 1.23k | len_question int64 20 893 | len_target int64 3 238 |
|---|---|---|---|---|
236,300 | def retrieve ( ) : a = TpPd ( pd = 0x3 ) b = MessageType ( mesType = 0x1c ) # 00011100 packet = a / b return packet | RETRIEVE Section 9 . 3 . 20 | 43 | 10 |
236,301 | def retrieveAcknowledge ( ) : a = TpPd ( pd = 0x3 ) b = MessageType ( mesType = 0x1d ) # 00011101 packet = a / b return packet | RETRIEVE ACKNOWLEDGE Section 9 . 3 . 21 | 47 | 15 |
236,302 | def startCc ( CallControlCapabilities_presence = 0 ) : a = TpPd ( pd = 0x3 ) b = MessageType ( mesType = 0x9 ) # 00001001 packet = a / b if CallControlCapabilities_presence is 1 : c = CallControlCapabilitiesHdr ( ieiCCC = 0x15 , eightBitCCC = 0x0 ) packet = paclet / c return packet | START CC Section 9 . 3 . 23a | 94 | 10 |
236,303 | def startDtmf ( ) : a = TpPd ( pd = 0x3 ) b = MessageType ( mesType = 0x35 ) # 00110101 c = KeypadFacilityHdr ( ieiKF = 0x2C , eightBitKF = 0x0 ) packet = a / b / c return packet | START DTMF Section 9 . 3 . 24 | 76 | 11 |
236,304 | def startDtmfReject ( ) : a = TpPd ( pd = 0x3 ) b = MessageType ( mesType = 0x37 ) # 00110111 c = Cause ( ) packet = a / b / c return packet | START DTMF REJECT Section 9 . 3 . 26 | 55 | 13 |
236,305 | def status ( AuxiliaryStates_presence = 0 ) : a = TpPd ( pd = 0x3 ) b = MessageType ( mesType = 0x3d ) # 00111101 c = Cause ( ) d = CallState ( ) packet = a / b / c / d if AuxiliaryStates_presence is 1 : e = AuxiliaryStatesHdr ( ieiAS = 0x24 , eightBitAS = 0x0 ) packet = packet / e return packet | STATUS Section 9 . 3 . 27 | 105 | 8 |
236,306 | def statusEnquiry ( ) : a = TpPd ( pd = 0x3 ) b = MessageType ( mesType = 0x34 ) # 00110100 packet = a / b return packet | STATUS ENQUIRY Section 9 . 3 . 28 | 45 | 12 |
236,307 | def stopDtmf ( ) : a = TpPd ( pd = 0x3 ) b = MessageType ( mesType = 0x31 ) # 00110001 packet = a / b return packet | STOP DTMF Section 9 . 3 . 29 | 45 | 11 |
236,308 | def stopDtmfAcknowledge ( ) : a = TpPd ( pd = 0x3 ) b = MessageType ( mesType = 0x32 ) # 00110010 packet = a / b return packet | STOP DTMF ACKNOWLEDGE Section 9 . 3 . 30 | 48 | 16 |
236,309 | def userInformation ( MoreData_presence = 0 ) : a = TpPd ( pd = 0x3 ) b = MessageType ( mesType = 0x20 ) # 000100000 c = UserUser ( ) packet = a / b / c if MoreData_presence is 1 : d = MoreDataHdr ( ieiMD = 0xA0 , eightBitMD = 0x0 ) packet = packet / d return packet | USER INFORMATION Section 9 . 3 . 31 | 96 | 8 |
236,310 | def attachRequest ( PTmsiSignature_presence = 0 , GprsTimer_presence = 0 , TmsiStatus_presence = 0 ) : a = TpPd ( pd = 0x3 ) b = MessageType ( mesType = 0x1 ) # 0000001 c = MsNetworkCapability ( ) d = AttachTypeAndCiphKeySeqNr ( ) f = DrxParameter ( ) g = MobileId ( ) h = RoutingAreaIdentification ( ) i = MsRadioAccessCapability ( ) packet = a / b / c / d / f / g / h / i if PTmsiSignature_presence is 1 : j = PTmsiSignature ( ieiPTS = 0x19 ) packet = packet / j if GprsTimer_presence is 1 : k = GprsTimer ( ieiGT = 0x17 ) packet = packet / k if TmsiStatus_presence is 1 : l = TmsiStatus ( ieiTS = 0x9 ) packet = packet / l return packet | ATTACH REQUEST Section 9 . 4 . 1 | 234 | 10 |
236,311 | def attachAccept ( PTmsiSignature_presence = 0 , GprsTimer_presence = 0 , MobileId_presence = 0 , MobileId_presence1 = 0 , GmmCause_presence = 0 ) : a = TpPd ( pd = 0x3 ) b = MessageType ( mesType = 0x2 ) # 00000010 c = AttachResult ( ) d = ForceToStandby ( ) e = GprsTimer ( ) f = RadioPriorityAndSpareHalfOctets ( ) h = RoutingAreaIdentification ( ) packet = a / b / c / d / e / f / h if PTmsiSignature_presence is 1 : i = PTmsiSignature ( ieiPTS = 0x19 ) packet = packet / i if GprsTimer_presence is 1 : j = GprsTimer ( ieiGT = 0x17 ) packet = packet / j if MobileId_presence is 1 : k = MobileIdHdr ( ieiMI = 0x18 , eightBitMI = 0x0 ) packet = packet / k if MobileId_presence1 is 1 : l = MobileIdHdr ( ieiMI = 0x23 , eightBitMI = 0x0 ) packet = packet / l if GmmCause_presence is 1 : m = GmmCause ( ieiGC = 0x25 ) packet = packet / m return packet | ATTACH ACCEPT Section 9 . 4 . 2 | 312 | 11 |
236,312 | def attachComplete ( ) : a = TpPd ( pd = 0x3 ) b = MessageType ( mesType = 0x3 ) # 00000011 packet = a / b return packet | ATTACH COMPLETE Section 9 . 4 . 3 | 42 | 10 |
236,313 | def detachRequest ( GmmCause_presence = 0 ) : a = TpPd ( pd = 0x3 ) b = MessageType ( mesType = 0x5 ) # 00000101 c = DetachTypeAndForceToStandby ( ) packet = a / b / c if GmmCause_presence is 1 : e = GmmCause ( ieiGC = 0x25 ) packet = packet / e return packet | DETACH REQUEST Section 9 . 4 . 5 | 94 | 11 |
236,314 | def detachRequestMsOriginating ( ) : a = TpPd ( pd = 0x3 ) b = MessageType ( mesType = 0x5 ) # 00000101 c = DetachTypeAndSpareHalfOctets ( ) packet = a / b / c return packet | DETACH REQUEST Section 9 . 4 . 5 . 2 | 61 | 13 |
236,315 | def detachAcceptMsTerminated ( ) : a = TpPd ( pd = 0x3 ) b = MessageType ( mesType = 0x6 ) # 00000110 packet = a / b return packet | DETACH ACCEPT Section 9 . 4 . 6 . 1 | 46 | 14 |
236,316 | def detachAcceptMsOriginating ( ) : a = TpPd ( pd = 0x3 ) b = MessageType ( mesType = 0x6 ) # 00000110 c = ForceToStandbyAndSpareHalfOctets ( ) packet = a / b / c return packet | DETACH ACCEPT Section 9 . 4 . 6 . 2 | 62 | 14 |
236,317 | def ptmsiReallocationCommand ( PTmsiSignature_presence = 0 ) : a = TpPd ( pd = 0x3 ) b = MessageType ( mesType = 0x10 ) # 00010000 c = MobileId ( ) d = RoutingAreaIdentification ( ) e = ForceToStandbyAndSpareHalfOctets ( ) packet = a / b / c / d / e if PTmsiSignature_presence is 1 : g = PTmsiSignature ( ieiPTS = 0x19 ) packet = packet / g return packet | P - TMSI REALLOCATION COMMAND Section 9 . 4 . 7 | 127 | 17 |
236,318 | def ptmsiReallocationComplete ( ) : a = TpPd ( pd = 0x3 ) b = MessageType ( mesType = 0x11 ) # 00010001 packet = a / b return packet | P - TMSI REALLOCATION COMPLETE Section 9 . 4 . 8 | 48 | 17 |
236,319 | def authenticationAndCipheringRequest ( AuthenticationParameterRAND_presence = 0 , CiphKeySeqNr_presence = 0 ) : a = TpPd ( pd = 0x3 ) b = MessageType ( mesType = 0x12 ) # 00010010 d = CipheringAlgorithmAndImeisvRequest ( ) e = ForceToStandbyAndAcReferenceNumber ( ) packet = a / b / d / e if AuthenticationParameterRAND_presence is 1 : g = AuthenticationParameterRAND ( ieiAPR = 0x21 ) packet = packet / g if CiphKeySeqNr_presence is 1 : h = CiphKeySeqNrHdr ( ieiCKSN = 0x08 , eightBitCKSN = 0x0 ) packet = packet / h return packet | AUTHENTICATION AND CIPHERING REQUEST Section 9 . 4 . 9 | 182 | 18 |
236,320 | def authenticationAndCipheringResponse ( AuthenticationParameterSRES_presence = 0 , MobileId_presence = 0 ) : a = TpPd ( pd = 0x3 ) b = MessageType ( mesType = 0x13 ) # 00010011 c = AcReferenceNumberAndSpareHalfOctets ( ) packet = a / b / c if AuthenticationParameterSRES_presence is 1 : e = AuthenticationParameterSRES ( ieiAPS = 0x22 ) packet = packet / e if MobileId_presence is 1 : f = MobileIdHdr ( ieiMI = 0x23 , eightBitMI = 0x0 ) packet = packet / f return packet | AUTHENTICATION AND CIPHERING RESPONSE Section 9 . 4 . 10 | 147 | 19 |
236,321 | def authenticationAndCipheringReject ( ) : a = TpPd ( pd = 0x3 ) b = MessageType ( mesType = 0x14 ) # 00010100 packet = a / b return packet | AUTHENTICATION AND CIPHERING REJECT Section 9 . 4 . 11 | 48 | 18 |
236,322 | def routingAreaUpdateRequest ( PTmsiSignature_presence = 0 , GprsTimer_presence = 0 , DrxParameter_presence = 0 , TmsiStatus_presence = 0 ) : a = TpPd ( pd = 0x3 ) b = MessageType ( mesType = 0x8 ) # 00001000 c = UpdateTypeAndCiphKeySeqNr ( ) e = RoutingAreaIdentification ( ) f = MsNetworkCapability ( ) packet = a / b / c / e / f if PTmsiSignature_presence is 1 : g = PTmsiSignature ( ieiPTS = 0x19 ) packet = packet / g if GprsTimer_presence is 1 : h = GprsTimer ( ieiGT = 0x17 ) packet = packet / h if DrxParameter_presence is 1 : i = DrxParameter ( ieiDP = 0x27 ) packet = packet / i if TmsiStatus_presence is 1 : j = TmsiStatus ( ieiTS = 0x9 ) packet = packet / j return packet | ROUTING AREA UPDATE REQUEST Section 9 . 4 . 14 | 245 | 14 |
236,323 | def routingAreaUpdateAccept ( PTmsiSignature_presence = 0 , MobileId_presence = 0 , MobileId_presence1 = 0 , ReceiveNpduNumbersList_presence = 0 , GprsTimer_presence = 0 , GmmCause_presence = 0 ) : a = TpPd ( pd = 0x3 ) b = MessageType ( mesType = 0x9 ) # 00001001 c = ForceToStandbyAndUpdateResult ( ) e = GprsTimer ( ) f = RoutingAreaIdentification ( ) packet = a / b / c / e / f if PTmsiSignature_presence is 1 : g = PTmsiSignature ( ieiPTS = 0x19 ) packet = packet / g if MobileId_presence is 1 : h = MobileIdHdr ( ieiMI = 0x18 , eightBitMI = 0x0 ) packet = packet / h if MobileId_presence1 is 1 : i = MobileIdHdr ( ieiMI = 0x23 , eightBitMI = 0x0 ) packet = packet / i if ReceiveNpduNumbersList_presence is 1 : j = ReceiveNpduNumbersList ( ieiRNNL = 0x26 ) packet = packet / j if GprsTimer_presence is 1 : k = GprsTimer ( ieiGT = 0x17 ) packet = packet / k if GmmCause_presence is 1 : l = GmmCause ( ieiGC = 0x25 ) packet = packet / l return packet | ROUTING AREA UPDATE ACCEPT Section 9 . 4 . 15 | 345 | 15 |
236,324 | def routingAreaUpdateComplete ( ReceiveNpduNumbersList_presence = 0 ) : a = TpPd ( pd = 0x3 ) b = MessageType ( mesType = 0xa ) # 00001010 packet = a / b if ReceiveNpduNumbersList_presence is 1 : c = ReceiveNpduNumbersList ( ieiRNNL = 0x26 ) packet = packet / c return packet | ROUTING AREA UPDATE COMPLETE Section 9 . 4 . 16 | 95 | 14 |
236,325 | def routingAreaUpdateReject ( ) : a = TpPd ( pd = 0x3 ) b = MessageType ( mesType = 0xb ) # 00001011 c = GmmCause ( ) d = ForceToStandbyAndSpareHalfOctets ( ) packet = a / b / c / d return packet | ROUTING AREA UPDATE REJECT Section 9 . 4 . 17 | 70 | 14 |
236,326 | def gmmStatus ( ) : a = TpPd ( pd = 0x3 ) b = MessageType ( mesType = 0x20 ) # 00100000 c = GmmCause ( ) packet = a / b / c return packet | GMM STATUS Section 9 . 4 . 18 | 53 | 10 |
236,327 | def activatePdpContextRequest ( AccessPointName_presence = 0 , ProtocolConfigurationOptions_presence = 0 ) : a = TpPd ( pd = 0x8 ) b = MessageType ( mesType = 0x41 ) # 01000001 c = NetworkServiceAccessPointIdentifier ( ) d = LlcServiceAccessPointIdentifier ( ) e = QualityOfService ( ) f = PacketDataProtocolAddress ( ) packet = a / b / c / d / e / f if AccessPointName_presence is 1 : g = AccessPointName ( ieiAPN = 0x28 ) packet = packet / g if ProtocolConfigurationOptions_presence is 1 : h = ProtocolConfigurationOptions ( ieiPCO = 0x27 ) packet = packet / h return packet | ACTIVATE PDP CONTEXT REQUEST Section 9 . 5 . 1 | 169 | 15 |
236,328 | def activatePdpContextAccept ( PacketDataProtocolAddress_presence = 0 , ProtocolConfigurationOptions_presence = 0 ) : a = TpPd ( pd = 0x8 ) b = MessageType ( mesType = 0x42 ) # 01000010 c = LlcServiceAccessPointIdentifier ( ) d = QualityOfService ( ) e = RadioPriorityAndSpareHalfOctets ( ) packet = a / b / c / d / e if PacketDataProtocolAddress_presence is 1 : f = PacketDataProtocolAddress ( ieiPDPA = 0x2B ) packet = packet / f if ProtocolConfigurationOptions_presence is 1 : g = ProtocolConfigurationOptions ( ieiPCO = 0x27 ) packet = packet / g return packet | ACTIVATE PDP CONTEXT ACCEPT Section 9 . 5 . 2 | 170 | 16 |
236,329 | def requestPdpContextActivation ( AccessPointName_presence = 0 ) : a = TpPd ( pd = 0x8 ) b = MessageType ( mesType = 0x44 ) # 01000100 c = PacketDataProtocolAddress ( ) packet = a / b / c if AccessPointName_presence is 1 : d = AccessPointName ( ieiAPN = 0x28 ) packet = packet / d return packet | REQUEST PDP CONTEXT ACTIVATION Section 9 . 5 . 4 | 97 | 15 |
236,330 | def requestPdpContextActivationReject ( ) : a = TpPd ( pd = 0x8 ) b = MessageType ( mesType = 0x45 ) # 01000101 c = SmCause ( ) packet = a / b / c return packet | REQUEST PDP CONTEXT ACTIVATION REJECT Section 9 . 5 . 5 | 57 | 17 |
236,331 | def modifyPdpContextRequest ( ) : a = TpPd ( pd = 0x8 ) b = MessageType ( mesType = 0x48 ) # 01001000 c = RadioPriorityAndSpareHalfOctets ( ) d = LlcServiceAccessPointIdentifier ( ) e = QualityOfService ( ) packet = a / b / c / d / e return packet | MODIFY PDP CONTEXT REQUEST Section 9 . 5 . 6 | 83 | 15 |
236,332 | def modifyPdpContextAccept ( ) : a = TpPd ( pd = 0x8 ) b = MessageType ( mesType = 0x45 ) # 01000101 packet = a / b return packet | MODIFY PDP CONTEXT ACCEPT Section 9 . 5 . 7 | 46 | 16 |
236,333 | def deactivatePdpContextAccept ( ) : a = TpPd ( pd = 0x8 ) b = MessageType ( mesType = 0x47 ) # 01000111 packet = a / b return packet | DEACTIVATE PDP CONTEXT ACCEPT Section 9 . 5 . 9 | 47 | 17 |
236,334 | def activateAaPdpContextRequest ( AccessPointName_presence = 0 , ProtocolConfigurationOptions_presence = 0 , GprsTimer_presence = 0 ) : a = TpPd ( pd = 0x8 ) b = MessageType ( mesType = 0x50 ) # 01010000 c = NetworkServiceAccessPointIdentifier ( ) d = LlcServiceAccessPointIdentifier ( ) e = QualityOfService ( ) f = PacketDataProtocolAddress ( ) packet = a / b / c / d / e / f if AccessPointName_presence is 1 : g = AccessPointName ( ieiAPN = 0x28 ) packet = packet / g if ProtocolConfigurationOptions_presence is 1 : h = ProtocolConfigurationOptions ( ieiPCO = 0x27 ) packet = packet / h if GprsTimer_presence is 1 : i = GprsTimer ( ieiGT = 0x29 ) packet = packet / i return packet | ACTIVATE AA PDP CONTEXT REQUEST Section 9 . 5 . 10 | 212 | 16 |
236,335 | def activateAaPdpContextAccept ( ProtocolConfigurationOptions_presence = 0 , GprsTimer_presence = 0 ) : a = TpPd ( pd = 0x8 ) b = MessageType ( mesType = 0x51 ) # 01010001 c = LlcServiceAccessPointIdentifier ( ) d = QualityOfService ( ) e = MobileId ( ) f = PacketDataProtocolAddress ( ) g = RadioPriorityAndSpareHalfOctets ( ) packet = a / b / c / d / e / f / g if ProtocolConfigurationOptions_presence is 1 : i = ProtocolConfigurationOptions ( ieiPCO = 0x27 ) packet = packet / i if GprsTimer_presence is 1 : j = GprsTimer ( ieiGT = 0x29 ) packet = packet / j return packet | ACTIVATE AA PDP CONTEXT ACCEPT Section 9 . 5 . 11 | 184 | 17 |
236,336 | def activateAaPdpContextReject ( ProtocolConfigurationOptions_presence = 0 ) : a = TpPd ( pd = 0x8 ) b = MessageType ( mesType = 0x52 ) # 01010010 c = SmCause ( ) packet = a / b / c if ProtocolConfigurationOptions_presence is 1 : d = ProtocolConfigurationOptions ( ieiPCO = 0x27 ) packet = packet / d return packet | ACTIVATE AA PDP CONTEXT REJECT Section 9 . 5 . 12 | 95 | 16 |
236,337 | def deactivateAaPdpContextRequest ( ) : a = TpPd ( pd = 0x8 ) b = MessageType ( mesType = 0x53 ) # 01010011 c = AaDeactivationCauseAndSpareHalfOctets ( ) packet = a / b / c return packet | DEACTIVATE AA PDP CONTEXT REQUEST Section 9 . 5 . 13 | 66 | 17 |
236,338 | def deactivateAaPdpContextAccept ( ) : a = TpPd ( pd = 0x8 ) b = MessageType ( mesType = 0x54 ) # 01010100 packet = a / b return packet | DEACTIVATE AA PDP CONTEXT ACCEPT Section 9 . 5 . 14 | 49 | 18 |
236,339 | def ls ( obj = None ) : if obj is None : import builtins all = builtins . __dict__ . copy ( ) all . update ( globals ( ) ) objlst = sorted ( conf . layers , key = lambda x : x . __name__ ) for o in objlst : print ( "%-10s : %s" % ( o . __name__ , o . name ) ) else : if isinstance ( obj , type ) and issubclass ( obj , Packet ) : for f in obj . fields_desc : print ( "%-10s : %-20s = (%s)" % ( f . name , f . __class__ . __name__ , repr ( f . default ) ) ) elif isinstance ( obj , Packet ) : for f in obj . fields_desc : print ( "%-10s : %-20s = %-15s (%s)" % ( f . name , f . __class__ . __name__ , repr ( getattr ( obj , f . name ) ) , repr ( f . default ) ) ) if not isinstance ( obj . payload , NoPayload ) : print ( "--" ) ls ( obj . payload ) else : print ( "Not a packet class. Type 'ls()' to list packet classes." ) | List available layers or infos on a given layer | 280 | 10 |
236,340 | def haslayer ( self , cls ) : if self . __class__ == cls or self . __class__ . __name__ == cls : return 1 for f in self . packetfields : fvalue_gen = self . getfieldval ( f . name ) if fvalue_gen is None : continue if not f . islist : fvalue_gen = SetGen ( fvalue_gen , _iterpacket = 0 ) for fvalue in fvalue_gen : if isinstance ( fvalue , Packet ) : ret = fvalue . haslayer ( cls ) if ret : return ret return self . payload . haslayer ( cls ) | true if self has a layer that is an instance of cls . Superseded by cls in self syntax . | 141 | 24 |
236,341 | def getlayer ( self , cls , nb = 1 , _track = None ) : if type ( cls ) is int : nb = cls + 1 cls = None if type ( cls ) is str and "." in cls : ccls , fld = cls . split ( "." , 1 ) else : ccls , fld = cls , None if cls is None or self . __class__ == cls or self . __class__ . name == ccls : if nb == 1 : if fld is None : return self else : return self . getfieldval ( fld ) else : nb -= 1 for f in self . packetfields : fvalue_gen = self . getfieldval ( f . name ) if fvalue_gen is None : continue if not f . islist : fvalue_gen = SetGen ( fvalue_gen , _iterpacket = 0 ) for fvalue in fvalue_gen : if isinstance ( fvalue , Packet ) : track = [ ] ret = fvalue . getlayer ( cls , nb , _track = track ) if ret is not None : return ret nb = track [ 0 ] return self . payload . getlayer ( cls , nb , _track = _track ) | Return the nb^th layer that is an instance of cls . | 281 | 15 |
236,342 | def show ( self , indent = 3 , lvl = "" , label_lvl = "" ) : ct = conf . color_theme print ( "%s%s %s %s" % ( label_lvl , ct . punct ( "###[" ) , ct . layer_name ( self . name ) , ct . punct ( "]###" ) ) ) for f in self . fields_desc : if isinstance ( f , ConditionalField ) and not f . _evalcond ( self ) : continue if isinstance ( f , Emph ) or f in conf . emph : ncol = ct . emph_field_name vcol = ct . emph_field_value else : ncol = ct . field_name vcol = ct . field_value fvalue = self . getfieldval ( f . name ) if isinstance ( fvalue , Packet ) or ( f . islist and f . holds_packets and type ( fvalue ) is list ) : print ( "%s \\%-10s\\" % ( label_lvl + lvl , ncol ( f . name ) ) ) fvalue_gen = SetGen ( fvalue , _iterpacket = 0 ) for fvalue in fvalue_gen : fvalue . show ( indent = indent , label_lvl = label_lvl + lvl + " |" ) else : begn = "%s %-10s%s " % ( label_lvl + lvl , ncol ( f . name ) , ct . punct ( "=" ) , ) reprval = f . i2repr ( self , fvalue ) if type ( reprval ) is str : reprval = reprval . replace ( "\n" , "\n" + " " * ( len ( label_lvl ) + len ( lvl ) + len ( f . name ) + 4 ) ) print ( "%s%s" % ( begn , vcol ( reprval ) ) ) self . payload . show ( indent = indent , lvl = lvl + ( " " * indent * self . show_indent ) , label_lvl = label_lvl ) | Prints a hierarchical view of the packet . indent gives the size of indentation for each layer . | 453 | 20 |
236,343 | def decode_payload_as ( self , cls ) : s = bytes ( self . payload ) self . payload = cls ( s , _internal = 1 , _underlayer = self ) pp = self while pp . underlayer is not None : pp = pp . underlayer self . payload . dissection_done ( pp ) | Reassembles the payload and decode it using another packet class | 71 | 12 |
236,344 | def libnet ( self ) : print ( "libnet_build_%s(" % self . __class__ . name . lower ( ) ) det = self . __class__ ( str ( self ) ) for f in self . fields_desc : val = det . getfieldval ( f . name ) if val is None : val = 0 elif type ( val ) is int : val = str ( val ) else : val = '"%s"' % str ( val ) print ( "\t%s, \t\t/* %s */" % ( val , f . name ) ) print ( ");" ) | Not ready yet . Should give the necessary C code that interfaces with libnet to recreate the packet | 133 | 19 |
236,345 | def dissect ( self , b ) : if len ( b ) < 8 : raise ValueError ( "given packet too short" ) return super ( DoIPRawPacket , self ) . dissect ( b ) | Dissect an incoming DoIP packet . | 43 | 8 |
236,346 | def inet_pton ( af , addr ) : print ( 'hello' ) if af == socket . AF_INET : return inet_aton ( addr ) elif af == socket . AF_INET6 : # IPv6: The use of "::" indicates one or more groups of 16 bits of zeros. # We deal with this form of wildcard using a special marker. JOKER = b"*" while b"::" in addr : addr = addr . replace ( b"::" , b":" + JOKER + b":" ) joker_pos = None # The last part of an IPv6 address can be an IPv4 address ipv4_addr = None if b"." in addr : ipv4_addr = addr . split ( b":" ) [ - 1 ] result = b"" parts = addr . split ( b":" ) for part in parts : if part == JOKER : # Wildcard is only allowed once if joker_pos is None : joker_pos = len ( result ) else : raise Exception ( "Illegal syntax for IP address" ) elif part == ipv4_addr : # FIXME: Make sure IPv4 can only be last part # FIXME: inet_aton allows IPv4 addresses with less than 4 octets result += socket . inet_aton ( ipv4_addr ) else : # Each part must be 16bit. Add missing zeroes before decoding. try : result += part . rjust ( 4 , b"0" ) . decode ( "hex" ) except TypeError : raise Exception ( "Illegal syntax for IP address" ) # If there's a wildcard, fill up with zeros to reach 128bit (16 bytes) if JOKER in addr : result = ( result [ : joker_pos ] + b"\x00" * ( 16 - len ( result ) ) + result [ joker_pos : ] ) if len ( result ) != 16 : raise Exception ( "Illegal syntax for IP address" ) return result else : raise Exception ( "Address family not supported" ) | Convert an IP address from text representation into binary form | 445 | 11 |
236,347 | def inet_ntop ( af , addr ) : if af == socket . AF_INET : return inet_ntoa ( addr ) elif af == socket . AF_INET6 : # IPv6 addresses have 128bits (16 bytes) if len ( addr ) != 16 : raise Exception ( "Illegal syntax for IP address" ) parts = [ ] for left in [ 0 , 2 , 4 , 6 , 8 , 10 , 12 , 14 ] : try : value = struct . unpack ( "!H" , addr [ left : left + 2 ] ) [ 0 ] hexstr = hex ( value ) [ 2 : ] except TypeError : raise Exception ( "Illegal syntax for IP address" ) parts . append ( hexstr . lstrip ( "0" ) . lower ( ) ) result = b":" . join ( parts ) while b":::" in result : result = result . replace ( b":::" , b"::" ) # Leaving out leading and trailing zeros is only allowed with :: if result . endswith ( b":" ) and not result . endswith ( b"::" ) : result = result + b"0" if result . startswith ( b":" ) and not result . startswith ( b"::" ) : result = b"0" + result return result else : raise Exception ( "Address family not supported yet" ) | Convert an IP address from binary form into text represenation | 293 | 14 |
236,348 | def strand ( s1 , s2 ) : return "" . join ( map ( lambda x , y : chr ( ord ( x ) & ord ( y ) ) , s1 , s2 ) ) | Returns the binary AND of the 2 provided strings s1 and s2 . s1 and s2 must be of same length . | 43 | 26 |
236,349 | def pkcs_mgf1 ( mgfSeed , maskLen , h ) : # steps are those of Appendix B.2.1 if not h in _hashFuncParams : warning ( "pkcs_mgf1: invalid hash (%s) provided" ) return None hLen = _hashFuncParams [ h ] [ 0 ] hFunc = _hashFuncParams [ h ] [ 1 ] if maskLen > 2 ** 32 * hLen : # 1) warning ( "pkcs_mgf1: maskLen > 2**32 * hLen" ) return None T = "" # 2) maxCounter = math . ceil ( float ( maskLen ) / float ( hLen ) ) # 3) counter = 0 while counter < maxCounter : C = pkcs_i2osp ( counter , 4 ) T += hFunc ( mgfSeed + C ) counter += 1 return T [ : maskLen ] | Implements generic MGF1 Mask Generation function as described in Appendix B . 2 . 1 of RFC 3447 . The hash function is passed by name . valid values are md2 md4 md5 sha1 tls sha256 sha384 and sha512 . Returns None on error . | 207 | 62 |
236,350 | def create_temporary_ca_path ( anchor_list , folder ) : # We should probably avoid writing duplicate anchors and also # check if they are all certs. try : if not os . path . isdir ( folder ) : os . makedirs ( folder ) except : return None l = len ( anchor_list ) if l == 0 : return None fmtstr = "%%0%sd.pem" % math . ceil ( math . log ( l , 10 ) ) i = 0 try : for a in anchor_list : fname = os . path . join ( folder , fmtstr % i ) f = open ( fname , "w" ) s = a . output ( fmt = "PEM" ) f . write ( s ) f . close ( ) i += 1 except : return None r , w , e = popen3 ( [ "c_rehash" , folder ] ) r . close ( ) w . close ( ) e . close ( ) return l | Create a CA path folder as defined in OpenSSL terminology by storing all certificates in anchor_list list in PEM format under provided folder and then creating the associated links using the hash as usually done by c_rehash . | 211 | 45 |
236,351 | def _rsadp ( self , c ) : n = self . modulus if type ( c ) is int : c = long ( c ) if type ( c ) is not long or c > n - 1 : warning ( "Key._rsaep() expects a long between 0 and n-1" ) return None return self . key . decrypt ( c ) | Internal method providing raw RSA decryption i . e . simple modular exponentiation of the given ciphertext representative c a long between 0 and n - 1 . | 77 | 31 |
236,352 | def fragment ( pkt , fragsize = 1480 ) : fragsize = ( fragsize + 7 ) // 8 * 8 lst = [ ] for p in pkt : s = bytes ( p [ IP ] . payload ) nb = ( len ( s ) + fragsize - 1 ) // fragsize for i in range ( nb ) : q = p . copy ( ) del q [ IP ] . payload del q [ IP ] . chksum del q [ IP ] . len if i == nb - 1 : q [ IP ] . flags &= ~ 1 else : q [ IP ] . flags |= 1 q [ IP ] . frag = i * fragsize // 8 r = conf . raw_layer ( load = s [ i * fragsize : ( i + 1 ) * fragsize ] ) r . overload_fields = p [ IP ] . payload . overload_fields . copy ( ) q . add_payload ( r ) lst . append ( q ) return lst | Fragment a big IP datagram | 220 | 7 |
236,353 | def parse_version ( str_ ) : v = re . findall ( r"\d+.\d+.\d+" , str_ ) if v : return v [ 0 ] else : print ( "cannot parse string {}" . format ( str_ ) ) raise KeyError | Parses the program s version from a python variable declaration . | 61 | 13 |
236,354 | def make_session ( username = None , password = None , bearer_token = None , extra_headers_dict = None ) : if password is None and bearer_token is None : logger . error ( "No authentication information provided; " "please check your object" ) raise KeyError session = requests . Session ( ) session . trust_env = False headers = { 'Accept-encoding' : 'gzip' , 'User-Agent' : 'twitterdev-search-tweets-python/' + VERSION } if bearer_token : logger . info ( "using bearer token for authentication" ) headers [ 'Authorization' ] = "Bearer {}" . format ( bearer_token ) session . headers = headers else : logger . info ( "using username and password for authentication" ) session . auth = username , password session . headers = headers if extra_headers_dict : headers . update ( extra_headers_dict ) return session | Creates a Requests Session for use . Accepts a bearer token for premiums users and will override username and password information if present . | 200 | 27 |
236,355 | def retry ( func ) : def retried_func ( * args , * * kwargs ) : max_tries = 3 tries = 0 while True : try : resp = func ( * args , * * kwargs ) except requests . exceptions . ConnectionError as exc : exc . msg = "Connection error for session; exiting" raise exc except requests . exceptions . HTTPError as exc : exc . msg = "HTTP error for session; exiting" raise exc if resp . status_code != 200 and tries < max_tries : logger . warning ( "retrying request; current status code: {}" . format ( resp . status_code ) ) tries += 1 # mini exponential backoff here. time . sleep ( tries ** 2 ) continue break if resp . status_code != 200 : error_message = resp . json ( ) [ "error" ] [ "message" ] logger . error ( "HTTP Error code: {}: {}" . format ( resp . status_code , error_message ) ) logger . error ( "Rule payload: {}" . format ( kwargs [ "rule_payload" ] ) ) raise requests . exceptions . HTTPError return resp return retried_func | Decorator to handle API retries and exceptions . Defaults to three retries . | 254 | 18 |
236,356 | def request ( session , url , rule_payload , * * kwargs ) : if isinstance ( rule_payload , dict ) : rule_payload = json . dumps ( rule_payload ) logger . debug ( "sending request" ) result = session . post ( url , data = rule_payload , * * kwargs ) return result | Executes a request with the given payload and arguments . | 78 | 11 |
236,357 | def collect_results ( rule , max_results = 500 , result_stream_args = None ) : if result_stream_args is None : logger . error ( "This function requires a configuration dict for the " "inner ResultStream object." ) raise KeyError rs = ResultStream ( rule_payload = rule , max_results = max_results , * * result_stream_args ) return list ( rs . stream ( ) ) | Utility function to quickly get a list of tweets from a ResultStream without keeping the object around . Requires your args to be configured prior to using . | 92 | 30 |
236,358 | def stream ( self ) : self . init_session ( ) self . check_counts ( ) self . execute_request ( ) self . stream_started = True while True : for tweet in self . current_tweets : if self . total_results >= self . max_results : break yield self . _tweet_func ( tweet ) self . total_results += 1 if self . next_token and self . total_results < self . max_results and self . n_requests <= self . max_requests : self . rule_payload = merge_dicts ( self . rule_payload , { "next" : self . next_token } ) logger . info ( "paging; total requests read so far: {}" . format ( self . n_requests ) ) self . execute_request ( ) else : break logger . info ( "ending stream at {} tweets" . format ( self . total_results ) ) self . current_tweets = None self . session . close ( ) | Main entry point for the data from the API . Will automatically paginate through the results via the next token and return up to max_results tweets or up to max_requests API calls whichever is lower . | 219 | 42 |
236,359 | def init_session ( self ) : if self . session : self . session . close ( ) self . session = make_session ( self . username , self . password , self . bearer_token , self . extra_headers_dict ) | Defines a session object for passing requests . | 50 | 9 |
236,360 | def check_counts ( self ) : if "counts" in re . split ( "[/.]" , self . endpoint ) : logger . info ( "disabling tweet parsing due to counts API usage" ) self . _tweet_func = lambda x : x | Disables tweet parsing if the count API is used . | 57 | 11 |
236,361 | def execute_request ( self ) : if self . n_requests % 20 == 0 and self . n_requests > 1 : logger . info ( "refreshing session" ) self . init_session ( ) resp = request ( session = self . session , url = self . endpoint , rule_payload = self . rule_payload ) self . n_requests += 1 ResultStream . session_request_counter += 1 resp = json . loads ( resp . content . decode ( resp . encoding ) ) self . next_token = resp . get ( "next" , None ) self . current_tweets = resp [ "results" ] | Sends the request to the API and parses the json response . Makes some assumptions about the session length and sets the presence of a next token . | 140 | 30 |
236,362 | def gen_rule_payload ( pt_rule , results_per_call = None , from_date = None , to_date = None , count_bucket = None , tag = None , stringify = True ) : pt_rule = ' ' . join ( pt_rule . split ( ) ) # allows multi-line strings payload = { "query" : pt_rule } if results_per_call is not None and isinstance ( results_per_call , int ) is True : payload [ "maxResults" ] = results_per_call if to_date : payload [ "toDate" ] = convert_utc_time ( to_date ) if from_date : payload [ "fromDate" ] = convert_utc_time ( from_date ) if count_bucket : if set ( [ "day" , "hour" , "minute" ] ) & set ( [ count_bucket ] ) : payload [ "bucket" ] = count_bucket del payload [ "maxResults" ] else : logger . error ( "invalid count bucket: provided {}" . format ( count_bucket ) ) raise ValueError if tag : payload [ "tag" ] = tag return json . dumps ( payload ) if stringify else payload | Generates the dict or json payload for a PowerTrack rule . | 272 | 13 |
236,363 | def gen_params_from_config ( config_dict ) : if config_dict . get ( "count_bucket" ) : logger . warning ( "change your endpoint to the count endpoint; this is " "default behavior when the count bucket " "field is defined" ) endpoint = change_to_count_endpoint ( config_dict . get ( "endpoint" ) ) else : endpoint = config_dict . get ( "endpoint" ) def intify ( arg ) : if not isinstance ( arg , int ) and arg is not None : return int ( arg ) else : return arg # this parameter comes in as a string when it's parsed results_per_call = intify ( config_dict . get ( "results_per_call" , None ) ) rule = gen_rule_payload ( pt_rule = config_dict [ "pt_rule" ] , from_date = config_dict . get ( "from_date" , None ) , to_date = config_dict . get ( "to_date" , None ) , results_per_call = results_per_call , count_bucket = config_dict . get ( "count_bucket" , None ) ) _dict = { "endpoint" : endpoint , "username" : config_dict . get ( "username" ) , "password" : config_dict . get ( "password" ) , "bearer_token" : config_dict . get ( "bearer_token" ) , "extra_headers_dict" : config_dict . get ( "extra_headers_dict" , None ) , "rule_payload" : rule , "results_per_file" : intify ( config_dict . get ( "results_per_file" ) ) , "max_results" : intify ( config_dict . get ( "max_results" ) ) , "max_pages" : intify ( config_dict . get ( "max_pages" , None ) ) } return _dict | Generates parameters for a ResultStream from a dictionary . | 435 | 11 |
236,364 | def infer_endpoint ( rule_payload ) : bucket = ( rule_payload if isinstance ( rule_payload , dict ) else json . loads ( rule_payload ) ) . get ( "bucket" ) return "counts" if bucket else "search" | Infer which endpoint should be used for a given rule payload . | 60 | 13 |
236,365 | def validate_count_api ( rule_payload , endpoint ) : rule = ( rule_payload if isinstance ( rule_payload , dict ) else json . loads ( rule_payload ) ) bucket = rule . get ( 'bucket' ) counts = set ( endpoint . split ( "/" ) ) & { "counts.json" } if len ( counts ) == 0 : if bucket is not None : msg = ( """There is a count bucket present in your payload, but you are using not using the counts API. Please check your endpoints and try again""" ) logger . error ( msg ) raise ValueError | Ensures that the counts api is set correctly in a payload . | 133 | 14 |
236,366 | def partition ( iterable , chunk_size , pad_none = False ) : args = [ iter ( iterable ) ] * chunk_size if not pad_none : return zip ( * args ) else : return it . zip_longest ( * args ) | adapted from Toolz . Breaks an iterable into n iterables up to a certain chunk size padding with Nones if available . | 55 | 29
236,367 | def write_ndjson ( filename , data_iterable , append = False , * * kwargs ) : write_mode = "ab" if append else "wb" logger . info ( "writing to file {}" . format ( filename ) ) with codecs . open ( filename , write_mode , "utf-8" ) as outfile : for item in data_iterable : outfile . write ( json . dumps ( item ) + "\n" ) yield item | Generator that writes newline - delimited json to a file and returns items from an iterable . | 101 | 21 |
236,368 | def write_result_stream ( result_stream , filename_prefix = None , results_per_file = None , * * kwargs ) : if isinstance ( result_stream , types . GeneratorType ) : stream = result_stream else : stream = result_stream . stream ( ) file_time_formatter = "%Y-%m-%dT%H_%M_%S" if filename_prefix is None : filename_prefix = "twitter_search_results" if results_per_file : logger . info ( "chunking result stream to files with {} tweets per file" . format ( results_per_file ) ) chunked_stream = partition ( stream , results_per_file , pad_none = True ) for chunk in chunked_stream : chunk = filter ( lambda x : x is not None , chunk ) curr_datetime = ( datetime . datetime . utcnow ( ) . strftime ( file_time_formatter ) ) _filename = "{}_{}.json" . format ( filename_prefix , curr_datetime ) yield from write_ndjson ( _filename , chunk ) else : curr_datetime = ( datetime . datetime . utcnow ( ) . strftime ( file_time_formatter ) ) _filename = "{}.json" . format ( filename_prefix ) yield from write_ndjson ( _filename , stream ) | Wraps a ResultStream object to save it to a file . This function will still return all data from the result stream as a generator that wraps the write_ndjson method . | 305 | 36 |
236,369 | def _load_yaml_credentials ( filename = None , yaml_key = None ) : try : with open ( os . path . expanduser ( filename ) ) as f : search_creds = yaml . safe_load ( f ) [ yaml_key ] except FileNotFoundError : logger . error ( "cannot read file {}" . format ( filename ) ) search_creds = { } except KeyError : logger . error ( "{} is missing the provided key: {}" . format ( filename , yaml_key ) ) search_creds = { } return search_creds | Loads and parses credentials in a YAML file . Catches common exceptions and returns an empty dict on error which will be handled downstream . | 136 | 30 |
236,370 | def _generate_bearer_token ( consumer_key , consumer_secret ) : data = [ ( 'grant_type' , 'client_credentials' ) ] resp = requests . post ( OAUTH_ENDPOINT , data = data , auth = ( consumer_key , consumer_secret ) ) logger . warning ( "Grabbing bearer token from OAUTH" ) if resp . status_code >= 400 : logger . error ( resp . text ) resp . raise_for_status ( ) return resp . json ( ) [ 'access_token' ] | Return the bearer token for a given pair of consumer key and secret values . | 124 | 15 |
236,371 | def fi_business_id ( business_id ) : if not business_id or not re . match ( business_id_pattern , business_id ) : return False factors = [ 7 , 9 , 10 , 5 , 8 , 4 , 2 ] numbers = map ( int , business_id [ : 7 ] ) checksum = int ( business_id [ 8 ] ) sum_ = sum ( f * n for f , n in zip ( factors , numbers ) ) modulo = sum_ % 11 return ( 11 - modulo == checksum ) or ( modulo == 0 and checksum == 0 ) | Validate a Finnish Business ID . | 128 | 7 |
236,372 | def fi_ssn ( ssn , allow_temporal_ssn = True ) : if not ssn : return False result = re . match ( ssn_pattern , ssn ) if not result : return False gd = result . groupdict ( ) checksum = int ( gd [ 'date' ] + gd [ 'serial' ] ) return ( int ( gd [ 'serial' ] ) >= 2 and ( allow_temporal_ssn or int ( gd [ 'serial' ] ) <= 899 ) and ssn_checkmarks [ checksum % len ( ssn_checkmarks ) ] == gd [ 'checksum' ] ) | Validate a Finnish Social Security Number . | 144 | 8 |
236,373 | def modcheck ( value ) : # move country code and check numbers to end rearranged = value [ 4 : ] + value [ : 4 ] # convert letters to numbers converted = [ char_value ( char ) for char in rearranged ] # interpret as integer integerized = int ( '' . join ( [ str ( i ) for i in converted ] ) ) return ( integerized % 97 == 1 ) | Check if the value string passes the mod97 - test . | 84 | 12 |
236,374 | def func_args_as_dict ( func , args , kwargs ) : if six . PY2 : _getargspec = inspect . getargspec else : _getargspec = inspect . getfullargspec arg_names = list ( OrderedDict . fromkeys ( itertools . chain ( _getargspec ( func ) [ 0 ] , kwargs . keys ( ) ) ) ) return OrderedDict ( list ( six . moves . zip ( arg_names , args ) ) + list ( kwargs . items ( ) ) ) | Return given function s positional and key value arguments as an ordered dictionary . | 122 | 14 |
236,375 | def validator ( func , * args , * * kwargs ) : def wrapper ( func , * args , * * kwargs ) : value = func ( * args , * * kwargs ) if not value : return ValidationFailure ( func , func_args_as_dict ( func , args , kwargs ) ) return True return decorator ( wrapper , func ) | A decorator that makes given function validator . | 82 | 10 |
236,376 | def length ( value , min = None , max = None ) : if ( min is not None and min < 0 ) or ( max is not None and max < 0 ) : raise AssertionError ( '`min` and `max` need to be greater than zero.' ) return between ( len ( value ) , min = min , max = max ) | Return whether or not the length of given string is within a specified range . | 75 | 15 |
236,377 | def url ( value , public = False ) : result = pattern . match ( value ) if not public : return result return result and not any ( ( result . groupdict ( ) . get ( key ) for key in ( 'private_ip' , 'private_host' ) ) ) | Return whether or not given value is a valid URL . | 60 | 11 |
236,378 | def ipv4 ( value ) : groups = value . split ( '.' ) if len ( groups ) != 4 or any ( not x . isdigit ( ) for x in groups ) : return False return all ( 0 <= int ( part ) < 256 for part in groups ) | Return whether or not given value is a valid IP version 4 address . | 58 | 14 |
236,379 | def post_report ( coverage , args ) : response = requests . post ( URL , files = { 'json_file' : json . dumps ( coverage ) } , verify = ( not args . skip_ssl_verify ) ) try : result = response . json ( ) except ValueError : result = { 'error' : 'Failure to submit data. ' 'Response [%(status)s]: %(text)s' % { 'status' : response . status_code , 'text' : response . text } } print ( result ) if 'error' in result : return result [ 'error' ] return 0 | Post coverage report to coveralls . io . | 132 | 9 |
236,380 | def is_source_file ( args , filepath ) : if args . extension : return os . path . splitext ( filepath ) [ 1 ] in args . extension else : return os . path . splitext ( filepath ) [ 1 ] in _CPP_EXTENSIONS | Returns true if it is a C ++ source file . | 62 | 11 |
236,381 | def exclude_paths ( args ) : results = [ ] if args . exclude : for excl_path in args . exclude : results . append ( os . path . abspath ( os . path . join ( args . root , excl_path ) ) ) return results | Returns the absolute paths for excluded path . | 58 | 8 |
236,382 | def create_exclude_rules ( args ) : global _cached_exclude_rules if _cached_exclude_rules is not None : return _cached_exclude_rules rules = [ ] for excl_path in args . exclude : abspath = os . path . abspath ( os . path . join ( args . root , excl_path ) ) rules . append ( ( abspath , True ) ) for incl_path in args . include : abspath = os . path . abspath ( os . path . join ( args . root , incl_path ) ) rules . append ( ( abspath , False ) ) _cached_exclude_rules = sorted ( rules , key = lambda p : p [ 0 ] ) return _cached_exclude_rules | Creates the exclude rules | 170 | 6
236,383 | def is_excluded_path ( args , filepath ) : # Try regular expressions first. for regexp_exclude_path in args . regexp : if re . match ( regexp_exclude_path , filepath ) : return True abspath = os . path . abspath ( filepath ) if args . include : # If the file is outside of any include directories. out_of_include_dirs = True for incl_path in args . include : absolute_include_path = os . path . abspath ( os . path . join ( args . root , incl_path ) ) if is_child_dir ( absolute_include_path , abspath ) : out_of_include_dirs = False break if out_of_include_dirs : return True excl_rules = create_exclude_rules ( args ) for i , rule in enumerate ( excl_rules ) : if rule [ 0 ] == abspath : return rule [ 1 ] if is_child_dir ( rule [ 0 ] , abspath ) : # continue to try to longest match. last_result = rule [ 1 ] for j in range ( i + 1 , len ( excl_rules ) ) : rule_deep = excl_rules [ j ] if not is_child_dir ( rule_deep [ 0 ] , abspath ) : break last_result = rule_deep [ 1 ] return last_result return False | Returns true if the filepath is under the one of the exclude path . | 308 | 15 |
236,384 | def filter_dirs ( root , dirs , excl_paths ) : filtered_dirs = [ ] for dirpath in dirs : abspath = os . path . abspath ( os . path . join ( root , dirpath ) ) if os . path . basename ( abspath ) in _SKIP_DIRS : continue if abspath not in excl_paths : filtered_dirs . append ( dirpath ) return filtered_dirs | Filter directory paths based on the exclusion rules defined in excl_paths . | 100 | 16 |
236,385 | def parse_gcov_file ( args , fobj , filename ) : coverage = [ ] ignoring = False for line in fobj : report_fields = line . decode ( 'utf-8' , 'replace' ) . split ( ':' , 2 ) if len ( report_fields ) == 1 : continue line_num = report_fields [ 1 ] . strip ( ) if line_num == '' : continue cov_num = report_fields [ 0 ] . strip ( ) line_num = int ( line_num ) text = report_fields [ 2 ] if line_num == 0 : continue if re . search ( r'\bLCOV_EXCL_START\b' , text ) : if ignoring : sys . stderr . write ( "Warning: %s:%d: nested LCOV_EXCL_START, " "please fix\n" % ( filename , line_num ) ) ignoring = True elif re . search ( r'\bLCOV_EXCL_(STOP|END)\b' , text ) : if not ignoring : sys . stderr . write ( "Warning: %s:%d: LCOV_EXCL_STOP outside of " "exclusion zone, please fix\n" % ( filename , line_num ) ) if 'LCOV_EXCL_END' in text : sys . stderr . write ( "Warning: %s:%d: LCOV_EXCL_STOP is the " "correct keyword\n" % ( filename , line_num ) ) ignoring = False if cov_num == '-' : coverage . append ( None ) elif cov_num == '#####' : # Avoid false positives. if ( ignoring or any ( [ re . search ( pattern , text ) for pattern in args . exclude_lines_pattern ] ) ) : coverage . append ( None ) else : coverage . append ( 0 ) elif cov_num == '=====' : # This is indicitive of a gcov output parse # error. coverage . append ( 0 ) else : coverage . append ( int ( cov_num . rstrip ( '*' ) ) ) return coverage | Parses the content of . gcov file | 468 | 11 |
236,386 | def parse_lcov_file_info ( args , filepath , line_iter , line_coverage_re , file_end_string ) : coverage = [ ] lines_covered = [ ] for line in line_iter : if line != "end_of_record" : line_coverage_match = line_coverage_re . match ( line ) if line_coverage_match : line_no = line_coverage_match . group ( 1 ) cov_count = int ( line_coverage_match . group ( 2 ) ) if args . max_cov_count : if cov_count > args . max_cov_count : cov_count = args . max_cov_count + 1 lines_covered . append ( ( line_no , cov_count ) ) else : break num_code_lines = len ( [ line . rstrip ( '\n' ) for line in open ( filepath , 'r' ) ] ) coverage = [ None ] * num_code_lines for line_covered in lines_covered : coverage [ int ( line_covered [ 0 ] ) - 1 ] = line_covered [ 1 ] return coverage | Parse the file content in lcov info file | 253 | 11 |
236,387 | def combine_reports ( original , new ) : if original is None : return new report = { } report [ 'name' ] = original [ 'name' ] report [ 'source_digest' ] = original [ 'source_digest' ] coverage = [ ] for original_num , new_num in zip ( original [ 'coverage' ] , new [ 'coverage' ] ) : if original_num is None : coverage . append ( new_num ) elif new_num is None : coverage . append ( original_num ) else : coverage . append ( original_num + new_num ) report [ 'coverage' ] = coverage return report | Combines two gcov reports for a file into one by adding the number of hits on each line | 141 | 21 |
236,388 | def collect_non_report_files ( args , discovered_files ) : excl_paths = exclude_paths ( args ) abs_root = os . path . abspath ( args . root ) non_report_files = [ ] for root , dirs , files in os . walk ( args . root , followlinks = args . follow_symlinks ) : dirs [ : ] = filter_dirs ( root , dirs , excl_paths ) for filename in files : if not is_source_file ( args , filename ) : continue abs_filepath = os . path . join ( os . path . abspath ( root ) , filename ) if is_excluded_path ( args , abs_filepath ) : continue filepath = os . path . relpath ( abs_filepath , abs_root ) if filepath not in discovered_files : src_report = { } src_report [ 'name' ] = posix_path ( filepath ) coverage = [ ] with io . open ( abs_filepath , mode = 'rb' ) as fobj : for _ in fobj : coverage . append ( None ) fobj . seek ( 0 ) src_report [ 'source_digest' ] = hashlib . md5 ( fobj . read ( ) ) . hexdigest ( ) src_report [ 'coverage' ] = coverage non_report_files . append ( src_report ) return non_report_files | Collects the source files that have no coverage reports . | 314 | 11 |
236,389 | def parse_yaml_config ( args ) : try : import yaml except ImportError : yaml = None yml = { } try : with open ( args . coveralls_yaml , 'r' ) as fp : if not yaml : raise SystemExit ( 'PyYAML is required for parsing configuration' ) yml = yaml . load ( fp ) except IOError : pass yml = yml or { } return yml | Parse yaml config | 98 | 5 |
236,390 | def run ( ) : import json import os import sys from . import coverage , report args = coverage . create_args ( sys . argv [ 1 : ] ) if args . verbose : print ( 'encodings: {}' . format ( args . encodings ) ) yml = parse_yaml_config ( args ) if not args . repo_token : # try get token from yaml first args . repo_token = yml . get ( 'repo_token' , '' ) if not args . repo_token : # use environment COVERALLS_REPO_TOKEN as a fallback args . repo_token = os . environ . get ( 'COVERALLS_REPO_TOKEN' ) args . service_name = yml . get ( 'service_name' , 'travis-ci' ) if not args . gcov_options : args . gcov_options = yml . get ( 'gcov_options' , '' ) if not args . root : args . root = yml . get ( 'root' , '.' ) if not args . build_root : args . build_root = yml . get ( 'build_root' , '' ) args . exclude . extend ( yml . get ( 'exclude' , [ ] ) ) args . include . extend ( yml . get ( 'include' , [ ] ) ) args . exclude_lines_pattern . extend ( yml . get ( 'exclude_lines_pattern' , [ ] ) ) args . service_job_id = os . environ . get ( 'TRAVIS_JOB_ID' , '' ) if args . repo_token == '' and args . service_job_id == '' : raise ValueError ( "\nno coveralls.io token specified and no travis job id found\n" "see --help for examples on how to specify a token\n" ) if not args . no_gcov : coverage . run_gcov ( args ) cov_report = coverage . collect ( args ) if args . verbose : print ( cov_report ) if args . dryrun : return 0 if args . dump : args . dump . write ( json . dumps ( cov_report ) ) return 0 return report . post_report ( cov_report , args ) | Run cpp coverage . | 498 | 5 |
236,391 | def gitrepo ( cwd ) : repo = Repository ( cwd ) if not repo . valid ( ) : return { } return { 'head' : { 'id' : repo . gitlog ( '%H' ) , 'author_name' : repo . gitlog ( '%aN' ) , 'author_email' : repo . gitlog ( '%ae' ) , 'committer_name' : repo . gitlog ( '%cN' ) , 'committer_email' : repo . gitlog ( '%ce' ) , 'message' : repo . gitlog ( '%s' ) } , 'branch' : os . environ . get ( 'TRAVIS_BRANCH' , os . environ . get ( 'APPVEYOR_REPO_BRANCH' , repo . git ( 'rev-parse' , '--abbrev-ref' , 'HEAD' ) [ 1 ] . strip ( ) ) ) , 'remotes' : [ { 'name' : line . split ( ) [ 0 ] , 'url' : line . split ( ) [ 1 ] } for line in repo . git ( 'remote' , '-v' ) [ 1 ] if '(fetch)' in line ] } | Return hash of Git data that can be used to display more information to users . | 275 | 16 |
236,392 | def _verify ( function ) : # @functools.wraps def wrapped ( pin , * args , * * kwargs ) : pin = int ( pin ) if pin not in _open : ppath = gpiopath ( pin ) if not os . path . exists ( ppath ) : log . debug ( "Creating Pin {0}" . format ( pin ) ) with _export_lock : with open ( pjoin ( gpio_root , 'export' ) , 'w' ) as f : _write ( f , pin ) value = open ( pjoin ( ppath , 'value' ) , FMODE ) direction = open ( pjoin ( ppath , 'direction' ) , FMODE ) _open [ pin ] = PinState ( value = value , direction = direction ) return function ( pin , * args , * * kwargs ) return wrapped | decorator to ensure pin is properly set up | 186 | 10 |
236,393 | def set ( pin , value ) : if value is LOW : value = 0 value = int ( bool ( value ) ) log . debug ( "Write {0}: {1}" . format ( pin , value ) ) f = _open [ pin ] . value _write ( f , value ) | set the pin value to 0 or 1 | 61 | 8 |
236,394 | def end ( self ) : # Note: Vend is just a macro; use 'Vfinish' instead # Note also that the same C function is used to end # the VS interface _checkErr ( 'vend' , _C . Vfinish ( self . _hdf_inst . _id ) , "cannot terminate V interface" ) self . _hdf_inst = None | Close the V interface . | 84 | 5
236,395 | def attach ( self , num_name , write = 0 ) : if isinstance ( num_name , bytes ) : num = self . find ( num_name ) else : num = num_name vg_id = _C . Vattach ( self . _hdf_inst . _id , num , write and 'w' or 'r' ) _checkErr ( 'vattach' , vg_id , "cannot attach Vgroup" ) return VG ( self , vg_id ) | Open an existing vgroup given its name or its reference number or create a new vgroup returning a VG instance for that vgroup . | 109 | 27 |
236,396 | def create ( self , name ) : vg = self . attach ( - 1 , 1 ) vg . _name = name return vg | Create a new vgroup and assign it a name . | 30 | 11 |
236,397 | def find ( self , name ) : refnum = _C . Vfind ( self . _hdf_inst . _id , name ) if not refnum : raise HDF4Error ( "vgroup not found" ) return refnum | Find a vgroup given its name returning its reference number if found . | 51 | 14 |
236,398 | def findclass ( self , name ) : refnum = _C . Vfindclass ( self . _hdf_inst . _id , name ) if not refnum : raise HDF4Error ( "vgroup not found" ) return refnum | Find a vgroup given its class name returning its reference number if found . | 53 | 15 |
236,399 | def delete ( self , num_name ) : try : vg = self . attach ( num_name , 1 ) except HDF4Error as msg : raise HDF4Error ( "delete: no such vgroup" ) # ATTENTION: The HDF documentation says that the vgroup_id # is passed to Vdelete(). This is wrong. # The vgroup reference number must instead be passed. refnum = vg . _refnum vg . detach ( ) _checkErr ( 'delete' , _C . Vdelete ( self . _hdf_inst . _id , refnum ) , "error deleting vgroup" ) | Delete from the HDF file the vgroup identified by its reference number or its name . | 137 | 18 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.