idx
int64
0
63k
question
stringlengths
61
4.03k
target
stringlengths
6
1.23k
48,400
def attachComplete():
    """Build an ATTACH COMPLETE message (TS 24.008 section 9.4.3)."""
    return TpPd(pd=0x3) / MessageType(mesType=0x3)
ATTACH COMPLETE Section 9 . 4 . 3
48,401
def detachRequest(GmmCause_presence=0):
    """Build a DETACH REQUEST message (TS 24.008 section 9.4.5).

    GmmCause_presence: set to 1 to append the optional GMM cause IE.
    """
    a = TpPd(pd=0x3)
    b = MessageType(mesType=0x5)
    c = DetachTypeAndForceToStandby()
    packet = a / b / c
    # Fix: `is 1` tests object identity (a CPython small-int cache detail,
    # SyntaxWarning on 3.8+); numeric equality is what is meant.
    if GmmCause_presence == 1:
        e = GmmCause(ieiGC=0x25)
        packet = packet / e
    return packet
DETACH REQUEST Section 9 . 4 . 5
48,402
def detachRequestMsOriginating():
    """Build a DETACH REQUEST, MS originating (TS 24.008 section 9.4.5.2)."""
    return TpPd(pd=0x3) / MessageType(mesType=0x5) / DetachTypeAndSpareHalfOctets()
DETACH REQUEST Section 9 . 4 . 5 . 2
48,403
def detachAcceptMsTerminated():
    """Build a DETACH ACCEPT, MS terminated (TS 24.008 section 9.4.6.1)."""
    return TpPd(pd=0x3) / MessageType(mesType=0x6)
DETACH ACCEPT Section 9 . 4 . 6 . 1
48,404
def detachAcceptMsOriginating():
    """Build a DETACH ACCEPT, MS originating (TS 24.008 section 9.4.6.2)."""
    header = TpPd(pd=0x3) / MessageType(mesType=0x6)
    return header / ForceToStandbyAndSpareHalfOctets()
DETACH ACCEPT Section 9 . 4 . 6 . 2
48,405
def ptmsiReallocationCommand(PTmsiSignature_presence=0):
    """Build a P-TMSI REALLOCATION COMMAND (TS 24.008 section 9.4.7).

    PTmsiSignature_presence: set to 1 to append the optional P-TMSI
    signature IE.
    """
    a = TpPd(pd=0x3)
    b = MessageType(mesType=0x10)
    c = MobileId()
    d = RoutingAreaIdentification()
    e = ForceToStandbyAndSpareHalfOctets()
    packet = a / b / c / d / e
    # Fix: `is 1` → `== 1` (identity vs. equality).
    if PTmsiSignature_presence == 1:
        g = PTmsiSignature(ieiPTS=0x19)
        packet = packet / g
    return packet
P - TMSI REALLOCATION COMMAND Section 9 . 4 . 7
48,406
def ptmsiReallocationComplete():
    """Build a P-TMSI REALLOCATION COMPLETE (TS 24.008 section 9.4.8)."""
    return TpPd(pd=0x3) / MessageType(mesType=0x11)
P - TMSI REALLOCATION COMPLETE Section 9 . 4 . 8
48,407
def authenticationAndCipheringRequest(AuthenticationParameterRAND_presence=0,
                                      CiphKeySeqNr_presence=0):
    """Build an AUTHENTICATION AND CIPHERING REQUEST (TS 24.008 s. 9.4.9).

    Optional IEs (RAND, ciphering key sequence number) are appended when
    the corresponding *_presence flag is 1.
    """
    a = TpPd(pd=0x3)
    b = MessageType(mesType=0x12)
    d = CipheringAlgorithmAndImeisvRequest()
    e = ForceToStandbyAndAcReferenceNumber()
    packet = a / b / d / e
    # Fix: `is 1` → `== 1` (identity vs. equality).
    if AuthenticationParameterRAND_presence == 1:
        g = AuthenticationParameterRAND(ieiAPR=0x21)
        packet = packet / g
    if CiphKeySeqNr_presence == 1:
        h = CiphKeySeqNrHdr(ieiCKSN=0x08, eightBitCKSN=0x0)
        packet = packet / h
    return packet
AUTHENTICATION AND CIPHERING REQUEST Section 9 . 4 . 9
48,408
def authenticationAndCipheringResponse(AuthenticationParameterSRES_presence=0,
                                       MobileId_presence=0):
    """Build an AUTHENTICATION AND CIPHERING RESPONSE (TS 24.008 s. 9.4.10).

    Optional IEs (SRES, mobile identity) are appended when the
    corresponding *_presence flag is 1.
    """
    a = TpPd(pd=0x3)
    b = MessageType(mesType=0x13)
    c = AcReferenceNumberAndSpareHalfOctets()
    packet = a / b / c
    # Fix: `is 1` → `== 1` (identity vs. equality).
    if AuthenticationParameterSRES_presence == 1:
        e = AuthenticationParameterSRES(ieiAPS=0x22)
        packet = packet / e
    if MobileId_presence == 1:
        f = MobileIdHdr(ieiMI=0x23, eightBitMI=0x0)
        packet = packet / f
    return packet
AUTHENTICATION AND CIPHERING RESPONSE Section 9 . 4 . 10
48,409
def authenticationAndCipheringReject():
    """Build an AUTHENTICATION AND CIPHERING REJECT (TS 24.008 s. 9.4.11)."""
    return TpPd(pd=0x3) / MessageType(mesType=0x14)
AUTHENTICATION AND CIPHERING REJECT Section 9 . 4 . 11
48,410
def routingAreaUpdateRequest(PTmsiSignature_presence=0, GprsTimer_presence=0,
                             DrxParameter_presence=0, TmsiStatus_presence=0):
    """Build a ROUTING AREA UPDATE REQUEST (TS 24.008 section 9.4.14).

    Each *_presence flag set to 1 appends the corresponding optional IE.
    """
    a = TpPd(pd=0x3)
    b = MessageType(mesType=0x8)
    c = UpdateTypeAndCiphKeySeqNr()
    e = RoutingAreaIdentification()
    f = MsNetworkCapability()
    packet = a / b / c / e / f
    # Fix: `is 1` → `== 1` throughout (identity vs. equality).
    if PTmsiSignature_presence == 1:
        g = PTmsiSignature(ieiPTS=0x19)
        packet = packet / g
    if GprsTimer_presence == 1:
        h = GprsTimer(ieiGT=0x17)
        packet = packet / h
    if DrxParameter_presence == 1:
        i = DrxParameter(ieiDP=0x27)
        packet = packet / i
    if TmsiStatus_presence == 1:
        j = TmsiStatus(ieiTS=0x9)
        packet = packet / j
    return packet
ROUTING AREA UPDATE REQUEST Section 9 . 4 . 14
48,411
def routingAreaUpdateAccept(PTmsiSignature_presence=0, MobileId_presence=0,
                            MobileId_presence1=0,
                            ReceiveNpduNumbersList_presence=0,
                            GprsTimer_presence=0, GmmCause_presence=0):
    """Build a ROUTING AREA UPDATE ACCEPT (TS 24.008 section 9.4.15).

    Each *_presence flag set to 1 appends the corresponding optional IE.
    """
    a = TpPd(pd=0x3)
    b = MessageType(mesType=0x9)
    c = ForceToStandbyAndUpdateResult()
    e = GprsTimer()
    f = RoutingAreaIdentification()
    packet = a / b / c / e / f
    # Fix: `is 1` → `== 1` throughout (identity vs. equality).
    if PTmsiSignature_presence == 1:
        g = PTmsiSignature(ieiPTS=0x19)
        packet = packet / g
    if MobileId_presence == 1:
        h = MobileIdHdr(ieiMI=0x18, eightBitMI=0x0)
        packet = packet / h
    if MobileId_presence1 == 1:
        i = MobileIdHdr(ieiMI=0x23, eightBitMI=0x0)
        packet = packet / i
    if ReceiveNpduNumbersList_presence == 1:
        j = ReceiveNpduNumbersList(ieiRNNL=0x26)
        packet = packet / j
    if GprsTimer_presence == 1:
        k = GprsTimer(ieiGT=0x17)
        packet = packet / k
    if GmmCause_presence == 1:
        l = GmmCause(ieiGC=0x25)
        packet = packet / l
    return packet
ROUTING AREA UPDATE ACCEPT Section 9 . 4 . 15
48,412
def routingAreaUpdateComplete(ReceiveNpduNumbersList_presence=0):
    """Build a ROUTING AREA UPDATE COMPLETE (TS 24.008 section 9.4.16).

    ReceiveNpduNumbersList_presence: set to 1 to append the optional IE.
    """
    a = TpPd(pd=0x3)
    b = MessageType(mesType=0xa)
    packet = a / b
    # Fix: `is 1` → `== 1` (identity vs. equality).
    if ReceiveNpduNumbersList_presence == 1:
        c = ReceiveNpduNumbersList(ieiRNNL=0x26)
        packet = packet / c
    return packet
ROUTING AREA UPDATE COMPLETE Section 9 . 4 . 16
48,413
def routingAreaUpdateReject():
    """Build a ROUTING AREA UPDATE REJECT (TS 24.008 section 9.4.17)."""
    packet = TpPd(pd=0x3) / MessageType(mesType=0xb)
    packet = packet / GmmCause() / ForceToStandbyAndSpareHalfOctets()
    return packet
ROUTING AREA UPDATE REJECT Section 9 . 4 . 17
48,414
def gmmStatus():
    """Build a GMM STATUS message (TS 24.008 section 9.4.18)."""
    return TpPd(pd=0x3) / MessageType(mesType=0x20) / GmmCause()
GMM STATUS Section 9 . 4 . 18
48,415
def activatePdpContextRequest(AccessPointName_presence=0,
                              ProtocolConfigurationOptions_presence=0):
    """Build an ACTIVATE PDP CONTEXT REQUEST (TS 24.008 section 9.5.1).

    Each *_presence flag set to 1 appends the corresponding optional IE.
    """
    a = TpPd(pd=0x8)
    b = MessageType(mesType=0x41)
    c = NetworkServiceAccessPointIdentifier()
    d = LlcServiceAccessPointIdentifier()
    e = QualityOfService()
    f = PacketDataProtocolAddress()
    packet = a / b / c / d / e / f
    # Fix: `is 1` → `== 1` (identity vs. equality).
    if AccessPointName_presence == 1:
        g = AccessPointName(ieiAPN=0x28)
        packet = packet / g
    if ProtocolConfigurationOptions_presence == 1:
        h = ProtocolConfigurationOptions(ieiPCO=0x27)
        packet = packet / h
    return packet
ACTIVATE PDP CONTEXT REQUEST Section 9 . 5 . 1
48,416
def activatePdpContextAccept(PacketDataProtocolAddress_presence=0,
                             ProtocolConfigurationOptions_presence=0):
    """Build an ACTIVATE PDP CONTEXT ACCEPT (TS 24.008 section 9.5.2).

    Each *_presence flag set to 1 appends the corresponding optional IE.
    """
    a = TpPd(pd=0x8)
    b = MessageType(mesType=0x42)
    c = LlcServiceAccessPointIdentifier()
    d = QualityOfService()
    e = RadioPriorityAndSpareHalfOctets()
    packet = a / b / c / d / e
    # Fix: `is 1` → `== 1` (identity vs. equality).
    if PacketDataProtocolAddress_presence == 1:
        f = PacketDataProtocolAddress(ieiPDPA=0x2B)
        packet = packet / f
    if ProtocolConfigurationOptions_presence == 1:
        g = ProtocolConfigurationOptions(ieiPCO=0x27)
        packet = packet / g
    return packet
ACTIVATE PDP CONTEXT ACCEPT Section 9 . 5 . 2
48,417
def requestPdpContextActivation(AccessPointName_presence=0):
    """Build a REQUEST PDP CONTEXT ACTIVATION (TS 24.008 section 9.5.4).

    AccessPointName_presence: set to 1 to append the optional APN IE.
    """
    a = TpPd(pd=0x8)
    b = MessageType(mesType=0x44)
    c = PacketDataProtocolAddress()
    packet = a / b / c
    # Fix: `is 1` → `== 1` (identity vs. equality).
    if AccessPointName_presence == 1:
        d = AccessPointName(ieiAPN=0x28)
        packet = packet / d
    return packet
REQUEST PDP CONTEXT ACTIVATION Section 9 . 5 . 4
48,418
def requestPdpContextActivationReject():
    """Build a REQUEST PDP CONTEXT ACTIVATION REJECT (TS 24.008 s. 9.5.5)."""
    return TpPd(pd=0x8) / MessageType(mesType=0x45) / SmCause()
REQUEST PDP CONTEXT ACTIVATION REJECT Section 9 . 5 . 5
48,419
def modifyPdpContextRequest():
    """Build a MODIFY PDP CONTEXT REQUEST (TS 24.008 section 9.5.6)."""
    packet = TpPd(pd=0x8) / MessageType(mesType=0x48)
    packet = packet / RadioPriorityAndSpareHalfOctets()
    packet = packet / LlcServiceAccessPointIdentifier() / QualityOfService()
    return packet
MODIFY PDP CONTEXT REQUEST Section 9 . 5 . 6
48,420
def modifyPdpContextAccept():
    """Build a MODIFY PDP CONTEXT ACCEPT (TS 24.008 section 9.5.7)."""
    # NOTE(review): 0x45 is the same message type used by
    # requestPdpContextActivationReject above — verify against the
    # TS 24.008 SM message-type table before relying on this value.
    return TpPd(pd=0x8) / MessageType(mesType=0x45)
MODIFY PDP CONTEXT ACCEPT Section 9 . 5 . 7
48,421
def deactivatePdpContextAccept():
    """Build a DEACTIVATE PDP CONTEXT ACCEPT (TS 24.008 section 9.5.9)."""
    return TpPd(pd=0x8) / MessageType(mesType=0x47)
DEACTIVATE PDP CONTEXT ACCEPT Section 9 . 5 . 9
48,422
def activateAaPdpContextRequest(AccessPointName_presence=0,
                                ProtocolConfigurationOptions_presence=0,
                                GprsTimer_presence=0):
    """Build an ACTIVATE AA PDP CONTEXT REQUEST (TS 24.008 section 9.5.10).

    Each *_presence flag set to 1 appends the corresponding optional IE.
    """
    a = TpPd(pd=0x8)
    b = MessageType(mesType=0x50)
    c = NetworkServiceAccessPointIdentifier()
    d = LlcServiceAccessPointIdentifier()
    e = QualityOfService()
    f = PacketDataProtocolAddress()
    packet = a / b / c / d / e / f
    # Fix: `is 1` → `== 1` throughout (identity vs. equality).
    if AccessPointName_presence == 1:
        g = AccessPointName(ieiAPN=0x28)
        packet = packet / g
    if ProtocolConfigurationOptions_presence == 1:
        h = ProtocolConfigurationOptions(ieiPCO=0x27)
        packet = packet / h
    if GprsTimer_presence == 1:
        i = GprsTimer(ieiGT=0x29)
        packet = packet / i
    return packet
ACTIVATE AA PDP CONTEXT REQUEST Section 9 . 5 . 10
48,423
def activateAaPdpContextAccept(ProtocolConfigurationOptions_presence=0,
                               GprsTimer_presence=0):
    """Build an ACTIVATE AA PDP CONTEXT ACCEPT (TS 24.008 section 9.5.11).

    Each *_presence flag set to 1 appends the corresponding optional IE.
    """
    a = TpPd(pd=0x8)
    b = MessageType(mesType=0x51)
    c = LlcServiceAccessPointIdentifier()
    d = QualityOfService()
    e = MobileId()
    f = PacketDataProtocolAddress()
    g = RadioPriorityAndSpareHalfOctets()
    packet = a / b / c / d / e / f / g
    # Fix: `is 1` → `== 1` (identity vs. equality).
    if ProtocolConfigurationOptions_presence == 1:
        i = ProtocolConfigurationOptions(ieiPCO=0x27)
        packet = packet / i
    if GprsTimer_presence == 1:
        j = GprsTimer(ieiGT=0x29)
        packet = packet / j
    return packet
ACTIVATE AA PDP CONTEXT ACCEPT Section 9 . 5 . 11
48,424
def activateAaPdpContextReject(ProtocolConfigurationOptions_presence=0):
    """Build an ACTIVATE AA PDP CONTEXT REJECT (TS 24.008 section 9.5.12).

    ProtocolConfigurationOptions_presence: set to 1 to append the optional
    protocol configuration options IE.
    """
    a = TpPd(pd=0x8)
    b = MessageType(mesType=0x52)
    c = SmCause()
    packet = a / b / c
    # Fix: `is 1` → `== 1` (identity vs. equality).
    if ProtocolConfigurationOptions_presence == 1:
        d = ProtocolConfigurationOptions(ieiPCO=0x27)
        packet = packet / d
    return packet
ACTIVATE AA PDP CONTEXT REJECT Section 9 . 5 . 12
48,425
def deactivateAaPdpContextRequest():
    """Build a DEACTIVATE AA PDP CONTEXT REQUEST (TS 24.008 s. 9.5.13)."""
    header = TpPd(pd=0x8) / MessageType(mesType=0x53)
    return header / AaDeactivationCauseAndSpareHalfOctets()
DEACTIVATE AA PDP CONTEXT REQUEST Section 9 . 5 . 13
48,426
def deactivateAaPdpContextAccept():
    """Build a DEACTIVATE AA PDP CONTEXT ACCEPT (TS 24.008 s. 9.5.14)."""
    return TpPd(pd=0x8) / MessageType(mesType=0x54)
DEACTIVATE AA PDP CONTEXT ACCEPT Section 9 . 5 . 14
48,427
def ls(obj=None):
    """List available layers, or print field info on a given layer/packet.

    With no argument: list every layer registered in conf.layers.
    With a Packet subclass: list its field descriptors and defaults.
    With a Packet instance: list field values (and recurse into payload).
    """
    if obj is None:
        import builtins
        # NOTE(review): `all` (shadowing the builtin) is built but never
        # used below — looks like leftover code; confirm before removing.
        all = builtins.__dict__.copy()
        all.update(globals())
        objlst = sorted(conf.layers, key=lambda x: x.__name__)
        for o in objlst:
            print("%-10s : %s" % (o.__name__, o.name))
    else:
        if isinstance(obj, type) and issubclass(obj, Packet):
            # A layer class: show each field's type and default.
            for f in obj.fields_desc:
                print("%-10s : %-20s = (%s)" % (f.name, f.__class__.__name__, repr(f.default)))
        elif isinstance(obj, Packet):
            # A packet instance: show current values alongside defaults.
            for f in obj.fields_desc:
                print("%-10s : %-20s = %-15s (%s)" % (f.name, f.__class__.__name__, repr(getattr(obj, f.name)), repr(f.default)))
            if not isinstance(obj.payload, NoPayload):
                print("--")
                ls(obj.payload)
        else:
            print("Not a packet class. Type 'ls()' to list packet classes.")
List available layers or infos on a given layer
48,428
def haslayer(self, cls):
    """Return a truthy value if self has a layer that is an instance of cls.

    cls may be a Packet class or a class name string. Searches this layer,
    any packets held in packet-list fields, then the payload chain.
    Superseded by the `cls in self` syntax.
    """
    # Direct match on this layer (by class or by class name).
    if self.__class__ == cls or self.__class__.__name__ == cls:
        return 1
    # Search packets nested inside this layer's packet fields.
    for f in self.packetfields:
        fvalue_gen = self.getfieldval(f.name)
        if fvalue_gen is None:
            continue
        if not f.islist:
            # Wrap a single value so the loop below is uniform.
            fvalue_gen = SetGen(fvalue_gen, _iterpacket=0)
        for fvalue in fvalue_gen:
            if isinstance(fvalue, Packet):
                ret = fvalue.haslayer(cls)
                if ret:
                    return ret
    # Finally, recurse into the payload.
    return self.payload.haslayer(cls)
true if self has a layer that is an instance of cls . Superseded by cls in self syntax .
48,429
def getlayer(self, cls, nb=1, _track=None):
    """Return the nb-th layer that is an instance of cls.

    cls may be a Packet class, a class-name string, an int (shorthand for
    the nb-th layer of any class), or "ClassName.field" to fetch a field
    value from the matched layer. _track is an internal out-parameter used
    to propagate the remaining count across nested searches.
    """
    if type(cls) is int:
        # getlayer(n) means: the (n+1)-th layer of any class.
        nb = cls + 1
        cls = None
    if type(cls) is str and "." in cls:
        # "ClassName.field" → return that field of the matched layer.
        ccls, fld = cls.split(".", 1)
    else:
        ccls, fld = cls, None
    # NOTE(review): compares self.__class__.name (protocol name attr), not
    # __name__ — confirm that string class lookups are meant to match the
    # layer's `name` and not the Python class name.
    if cls is None or self.__class__ == cls or self.__class__.name == ccls:
        if nb == 1:
            if fld is None:
                return self
            else:
                return self.getfieldval(fld)
        else:
            nb -= 1
    # Search packets nested inside this layer's packet fields.
    for f in self.packetfields:
        fvalue_gen = self.getfieldval(f.name)
        if fvalue_gen is None:
            continue
        if not f.islist:
            fvalue_gen = SetGen(fvalue_gen, _iterpacket=0)
        for fvalue in fvalue_gen:
            if isinstance(fvalue, Packet):
                track = []
                ret = fvalue.getlayer(cls, nb, _track=track)
                if ret is not None:
                    return ret
                # The nested search consumed some matches; carry on with
                # the remaining count it reported.
                nb = track[0]
    return self.payload.getlayer(cls, nb, _track=_track)
Return the nb^th layer that is an instance of cls .
48,430
def show(self, indent=3, lvl="", label_lvl=""):
    """Print a hierarchical view of the packet.

    indent: number of spaces added per nested layer.
    lvl / label_lvl: internal prefixes used when recursing into nested
    packets and payloads.
    """
    ct = conf.color_theme
    print("%s%s %s %s" % (label_lvl, ct.punct("###["), ct.layer_name(self.name), ct.punct("]###")))
    for f in self.fields_desc:
        # Skip conditional fields whose condition is not met.
        if isinstance(f, ConditionalField) and not f._evalcond(self):
            continue
        # Emphasized fields get their own color pair.
        if isinstance(f, Emph) or f in conf.emph:
            ncol = ct.emph_field_name
            vcol = ct.emph_field_value
        else:
            ncol = ct.field_name
            vcol = ct.field_value
        fvalue = self.getfieldval(f.name)
        if isinstance(fvalue, Packet) or (f.islist and f.holds_packets and type(fvalue) is list):
            # Field holds nested packet(s): recurse with a deeper prefix.
            print("%s \\%-10s\\" % (label_lvl + lvl, ncol(f.name)))
            fvalue_gen = SetGen(fvalue, _iterpacket=0)
            for fvalue in fvalue_gen:
                fvalue.show(indent=indent, label_lvl=label_lvl + lvl + " |")
        else:
            begn = "%s %-10s%s " % (label_lvl + lvl, ncol(f.name), ct.punct("="),)
            reprval = f.i2repr(self, fvalue)
            if type(reprval) is str:
                # Align continuation lines of multi-line values.
                reprval = reprval.replace("\n", "\n" + " " * (len(label_lvl) + len(lvl) + len(f.name) + 4))
            print("%s%s" % (begn, vcol(reprval)))
    self.payload.show(indent=indent, lvl=lvl + (" " * indent * self.show_indent), label_lvl=label_lvl)
Prints a hierarchical view of the packet . indent gives the size of indentation for each layer .
48,431
def decode_payload_as(self, cls):
    """Reassemble the payload and re-decode it using packet class *cls*."""
    raw = bytes(self.payload)
    self.payload = cls(raw, _internal=1, _underlayer=self)
    # Walk up to the outermost layer and signal that dissection finished.
    top = self
    while top.underlayer is not None:
        top = top.underlayer
    self.payload.dissection_done(top)
Reassembles the payload and decode it using another packet class
48,432
def libnet(self):
    """Not ready yet. Should give the necessary C code that interfaces
    with libnet to recreate the packet."""
    print("libnet_build_%s(" % self.__class__.name.lower())
    # Re-dissect our own serialized bytes so every field value is resolved.
    # NOTE(review): str(self) is not the raw byte string on Python 3 —
    # bytes(self) is probably intended; confirm before using.
    det = self.__class__(str(self))
    for f in self.fields_desc:
        val = det.getfieldval(f.name)
        if val is None:
            val = 0
        elif type(val) is int:
            val = str(val)
        else:
            # Non-numeric values are emitted as quoted C strings.
            val = '"%s"' % str(val)
        print("\t%s, \t\t/* %s */" % (val, f.name))
    print(");")
Not ready yet . Should give the necessary C code that interfaces with libnet to recreate the packet
48,433
def dissect(self, b):
    """Dissect an incoming DoIP packet; reject anything shorter than the
    8-byte header."""
    if len(b) >= 8:
        return super(DoIPRawPacket, self).dissect(b)
    raise ValueError("given packet too short")
Dissect an incoming DoIP packet .
48,434
def inet_pton(af, addr):
    """Convert an IP address from text representation into binary form.

    af: socket.AF_INET or socket.AF_INET6. addr is a bytes string for the
    IPv6 path. Raises Exception on malformed input or unsupported family.
    """
    import binascii
    if af == socket.AF_INET:
        return inet_aton(addr)
    elif af == socket.AF_INET6:
        # Mark the "::" zero-compression span with a joker token so the
        # missing zero groups can be inserted after parsing.
        JOKER = b"*"
        while b"::" in addr:
            addr = addr.replace(b"::", b":" + JOKER + b":")
        joker_pos = None
        # An embedded IPv4 tail (e.g. ::ffff:1.2.3.4) is the last group.
        ipv4_addr = None
        if b"." in addr:
            ipv4_addr = addr.split(b":")[-1]
        result = b""
        parts = addr.split(b":")
        for part in parts:
            if part == JOKER:
                if joker_pos is None:
                    joker_pos = len(result)
                else:
                    # "::" may appear at most once.
                    raise Exception("Illegal syntax for IP address")
            elif part == ipv4_addr:
                result += socket.inet_aton(ipv4_addr)
            else:
                try:
                    # Fix: bytes.decode("hex") was Python 2 only;
                    # binascii.unhexlify is the Python 3 equivalent.
                    result += binascii.unhexlify(part.rjust(4, b"0"))
                except (binascii.Error, TypeError):
                    raise Exception("Illegal syntax for IP address")
        if JOKER in addr:
            # Expand the compressed span with the missing zero bytes.
            result = (result[:joker_pos] + b"\x00" * (16 - len(result))
                      + result[joker_pos:])
        if len(result) != 16:
            raise Exception("Illegal syntax for IP address")
        return result
    else:
        raise Exception("Address family not supported")
Convert an IP address from text representation into binary form
48,435
def inet_ntop(af, addr):
    """Convert an IP address from binary form into text representation.

    af: socket.AF_INET or socket.AF_INET6. Returns bytes for the IPv6
    path. Raises Exception on malformed input or unsupported family.
    """
    if af == socket.AF_INET:
        return inet_ntoa(addr)
    elif af == socket.AF_INET6:
        if len(addr) != 16:
            raise Exception("Illegal syntax for IP address")
        parts = []
        for left in [0, 2, 4, 6, 8, 10, 12, 14]:
            try:
                value = struct.unpack("!H", addr[left:left + 2])[0]
                hexstr = hex(value)[2:]
            except TypeError:
                raise Exception("Illegal syntax for IP address")
            # Fix: parts must be bytes — joining str parts with the bytes
            # separator below raised TypeError on Python 3. A zero group
            # becomes b"" here; the ":::" collapse below compresses the
            # resulting runs of colons into "::".
            parts.append(hexstr.lstrip("0").lower().encode("ascii"))
        result = b":".join(parts)
        while b":::" in result:
            result = result.replace(b":::", b"::")
        # A single trailing/leading zero group must stay explicit.
        if result.endswith(b":") and not result.endswith(b"::"):
            result = result + b"0"
        if result.startswith(b":") and not result.startswith(b"::"):
            result = b"0" + result
        return result
    else:
        raise Exception("Address family not supported yet")
Convert an IP address from binary form into text represenation
48,436
def strand(s1, s2):
    """Return the character-wise binary AND of the equal-length strings
    s1 and s2."""
    return "".join(chr(ord(a) & ord(b)) for a, b in zip(s1, s2))
Returns the binary AND of the 2 provided strings s1 and s2 . s1 and s2 must be of same length .
48,437
def pkcs_mgf1(mgfSeed, maskLen, h):
    """Generic MGF1 Mask Generation Function (RFC 3447, Appendix B.2.1).

    mgfSeed: seed for mask generation; maskLen: desired mask length in
    octets; h: hash name (key of _hashFuncParams). Returns None on error.
    """
    # Steps are those of RFC 3447 B.2.1.
    if h not in _hashFuncParams:
        # Fix: the "%s" placeholder had no argument, so the warning showed
        # a literal "%s" instead of the offending hash name.
        warning("pkcs_mgf1: invalid hash (%s) provided" % h)
        return None
    hLen = _hashFuncParams[h][0]
    hFunc = _hashFuncParams[h][1]
    if maskLen > 2 ** 32 * hLen:
        warning("pkcs_mgf1: maskLen > 2**32 * hLen")
        return None
    T = ""
    maxCounter = math.ceil(float(maskLen) / float(hLen))
    counter = 0
    while counter < maxCounter:
        C = pkcs_i2osp(counter, 4)
        T += hFunc(mgfSeed + C)
        counter += 1
    return T[:maskLen]
Implements generic MGF1 Mask Generation function as described in Appendix B . 2 . 1 of RFC 3447 . The hash function is passed by name . valid values are md2 md4 md5 sha1 tls sha256 sha384 and sha512 . Returns None on error .
48,438
def create_temporary_ca_path(anchor_list, folder):
    """Create an OpenSSL-style CA path under *folder*.

    Stores every certificate of anchor_list in PEM format and then runs
    c_rehash to create the hash symlinks OpenSSL expects. Returns the
    number of anchors written, or None on any failure.
    """
    # Make sure the target folder exists.
    # NOTE(review): the bare excepts below map *any* failure (including
    # programming errors) to None — consider narrowing to OSError.
    try:
        if not os.path.isdir(folder):
            os.makedirs(folder)
    except:
        return None
    l = len(anchor_list)
    if l == 0:
        return None
    # Zero-padded file name pattern wide enough for l entries,
    # e.g. "%02d.pem" when 10 < l <= 100.
    fmtstr = "%%0%sd.pem" % math.ceil(math.log(l, 10))
    i = 0
    try:
        for a in anchor_list:
            fname = os.path.join(folder, fmtstr % i)
            f = open(fname, "w")
            s = a.output(fmt="PEM")
            f.write(s)
            f.close()
            i += 1
    except:
        return None
    # c_rehash builds the hashed symlinks for the CA path.
    r, w, e = popen3(["c_rehash", folder])
    r.close()
    w.close()
    e.close()
    return l
Create a CA path folder as defined in OpenSSL terminology by storing all certificates in anchor_list list in PEM format under provided folder and then creating the associated links using the hash as usually done by c_rehash .
48,439
def _rsadp(self, c):
    """Internal raw RSA decryption: modular exponentiation of the integer
    ciphertext representative c, which must satisfy 0 <= c <= n - 1.

    Returns None (with a warning) when c is out of range or not an int.
    """
    n = self.modulus
    # Fix: `long` does not exist on Python 3 — plain int already covers
    # arbitrary precision. Also reject negative representatives, per the
    # documented 0..n-1 range.
    if not isinstance(c, int) or not (0 <= c <= n - 1):
        # Fix: message previously named _rsaep() and "long".
        warning("Key._rsadp() expects an integer between 0 and n-1")
        return None
    return self.key.decrypt(c)
Internal method providing raw RSA decryption i . e . simple modular exponentiation of the given ciphertext representative c a long between 0 and n - 1 .
48,440
def fragment(pkt, fragsize=1480):
    """Fragment a big IP datagram into a list of IP fragment packets.

    fragsize is the maximum payload bytes per fragment; it is rounded up
    to a multiple of 8 because the IP fragment offset field counts
    8-byte units.
    """
    fragsize = (fragsize + 7) // 8 * 8
    lst = []
    for p in pkt:
        s = bytes(p[IP].payload)
        # Number of fragments needed (ceiling division).
        nb = (len(s) + fragsize - 1) // fragsize
        for i in range(nb):
            q = p.copy()
            del q[IP].payload
            # Drop checksum and length so they are recomputed on build.
            del q[IP].chksum
            del q[IP].len
            if i == nb - 1:
                # Last fragment: clear the More Fragments bit.
                q[IP].flags &= ~1
            else:
                # Set the More Fragments bit.
                q[IP].flags |= 1
            # Offset is expressed in 8-byte units.
            q[IP].frag = i * fragsize // 8
            r = conf.raw_layer(load=s[i * fragsize:(i + 1) * fragsize])
            r.overload_fields = p[IP].payload.overload_fields.copy()
            q.add_payload(r)
            lst.append(q)
    return lst
Fragment a big IP datagram
48,441
def parse_version(str_):
    """Parse a dotted X.Y.Z version number out of *str_*.

    Returns the first match as a string; raises KeyError (after printing
    a diagnostic) when no version is found.
    """
    # Fix: the dots were unescaped, so r"\d+.\d+.\d+" also matched
    # strings like "1a2b3" where "." matched arbitrary characters.
    v = re.findall(r"\d+\.\d+\.\d+", str_)
    if v:
        return v[0]
    else:
        print("cannot parse string {}".format(str_))
        raise KeyError
Parses the program s version from a python variable declaration .
48,442
def make_session(username=None, password=None, bearer_token=None, extra_headers_dict=None):
    """Create a requests Session, preferring bearer-token auth over
    username/password when a token is supplied."""
    if password is None and bearer_token is None:
        logger.error("No authentication information provided; "
                     "please check your object")
        raise KeyError
    session = requests.Session()
    session.trust_env = False
    headers = {'Accept-encoding': 'gzip',
               'User-Agent': 'twitterdev-search-tweets-python/' + VERSION}
    if bearer_token:
        logger.info("using bearer token for authentication")
        headers['Authorization'] = "Bearer {}".format(bearer_token)
    else:
        logger.info("using username and password for authentication")
        session.auth = username, password
    session.headers = headers
    if extra_headers_dict:
        # session.headers references the same dict, so this applies there.
        headers.update(extra_headers_dict)
    return session
Creates a Requests Session for use . Accepts a bearer token for premiums users and will override username and password information if present .
48,443
def retry(func):
    """Decorator to handle API retries and exceptions. Defaults to three
    retries with quadratic backoff (1, 4, 9 seconds)."""
    def retried_func(*args, **kwargs):
        max_tries = 3
        tries = 0
        while True:
            try:
                resp = func(*args, **kwargs)
            except requests.exceptions.ConnectionError as exc:
                exc.msg = "Connection error for session; exiting"
                raise exc
            except requests.exceptions.HTTPError as exc:
                exc.msg = "HTTP error for session; exiting"
                raise exc
            if resp.status_code != 200 and tries < max_tries:
                logger.warning("retrying request; current status code: {}".format(resp.status_code))
                tries += 1
                # Quadratic backoff between attempts.
                time.sleep(tries ** 2)
                continue
            break
        if resp.status_code != 200:
            error_message = resp.json()["error"]["message"]
            logger.error("HTTP Error code: {}: {}".format(resp.status_code, error_message))
            # NOTE(review): assumes the wrapped call received a
            # rule_payload keyword argument — KeyError here otherwise.
            logger.error("Rule payload: {}".format(kwargs["rule_payload"]))
            raise requests.exceptions.HTTPError
        return resp
    return retried_func
Decorator to handle API retries and exceptions . Defaults to three retries .
48,444
def request(session, url, rule_payload, **kwargs):
    """POST the rule payload (dict or JSON string) to *url* via *session*."""
    if isinstance(rule_payload, dict):
        rule_payload = json.dumps(rule_payload)
    logger.debug("sending request")
    return session.post(url, data=rule_payload, **kwargs)
Executes a request with the given payload and arguments .
48,445
def collect_results(rule, max_results=500, result_stream_args=None):
    """Run a one-off ResultStream for *rule* and return the tweets as a
    list, without keeping the stream object around."""
    if result_stream_args is None:
        logger.error("This function requires a configuration dict for the "
                     "inner ResultStream object.")
        raise KeyError
    result_stream = ResultStream(rule_payload=rule,
                                 max_results=max_results,
                                 **result_stream_args)
    return list(result_stream.stream())
Utility function to quickly get a list of tweets from a ResultStream without keeping the object around . Requires your args to be configured prior to using .
48,446
def stream(self):
    """Main entry point for data from the API.

    Generator: yields parsed tweets, automatically paginating via the
    "next" token until max_results tweets or max_requests API calls are
    reached, whichever comes first.
    """
    self.init_session()
    self.check_counts()
    self.execute_request()
    self.stream_started = True
    while True:
        for tweet in self.current_tweets:
            if self.total_results >= self.max_results:
                break
            yield self._tweet_func(tweet)
            self.total_results += 1
        # Page again only while a next token exists and both caps allow it.
        if self.next_token and self.total_results < self.max_results and self.n_requests <= self.max_requests:
            self.rule_payload = merge_dicts(self.rule_payload, {"next": self.next_token})
            logger.info("paging; total requests read so far: {}".format(self.n_requests))
            self.execute_request()
        else:
            break
    logger.info("ending stream at {} tweets".format(self.total_results))
    # Release the page buffer and the HTTP session.
    self.current_tweets = None
    self.session.close()
Main entry point for the data from the API . Will automatically paginate through the results via the next token and return up to max_results tweets or up to max_requests API calls whichever is lower .
48,447
def init_session(self):
    """Create (or refresh) the HTTP session used for requests, closing
    any previous one first."""
    previous = self.session
    if previous:
        previous.close()
    self.session = make_session(self.username, self.password,
                                self.bearer_token, self.extra_headers_dict)
Defines a session object for passing requests .
48,448
def check_counts(self):
    """Disable tweet parsing when the counts API endpoint is in use."""
    endpoint_tokens = re.split("[/.]", self.endpoint)
    if "counts" in endpoint_tokens:
        logger.info("disabling tweet parsing due to counts API usage")
        self._tweet_func = lambda x: x
Disables tweet parsing if the count API is used .
48,449
def execute_request(self):
    """Send the request to the API and parse the JSON response.

    Sets self.next_token (pagination) and self.current_tweets from the
    response body; refreshes the session every 20 requests.
    """
    # Periodically rebuild the session to avoid stale connections.
    if self.n_requests % 20 == 0 and self.n_requests > 1:
        logger.info("refreshing session")
        self.init_session()
    resp = request(session=self.session, url=self.endpoint, rule_payload=self.rule_payload)
    self.n_requests += 1
    ResultStream.session_request_counter += 1
    resp = json.loads(resp.content.decode(resp.encoding))
    self.next_token = resp.get("next", None)
    self.current_tweets = resp["results"]
Sends the request to the API and parses the json response . Makes some assumptions about the session length and sets the presence of a next token .
48,450
def gen_rule_payload(pt_rule, results_per_call=None, from_date=None,
                     to_date=None, count_bucket=None, tag=None,
                     stringify=True):
    """Generate the dict or JSON payload for a PowerTrack rule.

    pt_rule: the rule text (whitespace is normalized).
    results_per_call: ignored unless an int; dropped for counts requests.
    count_bucket: one of "day"/"hour"/"minute" switches to a counts payload.
    stringify: return a JSON string when True, a dict otherwise.
    Raises ValueError for an invalid count_bucket.
    """
    pt_rule = ' '.join(pt_rule.split())
    payload = {"query": pt_rule}
    if results_per_call is not None and isinstance(results_per_call, int) is True:
        payload["maxResults"] = results_per_call
    if to_date:
        payload["toDate"] = convert_utc_time(to_date)
    if from_date:
        payload["fromDate"] = convert_utc_time(from_date)
    if count_bucket:
        if set(["day", "hour", "minute"]) & set([count_bucket]):
            # Fix: `del payload["maxResults"]` raised KeyError whenever
            # results_per_call was not supplied; pop() tolerates absence.
            payload.pop("maxResults", None)
            payload["bucket"] = count_bucket
        else:
            logger.error("invalid count bucket: provided {}".format(count_bucket))
            raise ValueError
    if tag:
        payload["tag"] = tag
    return json.dumps(payload) if stringify else payload
Generates the dict or json payload for a PowerTrack rule .
48,451
def gen_params_from_config(config_dict):
    """Generate keyword parameters for a ResultStream from a config dict.

    Switches to the counts endpoint when "count_bucket" is set, coerces
    numeric fields to int, and builds the rule payload.
    """
    if config_dict.get("count_bucket"):
        logger.warning("change your endpoint to the count endpoint; this is "
                       "default behavior when the count bucket "
                       "field is defined")
        endpoint = change_to_count_endpoint(config_dict.get("endpoint"))
    else:
        endpoint = config_dict.get("endpoint")

    def intify(arg):
        # Config values may arrive as strings; coerce to int but let None
        # pass through untouched.
        if not isinstance(arg, int) and arg is not None:
            return int(arg)
        else:
            return arg

    results_per_call = intify(config_dict.get("results_per_call", None))
    rule = gen_rule_payload(pt_rule=config_dict["pt_rule"],
                            from_date=config_dict.get("from_date", None),
                            to_date=config_dict.get("to_date", None),
                            results_per_call=results_per_call,
                            count_bucket=config_dict.get("count_bucket", None))
    _dict = {"endpoint": endpoint,
             "username": config_dict.get("username"),
             "password": config_dict.get("password"),
             "bearer_token": config_dict.get("bearer_token"),
             "extra_headers_dict": config_dict.get("extra_headers_dict", None),
             "rule_payload": rule,
             "results_per_file": intify(config_dict.get("results_per_file")),
             "max_results": intify(config_dict.get("max_results")),
             "max_pages": intify(config_dict.get("max_pages", None))}
    return _dict
Generates parameters for a ResultStream from a dictionary .
48,452
def infer_endpoint(rule_payload):
    """Infer which endpoint ("counts" or "search") suits a rule payload,
    given either a dict or a JSON string."""
    if isinstance(rule_payload, dict):
        rule = rule_payload
    else:
        rule = json.loads(rule_payload)
    return "counts" if rule.get("bucket") else "search"
Infer which endpoint should be used for a given rule payload .
48,453
def validate_count_api(rule_payload, endpoint):
    """Ensure the counts API is set correctly for a given payload.

    Raises ValueError when the payload requests a count bucket but the
    endpoint is not a counts endpoint.
    """
    rule = (rule_payload if isinstance(rule_payload, dict)
            else json.loads(rule_payload))
    bucket = rule.get('bucket')
    counts = set(endpoint.split("/")) & {"counts.json"}
    if len(counts) == 0:
        if bucket is not None:
            # Fix: msg was assigned an empty tuple, so the logged error
            # carried no information about what went wrong.
            msg = ("The rule payload specifies a count bucket ({}) but the "
                   "endpoint is not a counts endpoint: {}"
                   .format(bucket, endpoint))
            logger.error(msg)
            raise ValueError
Ensures that the counts api is set correctly in a payload .
48,454
def partition(iterable, chunk_size, pad_none=False):
    """Break *iterable* into chunk_size-tuples (adapted from toolz),
    padding the final chunk with None when pad_none is True (otherwise a
    short tail is dropped)."""
    chunks = [iter(iterable)] * chunk_size
    if pad_none:
        return it.zip_longest(*chunks)
    return zip(*chunks)
adapted from Toolz . Breaks an iterable into n iterables up to the certain chunk size padding with Nones if availble .
48,455
def write_ndjson(filename, data_iterable, append=False, **kwargs):
    """Generator: write each item as newline-delimited JSON to *filename*
    while yielding the items back to the caller."""
    file_mode = "ab" if append else "wb"
    logger.info("writing to file {}".format(filename))
    with codecs.open(filename, file_mode, "utf-8") as outfile:
        for entry in data_iterable:
            outfile.write(json.dumps(entry) + "\n")
            yield entry
Generator that writes newline - delimited json to a file and returns items from an iterable .
48,456
def write_result_stream(result_stream, filename_prefix=None, results_per_file=None, **kwargs):
    """Wrap a ResultStream (or generator) to save items to ndjson file(s).

    Still yields every item, acting as a pass-through around write_ndjson.
    results_per_file, when set, chunks the output into timestamped files.
    """
    if isinstance(result_stream, types.GeneratorType):
        stream = result_stream
    else:
        stream = result_stream.stream()
    file_time_formatter = "%Y-%m-%dT%H_%M_%S"
    if filename_prefix is None:
        filename_prefix = "twitter_search_results"
    if results_per_file:
        logger.info("chunking result stream to files with {} tweets per file".format(results_per_file))
        chunked_stream = partition(stream, results_per_file, pad_none=True)
        for chunk in chunked_stream:
            # Drop the None padding partition() added to the last chunk.
            chunk = filter(lambda x: x is not None, chunk)
            curr_datetime = (datetime.datetime.utcnow().strftime(file_time_formatter))
            _filename = "{}_{}.json".format(filename_prefix, curr_datetime)
            yield from write_ndjson(_filename, chunk)
    else:
        curr_datetime = (datetime.datetime.utcnow().strftime(file_time_formatter))
        _filename = "{}.json".format(filename_prefix)
        yield from write_ndjson(_filename, stream)
Wraps a ResultStream object to save it to a file . This function will still return all data from the result stream as a generator that wraps the write_ndjson method .
48,457
def _load_yaml_credentials(filename=None, yaml_key=None):
    """Load and parse credentials stored under *yaml_key* in a YAML file.

    Catches common failures (missing file, missing key) and returns an
    empty dict so the error can be handled downstream.
    """
    creds = {}
    try:
        with open(os.path.expanduser(filename)) as handle:
            creds = yaml.safe_load(handle)[yaml_key]
    except FileNotFoundError:
        logger.error("cannot read file {}".format(filename))
    except KeyError:
        logger.error("{} is missing the provided key: {}".format(filename, yaml_key))
    return creds
Loads and parses credentials in a YAML file . Catches common exceptions and returns an empty dict on error which will be handled downstream .
48,458
def _generate_bearer_token(consumer_key, consumer_secret):
    """Fetch an OAuth2 bearer token for the given consumer key/secret pair,
    raising on HTTP errors."""
    resp = requests.post(OAUTH_ENDPOINT,
                         data=[('grant_type', 'client_credentials')],
                         auth=(consumer_key, consumer_secret))
    logger.warning("Grabbing bearer token from OAUTH")
    if resp.status_code >= 400:
        logger.error(resp.text)
        resp.raise_for_status()
    return resp.json()['access_token']
Return the bearer token for a given pair of consumer key and secret values .
48,459
def fi_business_id(business_id):
    """Validate a Finnish Business ID (Y-tunnus) string."""
    if not business_id or not re.match(business_id_pattern, business_id):
        return False
    weights = [7, 9, 10, 5, 8, 4, 2]
    digits = map(int, business_id[:7])
    given_check = int(business_id[8])
    remainder = sum(w * d for w, d in zip(weights, digits)) % 11
    if remainder == 0:
        return given_check == 0
    return 11 - remainder == given_check
Validate a Finnish Business ID .
48,460
def fi_ssn(ssn, allow_temporal_ssn=True):
    """Validate a Finnish Social Security Number.

    allow_temporal_ssn: when False, serials above 899 (temporary SSNs)
    are rejected.
    """
    if not ssn:
        return False
    result = re.match(ssn_pattern, ssn)
    if not result:
        return False
    gd = result.groupdict()
    # The check character is derived from the concatenated date + serial
    # digits, modulo the check-mark alphabet length.
    checksum = int(gd['date'] + gd['serial'])
    # Serials below 002 are not assigned to individuals.
    return (int(gd['serial']) >= 2 and
            (allow_temporal_ssn or int(gd['serial']) <= 899) and
            ssn_checkmarks[checksum % len(ssn_checkmarks)] == gd['checksum'])
Validate a Finnish Social Security Number .
48,461
def modcheck(value):
    """Check whether *value* passes the mod-97 test (ISO 7064, as used by
    IBAN): move the first four characters to the end, convert every
    character to its numeric value, and test the remainder."""
    shuffled = value[4:] + value[:4]
    digit_string = "".join(str(char_value(ch)) for ch in shuffled)
    return int(digit_string) % 97 == 1
Check if the value string passes the mod97 - test .
48,462
def func_args_as_dict(func, args, kwargs):
    """Return func's positional and keyword arguments as an OrderedDict,
    keyed by parameter name in declaration order."""
    if six.PY2:
        spec_fn = inspect.getargspec
    else:
        spec_fn = inspect.getfullargspec
    # Parameter names first, then any extra keyword names, de-duplicated
    # while preserving order.
    arg_names = list(OrderedDict.fromkeys(
        itertools.chain(spec_fn(func)[0], kwargs.keys())))
    return OrderedDict(list(six.moves.zip(arg_names, args)) + list(kwargs.items()))
Return given function s positional and key value arguments as an ordered dictionary .
48,463
def validator(func, *args, **kwargs):
    """Turn the given truth-returning function into a validator.

    The decorated function returns ``True`` on success, and a falsy
    ``ValidationFailure`` carrying the call arguments on failure.
    """
    def wrapper(func, *args, **kwargs):
        if func(*args, **kwargs):
            return True
        return ValidationFailure(func, func_args_as_dict(func, args, kwargs))
    return decorator(wrapper, func)
A decorator that makes given function validator .
48,464
def length(value, min=None, max=None):
    """Return whether the length of *value* falls within [min, max].

    Each bound may be None (unbounded); negative bounds are rejected
    with an AssertionError.
    """
    for bound in (min, max):
        if bound is not None and bound < 0:
            raise AssertionError('`min` and `max` need to be greater than zero.')
    return between(len(value), min=min, max=max)
Return whether or not the length of given string is within a specified range .
48,465
def url(value, public=False):
    """Return whether *value* is a valid URL.

    When *public* is True, the URL must additionally not point at a
    private IP address or private hostname.
    """
    match = pattern.match(value)
    if not public or not match:
        return match
    is_private = any(
        match.groupdict().get(key)
        for key in ('private_ip', 'private_host')
    )
    return not is_private
Return whether or not given value is a valid URL .
48,466
def ipv4(value):
    """Return whether *value* is a valid dotted-quad IPv4 address."""
    octets = value.split('.')
    if len(octets) != 4:
        return False
    for octet in octets:
        # isdigit() also rejects signs and empty strings, so every
        # surviving octet is a non-negative integer string.
        if not octet.isdigit():
            return False
        if int(octet) > 255:
            return False
    return True
Return whether or not given value is a valid IP version 4 address .
48,467
def post_report(coverage, args):
    """Post the coverage report to coveralls.io.

    Returns 0 on success; otherwise the error message reported by the
    service (or one synthesized from a non-JSON response).
    """
    response = requests.post(
        URL,
        files={'json_file': json.dumps(coverage)},
        verify=(not args.skip_ssl_verify))
    try:
        result = response.json()
    except ValueError:
        # Service answered with something that is not JSON; surface
        # the raw status and body as the error.
        result = {
            'error': 'Failure to submit data. '
                     'Response [%(status)s]: %(text)s' % {
                         'status': response.status_code,
                         'text': response.text}}
    print(result)
    return result['error'] if 'error' in result else 0
Post coverage report to coveralls . io .
48,468
def is_source_file(args, filepath):
    """Return True if *filepath* looks like a C++ source file.

    Uses the user-supplied ``args.extension`` list when present,
    otherwise the built-in set of C++ extensions.
    """
    allowed = args.extension if args.extension else _CPP_EXTENSIONS
    return os.path.splitext(filepath)[1] in allowed
Returns true if it is a C ++ source file .
48,469
def exclude_paths(args):
    """Return the absolute path of every configured exclude entry."""
    if not args.exclude:
        return []
    return [
        os.path.abspath(os.path.join(args.root, excl_path))
        for excl_path in args.exclude
    ]
Returns the absolute paths for excluded path .
48,470
def create_exclude_rules(args):
    """Build (and memoize) the sorted include/exclude path rules.

    Each rule is an ``(absolute_path, is_excluded)`` pair.  Sorting by
    path ensures ancestor directories come before their children, which
    the rule-scanning code relies on.
    """
    global _cached_exclude_rules
    if _cached_exclude_rules is not None:
        return _cached_exclude_rules
    excluded = [
        (os.path.abspath(os.path.join(args.root, path)), True)
        for path in args.exclude
    ]
    included = [
        (os.path.abspath(os.path.join(args.root, path)), False)
        for path in args.include
    ]
    _cached_exclude_rules = sorted(excluded + included,
                                   key=lambda rule: rule[0])
    return _cached_exclude_rules
Create the exclude rules.
48,471
def is_excluded_path(args, filepath):
    """Return True if *filepath* is excluded from coverage reporting.

    A path is excluded when it matches one of the ``args.regexp``
    patterns, lies outside every include directory (when includes are
    configured), or falls under an exclude rule that is not overridden
    by a more specific include rule.
    """
    # Regex exclusions are checked first, against the raw path.
    for regexp_exclude_path in args.regexp:
        if re.match(regexp_exclude_path, filepath):
            return True
    abspath = os.path.abspath(filepath)
    # When include dirs are given, anything outside all of them is out.
    if args.include:
        out_of_include_dirs = True
        for incl_path in args.include:
            absolute_include_path = os.path.abspath(
                os.path.join(args.root, incl_path))
            if is_child_dir(absolute_include_path, abspath):
                out_of_include_dirs = False
                break
        if out_of_include_dirs:
            return True
    # Rules are sorted by path, so a parent rule appears before any
    # deeper (more specific) rule; the deepest matching rule wins.
    excl_rules = create_exclude_rules(args)
    for i, rule in enumerate(excl_rules):
        if rule[0] == abspath:
            return rule[1]
        if is_child_dir(rule[0], abspath):
            last_result = rule[1]
            # Scan forward for more specific rules that still cover
            # this path; the last one seen takes precedence.
            for j in range(i + 1, len(excl_rules)):
                rule_deep = excl_rules[j]
                if not is_child_dir(rule_deep[0], abspath):
                    break
                last_result = rule_deep[1]
            return last_result
    return False
Returns true if the filepath is under the one of the exclude path .
48,472
def filter_dirs(root, dirs, excl_paths):
    """Drop directories that are skipped by default (``_SKIP_DIRS``)
    or whose absolute path is in *excl_paths*."""
    kept = []
    for name in dirs:
        abspath = os.path.abspath(os.path.join(root, name))
        if os.path.basename(abspath) in _SKIP_DIRS:
            continue
        if abspath in excl_paths:
            continue
        kept.append(name)
    return kept
Filter directory paths based on the exclusion rules defined in excl_paths .
48,473
def parse_gcov_file(args, fobj, filename):
    """Parse a .gcov file into a per-line coverage list.

    Returns a list with one entry per source line: ``None`` for
    non-executable or excluded lines, otherwise the hit count.
    Honors ``LCOV_EXCL_START``/``LCOV_EXCL_STOP`` marker comments and
    the user-supplied ``args.exclude_lines_pattern`` regexes.
    """
    coverage = []
    ignoring = False  # True while inside an LCOV_EXCL_START..STOP region
    for line in fobj:
        # gcov line format: "<count>:<line_no>:<source text>"
        report_fields = line.decode('utf-8', 'replace').split(':', 2)
        if len(report_fields) == 1:
            continue
        line_num = report_fields[1].strip()
        if line_num == '':
            continue
        cov_num = report_fields[0].strip()
        line_num = int(line_num)
        text = report_fields[2]
        if line_num == 0:
            # line 0 carries gcov metadata, not source code
            continue
        if re.search(r'\bLCOV_EXCL_START\b', text):
            if ignoring:
                sys.stderr.write("Warning: %s:%d: nested LCOV_EXCL_START, "
                                 "please fix\n" % (filename, line_num))
            ignoring = True
        elif re.search(r'\bLCOV_EXCL_(STOP|END)\b', text):
            if not ignoring:
                sys.stderr.write("Warning: %s:%d: LCOV_EXCL_STOP outside of "
                                 "exclusion zone, please fix\n" % (filename,
                                                                   line_num))
            if 'LCOV_EXCL_END' in text:
                sys.stderr.write("Warning: %s:%d: LCOV_EXCL_STOP is the "
                                 "correct keyword\n" % (filename, line_num))
            ignoring = False
        if cov_num == '-':
            # non-executable line: no coverage entry
            coverage.append(None)
        elif cov_num == '#####':
            # executable but never hit; still None when excluded by a
            # marker region or a user pattern
            if (ignoring or
                    any([re.search(pattern, text)
                         for pattern in args.exclude_lines_pattern])):
                coverage.append(None)
            else:
                coverage.append(0)
        elif cov_num == '=====':
            # '=====' entries are treated as zero hits
            coverage.append(0)
        else:
            # a trailing '*' (unexecuted basic blocks) is stripped
            coverage.append(int(cov_num.rstrip('*')))
    return coverage
Parses the content of . gcov file
48,474
def parse_lcov_file_info(args, filepath, line_iter, line_coverage_re,
                         file_end_string):
    """Parse per-line coverage for one source file from an lcov stream.

    Consumes lines from *line_iter* until ``end_of_record``, collecting
    (line_number, hit_count) pairs matching *line_coverage_re*, then
    maps them onto a list with one entry per line of *filepath*
    (``None`` for lines without coverage data).

    :param file_end_string: unused here; kept for interface
        compatibility with the caller.
    """
    lines_covered = []
    for line in line_iter:
        if line == "end_of_record":
            break
        match = line_coverage_re.match(line)
        if not match:
            continue
        line_no = match.group(1)
        cov_count = int(match.group(2))
        # Clamp pathological hit counts so enormous numbers do not
        # bloat the report.
        if args.max_cov_count and cov_count > args.max_cov_count:
            cov_count = args.max_cov_count + 1
        lines_covered.append((line_no, cov_count))

    # Count the source lines with a context manager so the file handle
    # is closed deterministically (the original opened the file inside
    # a list comprehension and never closed it).
    with open(filepath, 'r') as source:
        num_code_lines = sum(1 for _ in source)

    coverage = [None] * num_code_lines
    for line_no, cov_count in lines_covered:
        coverage[int(line_no) - 1] = cov_count
    return coverage
Parse the file content of an lcov info file.
48,475
def combine_reports(original, new):
    """Merge two gcov reports for one file by summing per-line hits.

    ``None`` entries mean "no coverage data for that line" and are
    transparent when combining.  When *original* is None, *new* is
    returned unchanged.
    """
    if original is None:
        return new
    merged = []
    for left, right in zip(original['coverage'], new['coverage']):
        if left is None:
            merged.append(right)
        elif right is None:
            merged.append(left)
        else:
            merged.append(left + right)
    return {
        'name': original['name'],
        'source_digest': original['source_digest'],
        'coverage': merged,
    }
Combines two gcov reports for a file into one by adding the number of hits on each line
48,476
def collect_non_report_files(args, discovered_files):
    """Build empty coverage entries for source files without reports.

    Walks the project tree; for every source file that is neither
    excluded nor already present in *discovered_files*, emits a report
    dict with an all-``None`` coverage list and the file's MD5 digest.
    """
    excl_paths = exclude_paths(args)
    abs_root = os.path.abspath(args.root)
    non_report_files = []
    for root, dirs, files in os.walk(args.root,
                                     followlinks=args.follow_symlinks):
        # Prune excluded/skipped directories in place so os.walk does
        # not descend into them.
        dirs[:] = filter_dirs(root, dirs, excl_paths)
        for filename in files:
            if not is_source_file(args, filename):
                continue
            abs_filepath = os.path.join(os.path.abspath(root), filename)
            if is_excluded_path(args, abs_filepath):
                continue
            filepath = os.path.relpath(abs_filepath, abs_root)
            if filepath not in discovered_files:
                src_report = {}
                src_report['name'] = posix_path(filepath)
                coverage = []
                with io.open(abs_filepath, mode='rb') as fobj:
                    # one None per source line: no coverage data
                    for _ in fobj:
                        coverage.append(None)
                    fobj.seek(0)
                    src_report['source_digest'] = hashlib.md5(
                        fobj.read()).hexdigest()
                src_report['coverage'] = coverage
                non_report_files.append(src_report)
    return non_report_files
Collects the source files that have no coverage reports .
48,477
def parse_yaml_config(args):
    """Load the coveralls YAML configuration file.

    Returns the parsed mapping, or an empty dict when the file does
    not exist or is empty.  Raises SystemExit when the file exists but
    PyYAML is not installed.
    """
    try:
        import yaml
    except ImportError:
        yaml = None

    yml = {}
    try:
        with open(args.coveralls_yaml, 'r') as fp:
            if not yaml:
                raise SystemExit(
                    'PyYAML is required for parsing configuration'
                )
            # safe_load refuses arbitrary-object YAML tags; plain
            # yaml.load() without a Loader can construct arbitrary
            # Python objects from the config file.
            yml = yaml.safe_load(fp)
    except IOError:
        pass
    return yml or {}
Parse yaml config
48,478
def run():
    """Entry point: collect C++ coverage and report it to coveralls.io.

    Merges CLI arguments with the YAML config and environment
    variables, optionally runs gcov, collects coverage, and then
    either dumps the report or posts it to the service.
    """
    import json
    import os
    import sys
    from . import coverage, report
    args = coverage.create_args(sys.argv[1:])
    if args.verbose:
        print('encodings: {}'.format(args.encodings))
    yml = parse_yaml_config(args)
    # Precedence for the repo token: CLI flag, then yaml, then env var.
    if not args.repo_token:
        args.repo_token = yml.get('repo_token', '')
    if not args.repo_token:
        args.repo_token = os.environ.get('COVERALLS_REPO_TOKEN')
    args.service_name = yml.get('service_name', 'travis-ci')
    if not args.gcov_options:
        args.gcov_options = yml.get('gcov_options', '')
    if not args.root:
        args.root = yml.get('root', '.')
    if not args.build_root:
        args.build_root = yml.get('build_root', '')
    # yaml entries extend (not replace) the CLI-provided lists.
    args.exclude.extend(yml.get('exclude', []))
    args.include.extend(yml.get('include', []))
    args.exclude_lines_pattern.extend(yml.get('exclude_lines_pattern', []))
    args.service_job_id = os.environ.get('TRAVIS_JOB_ID', '')
    if args.repo_token == '' and args.service_job_id == '':
        raise ValueError("\nno coveralls.io token specified and no travis job id found\n"
                         "see --help for examples on how to specify a token\n")
    if not args.no_gcov:
        coverage.run_gcov(args)
    cov_report = coverage.collect(args)
    if args.verbose:
        print(cov_report)
    if args.dryrun:
        return 0
    if args.dump:
        args.dump.write(json.dumps(cov_report))
        return 0
    return report.post_report(cov_report, args)
Run cpp coverage .
48,479
def gitrepo(cwd):
    """Collect git metadata (HEAD commit, branch, remotes) for the
    coveralls payload.  Returns {} when *cwd* is not a valid repo."""
    repo = Repository(cwd)
    if not repo.valid():
        return {}
    head = {
        'id': repo.gitlog('%H'),
        'author_name': repo.gitlog('%aN'),
        'author_email': repo.gitlog('%ae'),
        'committer_name': repo.gitlog('%cN'),
        'committer_email': repo.gitlog('%ce'),
        'message': repo.gitlog('%s'),
    }
    # CI-provided branch variables win over asking git directly.
    branch = os.environ.get(
        'TRAVIS_BRANCH',
        os.environ.get(
            'APPVEYOR_REPO_BRANCH',
            repo.git('rev-parse', '--abbrev-ref', 'HEAD')[1].strip()))
    remotes = []
    for line in repo.git('remote', '-v')[1]:
        if '(fetch)' not in line:
            continue
        tokens = line.split()
        remotes.append({'name': tokens[0], 'url': tokens[1]})
    return {'head': head, 'branch': branch, 'remotes': remotes}
Return hash of Git data that can be used to display more information to users .
48,480
def _verify(function):
    """Decorator ensuring the GPIO pin is exported and set up before
    *function* runs.

    On first use of a pin, exports it through sysfs (serialized by
    ``_export_lock``) and caches open ``value``/``direction`` file
    handles in the module-level ``_open`` registry.
    """
    def wrapped(pin, *args, **kwargs):
        pin = int(pin)
        if pin not in _open:
            ppath = gpiopath(pin)
            if not os.path.exists(ppath):
                log.debug("Creating Pin {0}".format(pin))
                with _export_lock:
                    with open(pjoin(gpio_root, 'export'), 'w') as f:
                        _write(f, pin)
            # Keep the sysfs files open so later reads/writes are fast.
            value = open(pjoin(ppath, 'value'), FMODE)
            direction = open(pjoin(ppath, 'direction'), FMODE)
            _open[pin] = PinState(value=value, direction=direction)
        return function(pin, *args, **kwargs)
    return wrapped
decorator to ensure pin is properly set up
48,481
def set(pin, value):
    """Drive *pin* to 0 or 1.

    ``LOW`` maps to 0; any other value is coerced through bool, so
    truthy values write 1 and falsy values write 0.
    """
    if value is LOW:
        value = 0
    level = int(bool(value))
    log.debug("Write {0}: {1}".format(pin, level))
    _write(_open[pin].value, level)
set the pin value to 0 or 1
48,482
def end(self):
    """Terminate access to the V interface and drop the HDF reference."""
    status = _C.Vfinish(self._hdf_inst._id)
    _checkErr('vend', status, "cannot terminate V interface")
    self._hdf_inst = None
Close the V interface .
48,483
def attach(self, num_name, write=0):
    """Open a vgroup by name or reference number and return a VG
    instance for it.

    A bytes *num_name* is resolved to a reference number via
    ``find``; *write* selects read-write access.
    """
    if isinstance(num_name, bytes):
        ref = self.find(num_name)
    else:
        ref = num_name
    mode = 'w' if write else 'r'
    vg_id = _C.Vattach(self._hdf_inst._id, ref, mode)
    _checkErr('vattach', vg_id, "cannot attach Vgroup")
    return VG(self, vg_id)
Open an existing vgroup given its name or its reference number or create a new vgroup returning a VG instance for that vgroup .
48,484
def create(self, name):
    """Create a new vgroup, assign it *name*, and return it."""
    # attach(-1, write=1) allocates a brand-new vgroup.
    new_vg = self.attach(-1, 1)
    new_vg._name = name
    return new_vg
Create a new vgroup and assign it a name .
48,485
def find(self, name):
    """Return the reference number of the vgroup named *name*.

    Raises HDF4Error when no vgroup matches.
    """
    ref = _C.Vfind(self._hdf_inst._id, name)
    if not ref:
        raise HDF4Error("vgroup not found")
    return ref
Find a vgroup given its name returning its reference number if found .
48,486
def findclass(self, name):
    """Return the reference number of a vgroup given its class *name*.

    Raises HDF4Error when no vgroup matches.
    """
    ref = _C.Vfindclass(self._hdf_inst._id, name)
    if not ref:
        raise HDF4Error("vgroup not found")
    return ref
Find a vgroup given its class name returning its reference number if found .
48,487
def delete(self, num_name):
    """Remove the vgroup identified by its name or reference number
    from the HDF file."""
    try:
        vg = self.attach(num_name, 1)
    except HDF4Error:
        raise HDF4Error("delete: no such vgroup")
    refnum = vg._refnum
    # The vgroup must be detached before it can be deleted.
    vg.detach()
    status = _C.Vdelete(self._hdf_inst._id, refnum)
    _checkErr('delete', status, "error deleting vgroup")
Delete from the HDF file the vgroup identified by its reference number or its name .
48,488
def getid(self, ref):
    """Return the reference number of the vgroup following the one
    with reference number *ref*.

    Raises HDF4Error (via _checkErr) on bad arguments or when the
    last vgroup has been reached.
    """
    next_ref = _C.Vgetid(self._hdf_inst._id, ref)
    _checkErr('getid', next_ref, "bad arguments or last vgroup reached")
    return next_ref
Obtain the reference number of the vgroup following the vgroup with the given reference number .
48,489
def insert(self, inst):
    """Insert a vdata (VD) or vgroup (VG) into this vgroup.

    Returns the index assigned by the library.  Raises HDF4Error if
    *inst* is neither a VD nor a VG, or if the insertion fails.
    """
    if isinstance(inst, (VD, VG)):
        member_id = inst._id
    else:
        # Fixed typo in the original error message ("insrt").
        raise HDF4Error("insert: bad argument")
    index = _C.Vinsert(self._id, member_id)
    _checkErr('insert', index, "cannot insert in vgroup")
    return index
Insert a vdata or a vgroup in the vgroup .
48,490
def add(self, tag, ref):
    """Add the object identified by (*tag*, *ref*) to the vgroup.

    Returns the value reported by the underlying Vaddtagref call;
    failures are converted to HDF4Error by _checkErr.
    """
    result = _C.Vaddtagref(self._id, tag, ref)
    _checkErr('addtagref', result, 'invalid arguments')
    return result
Add to the vgroup an object identified by its tag and reference number .
48,491
def delete(self, tag, ref):
    """Remove the member identified by (*tag*, *ref*) from the vgroup."""
    status = _C.Vdeletetagref(self._id, tag, ref)
    _checkErr('delete', status, "error deleting member")
Delete from the vgroup the member identified by its tag and reference number .
48,492
def tagref(self, index):
    """Return the (tag, reference number) pair of the vgroup member at
    position *index*.

    Raises HDF4Error (via _checkErr) on illegal arguments.
    """
    status, tag, ref = _C.Vgettagref(self._id, index)
    _checkErr('tagref', status, "illegal arguments")
    return tag, ref
Get the tag and reference number of a vgroup member given the index number of that member .
48,493
def tagrefs(self):
    """Return a list of (tag, reference number) pairs for every member
    of the vgroup."""
    pairs = []
    count = self._nmembers
    if count:
        tags = _C.array_int32(count)
        refs = _C.array_int32(count)
        fetched = _C.Vgettagrefs(self._id, tags, refs, count)
        _checkErr('tagrefs', fetched, "error getting tags and refs")
        # range() works on both Python 2 and 3; the original used
        # xrange, which is a NameError on Python 3.
        for m in range(fetched):
            pairs.append((tags[m], refs[m]))
    return pairs
Get the tags and reference numbers of all the vgroup members .
48,494
def inqtagref(self, tag, ref):
    """Return true (non-zero) if the object identified by (*tag*,
    *ref*) belongs to the vgroup, false otherwise."""
    return _C.Vinqtagref(self._id, tag, ref)
Determines if an object identified by its tag and reference number belongs to the vgroup .
48,495
def nrefs(self, tag):
    """Return how many members of the vgroup carry the given *tag*.

    Raises HDF4Error (via _checkErr) on bad arguments.
    """
    count = _C.Vnrefs(self._id, tag)
    _checkErr('nrefs', count, "bad arguments")
    return count
Determine the number of tags of a given type in a vgroup .
48,496
def attrinfo(self):
    """Describe every vgroup attribute.

    Returns a dict mapping each attribute name to a
    (type, order, value, size) tuple.
    """
    described = {}
    for idx in range(self._nattrs):
        attribute = self.attr(idx)
        name, att_type, order, size = attribute.info()
        described[name] = (att_type, order, attribute.get(), size)
    return described
Return info about all the vgroup attributes .
48,497
def findattr(self, name):
    """Look up a vgroup attribute by *name*.

    Returns the attribute instance, or None when it does not exist.
    """
    try:
        attribute = self.attr(name)
        if attribute._index is None:
            return None
        return attribute
    except HDF4Error:
        return None
Search the vgroup for a given attribute .
48,498
def index(self):
    """Retrieve the attribute's index number.

    The result is cached on ``self._index`` before validation (so a
    failed lookup leaves the FAIL value stored); raises HDF4Error
    (via _checkErr) for an unknown attribute name.
    """
    self._index = _C.SDfindattr(self._obj._id, self._name)
    _checkErr('find', self._index, 'illegal attribute name')
    return self._index
Retrieve the attribute index number .
48,499
def end(self):
    """End access to the SD interface, close the HDF file, and
    invalidate this instance's identifier."""
    _checkErr('end', _C.SDend(self._id), "cannot execute")
    self._id = None
End access to the SD interface and close the HDF file .