idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
231,300
def delete(self, **kwargs):
    """Delete the resource, retrying transient iControl HTTP errors.

    Retries up to 30 times while ``_check_exception`` classifies the error
    as recoverable; any other error is re-raised immediately.  Returns the
    result of ``_delete`` on success, or None if every attempt failed
    recoverably.
    """
    attempts = 0
    while attempts < 30:
        attempts += 1
        try:
            return self._delete(**kwargs)
        except iControlUnexpectedHTTPError as err:
            # Only retry errors the subclass explicitly marks recoverable.
            if not self._check_exception(err):
                raise
Custom deletion logic to handle edge cases
62
7
231,301
def reset(self):
    """Restore the original (Y, D, X) inputs and drop all estimation state."""
    old = self.old_data
    self.raw_data = Data(old['Y'], old['D'], old['X'])
    self.summary_stats = Summary(self.raw_data)
    # Everything derived from a previous estimation run is now stale.
    self.propensity = None
    self.cutoff = None
    self.blocks = None
    self.strata = None
    self.estimates = Estimators()
Reinitializes data to original inputs and drops any estimated results .
102
13
231,302
def est_propensity(self, lin='all', qua=None):
    """Fit propensity scores from the requested covariate terms.

    lin/qua select which covariates enter linearly and quadratically.
    Stores the fitted Propensity model and writes the fitted scores into
    ``raw_data`` under 'pscore' before running post-init hooks.
    """
    K = self.raw_data['K']
    lin_terms = parse_lin_terms(K, lin)
    qua_terms = parse_qua_terms(K, qua)

    self.propensity = Propensity(self.raw_data, lin_terms, qua_terms)
    self.raw_data._dict['pscore'] = self.propensity['fitted']
    self._post_pscore_init()
Estimates the propensity scores given list of covariates to include linearly or quadratically .
124
19
231,303
def trim(self):
    """Drop units whose propensity score lies outside [cutoff, 1 - cutoff].

    A cutoff of 0 is a no-op; values outside (0, 0.5] raise ValueError.
    Trimming rebuilds raw_data/summary_stats and discards strata and
    estimates computed on the full sample.  Set the ``cutoff`` attribute
    (documented default 0.1) before calling.
    """
    cutoff = self.cutoff
    if 0 < cutoff <= 0.5:
        pscore = self.raw_data['pscore']
        keep = (pscore >= cutoff) & (pscore <= 1 - cutoff)
        Y_keep = self.raw_data['Y'][keep]
        D_keep = self.raw_data['D'][keep]
        X_keep = self.raw_data['X'][keep]
        self.raw_data = Data(Y_keep, D_keep, X_keep)
        self.raw_data._dict['pscore'] = pscore[keep]
        self.summary_stats = Summary(self.raw_data)
        # Stratification/estimates computed pre-trim no longer apply.
        self.strata = None
        self.estimates = Estimators()
    elif cutoff == 0:
        pass  # nothing to trim
    else:
        raise ValueError('Invalid cutoff.')
Trims data based on propensity score to create a subsample with better covariate balance . The default cutoff value is set to 0 . 1 . To set a custom cutoff value modify the object attribute named cutoff directly .
204
43
231,304
def stratify(self):
    """Partition the sample into propensity-score bins.

    If ``blocks`` is an int, that many equal-sized bins are used;
    otherwise ``blocks`` is taken as a sorted list of bin boundaries in
    (0, 1).  Each bin becomes a CausalModel; results go to ``self.strata``.
    """
    Y, D, X = self.raw_data['Y'], self.raw_data['D'], self.raw_data['X']
    pscore = self.raw_data['pscore']

    if isinstance(self.blocks, int):
        boundaries = split_equal_bins(pscore, self.blocks)
    else:
        boundaries = self.blocks[:]  # make a copy; should be sorted
        boundaries[0] = 0  # avoids always dropping 1st unit

    def in_bin(p_low, p_high):
        # Half-open membership so adjacent bins never share a unit.
        return (p_low < pscore) & (pscore <= p_high)

    subsets = [in_bin(*edge) for edge in zip(boundaries, boundaries[1:])]
    strata = [CausalModel(Y[s], D[s], X[s]) for s in subsets]

    self.strata = Strata(strata, subsets, pscore)
Stratifies the sample based on propensity score . By default the sample is divided into five equal - sized bins . The number of bins can be set by modifying the object attribute named blocks . Alternatively custom - sized bins can be created by setting blocks equal to a sorted list of numbers between 0 and 1 indicating the bin boundaries .
216
65
231,305
def est_via_matching(self, weights='inv', matches=1, bias_adj=False):
    """Estimate average treatment effects by nearest-neighbor matching.

    weights: 'inv' (inverse sample variance), 'maha' (Mahalanobis from the
    pooled group covariances), or an explicit weighting matrix.  Stores the
    result under ``self.estimates['matching']``.
    """
    X, K = self.raw_data['X'], self.raw_data['K']
    X_c, X_t = self.raw_data['X_c'], self.raw_data['X_t']

    if weights == 'inv':
        W = 1 / X.var(0)
    elif weights == 'maha':
        V_c = np.cov(X_c, rowvar=False, ddof=0)
        V_t = np.cov(X_t, rowvar=False, ddof=0)
        if K == 1:
            # Scalar covariance: keep matrix form for the matcher.
            W = 1 / np.array([[(V_c + V_t) / 2]])
        else:
            W = np.linalg.inv((V_c + V_t) / 2)
    else:
        # Caller supplied an explicit weighting matrix.
        W = weights

    self.estimates['matching'] = Matching(self.raw_data, W, matches, bias_adj)
Estimates average treatment effects using nearest - neighborhood matching .
243
11
231,306
def random_data(N=5000, K=3, unobservables=False, **kwargs):
    """Generate a random sample satisfying the unconfoundedness assumption.

    Parameters
    ----------
    N : int
        Number of units to draw.
    K : int
        Number of covariates.
    unobservables : bool
        When True, also return the latent potential outcomes and the true
        propensity scores.
    **kwargs
        Optional model parameters: mu, beta, theta, delta, Sigma, Gamma.

    Returns
    -------
    (Y, D, X), or (Y, D, X, Y0, Y1, pscore) when ``unobservables`` is True.
    """
    mu = kwargs.get('mu', np.zeros(K))
    beta = kwargs.get('beta', np.ones(K))
    theta = kwargs.get('theta', np.ones(K))
    delta = kwargs.get('delta', 3)
    Sigma = kwargs.get('Sigma', np.identity(K))
    Gamma = kwargs.get('Gamma', np.identity(2))

    X = np.random.multivariate_normal(mean=mu, cov=Sigma, size=N)
    Xbeta = X.dot(beta)
    pscore = logistic.cdf(Xbeta)
    # Fix: the original drew one Bernoulli per unit in a Python loop;
    # np.random.binomial broadcasts over an array of probabilities, giving
    # the same distribution in one vectorized call.
    D = np.random.binomial(1, pscore)

    epsilon = np.random.multivariate_normal(mean=np.zeros(2), cov=Gamma, size=N)
    Y0 = Xbeta + epsilon[:, 0]
    Y1 = delta + X.dot(beta + theta) + epsilon[:, 1]
    Y = (1 - D) * Y0 + D * Y1

    if unobservables:
        return Y, D, X, Y0, Y1, pscore
    return Y, D, X
Function that generates data according to one of two simple models that satisfies the unconfoundedness assumption .
330
20
231,307
def _summarize_pscore ( self , pscore_c , pscore_t ) : self . _dict [ 'p_min' ] = min ( pscore_c . min ( ) , pscore_t . min ( ) ) self . _dict [ 'p_max' ] = max ( pscore_c . max ( ) , pscore_t . max ( ) ) self . _dict [ 'p_c_mean' ] = pscore_c . mean ( ) self . _dict [ 'p_t_mean' ] = pscore_t . mean ( )
Called by Strata class during initialization .
130
9
231,308
def lookup_bulk(self, ResponseGroup="Large", **kwargs):
    """Look up several Amazon products in a single ItemLookup call.

    Returns a list of AmazonProduct wrappers, or [] when the response
    carries no Item elements.
    """
    raw = self.api.ItemLookup(ResponseGroup=ResponseGroup, **kwargs)
    root = objectify.fromstring(raw)
    if not hasattr(root.Items, 'Item'):
        return []
    return [AmazonProduct(item, self.aws_associate_tag, self, region=self.region)
            for item in root.Items.Item]
Lookup Amazon Products in bulk .
106
7
231,309
def similarity_lookup(self, ResponseGroup="Large", **kwargs):
    """Fetch products similar to the given item(s) via SimilarityLookup.

    Raises SimilartyLookupException when Amazon marks the request invalid.
    """
    raw = self.api.SimilarityLookup(ResponseGroup=ResponseGroup, **kwargs)
    root = objectify.fromstring(raw)

    if root.Items.Request.IsValid == 'False':
        code = root.Items.Request.Errors.Error.Code
        msg = root.Items.Request.Errors.Error.Message
        raise SimilartyLookupException(
            "Amazon Similarty Lookup Error: '{0}', '{1}'".format(code, msg))

    return [AmazonProduct(item, self.aws_associate_tag, self.api, region=self.region)
            for item in getattr(root.Items, 'Item', [])]
Similarity Lookup .
175
6
231,310
def browse_node_lookup(self, ResponseGroup="BrowseNodeInfo", **kwargs):
    """Look up Amazon browse nodes.

    Raises BrowseNodeLookupException when Amazon marks the request invalid.
    """
    raw = self.api.BrowseNodeLookup(ResponseGroup=ResponseGroup, **kwargs)
    root = objectify.fromstring(raw)

    if root.BrowseNodes.Request.IsValid == 'False':
        code = root.BrowseNodes.Request.Errors.Error.Code
        msg = root.BrowseNodes.Request.Errors.Error.Message
        raise BrowseNodeLookupException(
            "Amazon BrowseNode Lookup Error: '{0}', '{1}'".format(code, msg))

    return [AmazonBrowseNode(node.BrowseNode) for node in root.BrowseNodes]
Browse Node Lookup .
160
6
231,311
def search_n(self, n, **kwargs):
    """Run a product search and return at most the first *n* results."""
    # Default the region to this API client's region unless overridden.
    kwargs['region'] = kwargs.get('region', self.region)
    results = AmazonSearch(self.api, self.aws_associate_tag, **kwargs)
    return list(islice(results, n))
Search and return first N results ..
83
7
231,312
def _safe_get_element_date ( self , path , root = None ) : value = self . _safe_get_element_text ( path = path , root = root ) if value is not None : try : value = dateutil . parser . parse ( value ) if value : value = value . date ( ) except ValueError : value = None return value
Safe get element date .
78
7
231,313
def iterate_pages(self):
    """Yield each result page in turn until the API reports no more pages.

    The API signals exhaustion with NoMorePages, which is treated as a
    normal end of iteration.
    """
    try:
        while not self.is_last_page:
            self.current_page += 1
            yield self._query(ItemPage=self.current_page, **self.kwargs)
    except NoMorePages:
        pass
Iterate Pages .
57
4
231,314
def ancestor(self):
    """Return this node's immediate ancestor in the browse node tree, or None."""
    parent = getattr(self.parsed_response, 'Ancestors', None)
    if hasattr(parent, 'BrowseNode'):
        return AmazonBrowseNode(parent['BrowseNode'])
    return None
This browse node s immediate ancestor in the browse node tree .
55
12
231,315
def ancestors(self):
    """Collect this node's chain of ancestors, nearest first."""
    chain = []
    node = self.ancestor
    while node is not None:
        chain.append(node)
        node = node.ancestor
    return chain
A list of this browse node s ancestors in the browse node tree .
34
14
231,316
def children(self):
    """Return this node's child browse nodes (possibly empty)."""
    # NOTE(review): no getattr default here, so a response without a
    # 'Children' element raises AttributeError -- presumably callers
    # guarantee its presence; verify before changing.
    child_nodes = getattr(self.parsed_response, 'Children')
    return [AmazonBrowseNode(node)
            for node in getattr(child_nodes, 'BrowseNode', [])]
This browse node s children in the browse node tree .
63
11
231,317
def price_and_currency(self):
    """Return (price, currency) for the best available offer price.

    Preference order: sale price, then offer price, then lowest new price.
    Amounts are converted from minor units (cents) except in the JP region.
    Returns (None, None) when no price is present.
    """
    get = self._safe_get_element_text

    price = get('Offers.Offer.OfferListing.SalePrice.Amount')
    if price:
        currency = get('Offers.Offer.OfferListing.SalePrice.CurrencyCode')
    else:
        price = get('Offers.Offer.OfferListing.Price.Amount')
        if price:
            currency = get('Offers.Offer.OfferListing.Price.CurrencyCode')
        else:
            price = get('OfferSummary.LowestNewPrice.Amount')
            currency = get('OfferSummary.LowestNewPrice.CurrencyCode')

    if not price:
        return None, None
    # JP prices are already in whole yen; everything else is in cents.
    value = Decimal(price) if 'JP' in self.region else Decimal(price) / 100
    return value, currency
Get Offer Price and Currency .
241
6
231,318
def reviews(self):
    """Return (has_reviews, iframe_url) for the item's customer reviews."""
    iframe = self._safe_get_element_text('CustomerReviews.IFrameURL')
    flag = self._safe_get_element_text('CustomerReviews.HasReviews')
    # Amazon reports the flag as the literal string 'true'/'false'.
    return flag == 'true', iframe
Customer Reviews .
96
3
231,319
def editorial_reviews(self):
    """Return the text of each editorial review (possibly empty).

    Children of the EditorialReviews element without a Content sub-element
    are skipped.
    """
    result = []
    reviews_node = self._safe_get_element('EditorialReviews')
    if reviews_node is not None:
        for review_node in reviews_node.iterchildren():
            # Bug fix: getattr previously had no default, so a child
            # lacking Content raised AttributeError and the None check
            # below was dead code.
            content_node = getattr(review_node, 'Content', None)
            if content_node is not None:
                result.append(content_node.text)
    return result
Editorial Review .
90
4
231,320
def list_price(self):
    """Return (list_price, currency), or (None, None) when absent.

    Amounts are converted from minor units (cents) except in the JP region.
    """
    amount = self._safe_get_element_text('ItemAttributes.ListPrice.Amount')
    currency = self._safe_get_element_text('ItemAttributes.ListPrice.CurrencyCode')
    if not amount:
        return None, None
    if 'JP' in self.region:
        return Decimal(amount), currency
    return Decimal(amount) / 100, currency
List Price .
95
3
231,321
def get_parent(self):
    """Fetch (and cache) the parent product via its ParentASIN, if any."""
    if not self.parent:
        parent_asin = self._safe_get_element('ParentASIN')
        if parent_asin:
            self.parent = self.api.lookup(ItemId=parent_asin)
    return self.parent
Get Parent .
53
3
231,322
def browse_nodes(self):
    """Return the product's browse nodes, or [] when the element is absent."""
    nodes_root = self._safe_get_element('BrowseNodes')
    if nodes_root is None:
        return []
    return [AmazonBrowseNode(child) for child in nodes_root.iterchildren()]
Browse Nodes .
54
5
231,323
def images(self):
    """Return the list of ImageSet elements for this response.

    When the Images ResponseGroup was not requested the element is None,
    which makes iteration raise TypeError -- treated as "no images".
    """
    image_sets = self._safe_get_element('ImageSets.ImageSet')
    try:
        return list(image_sets)
    except TypeError:  # No images in this ResponseGroup
        return []
List of images for a response . When using lookup with ResponseGroup Images you'll get a list of images . Parse them so they are returned in an easily used list format .
51
38
231,324
def actors(self):
    """Return the names of all credited actors (possibly empty)."""
    nodes = self._safe_get_element('ItemAttributes.Actor') or []
    return [node.text for node in nodes]
Movie Actors .
46
4
231,325
def directors(self):
    """Return the names of all credited directors (possibly empty)."""
    nodes = self._safe_get_element('ItemAttributes.Director') or []
    return [node.text for node in nodes]
Movie Directors .
46
3
231,326
def getMibSymbol(self):
    """Return (module name, symbol name, indices) once MIB-resolved.

    Raises SmiError if resolution has not completed yet.
    """
    if not (self._state & self.ST_CLEAN):
        raise SmiError('%s object not fully initialized' % self.__class__.__name__)
    return self._modName, self._symName, self._indices
Returns MIB variable symbolic identification .
67
7
231,327
def getOid(self):
    """Return the OID identifying this MIB variable once resolved.

    Raises SmiError if resolution has not completed yet.
    """
    if not (self._state & self.ST_CLEAN):
        raise SmiError('%s object not fully initialized' % self.__class__.__name__)
    return self._oid
Returns OID identifying MIB variable .
52
8
231,328
def getLabel(self):
    """Return the symbolic path to this MIB variable once resolved.

    Raises SmiError if resolution has not completed yet.
    """
    if not (self._state & self.ST_CLEAN):
        raise SmiError('%s object not fully initialized' % self.__class__.__name__)
    return self._label
Returns symbolic path to this MIB variable .
51
9
231,329
def addMibSource(self, *mibSources):
    """Queue extra MIB repository paths to search; returns self for chaining."""
    pending = self._mibSourcesToAdd
    self._mibSourcesToAdd = mibSources if pending is None else pending + mibSources
    return self
Adds path to repository to search PySNMP MIB files .
54
13
231,330
def loadMibs(self, *modNames):
    """Schedule MIB modules for search and load; returns self for chaining."""
    pending = self._modNamesToLoad
    self._modNamesToLoad = modNames if pending is None else pending + modNames
    return self
Schedules search and load of given MIB modules .
48
12
231,331
# Resolve this ObjectType's name against the MIB and cast its value into the
# proper SMI syntax.  rfc1905 exception values (noSuchObject etc.) pass through
# uncast; OID-typed values are wrapped in ObjectIdentity and resolved too.
# NOTE(review): this method tests/sets self.ST_CLEAM throughout, while sibling
# blocks in this file use ST_CLEAN -- looks like a typo; verify against the
# class definition before changing.
def resolveWithMib ( self , mibViewController ) : if self . _state & self . ST_CLEAM : return self self . _args [ 0 ] . resolveWithMib ( mibViewController ) MibScalar , MibTableColumn = mibViewController . mibBuilder . importSymbols ( 'SNMPv2-SMI' , 'MibScalar' , 'MibTableColumn' ) if not isinstance ( self . _args [ 0 ] . getMibNode ( ) , ( MibScalar , MibTableColumn ) ) : if not isinstance ( self . _args [ 1 ] , AbstractSimpleAsn1Item ) : raise SmiError ( 'MIB object %r is not OBJECT-TYPE ' '(MIB not loaded?)' % ( self . _args [ 0 ] , ) ) self . _state |= self . ST_CLEAM return self if isinstance ( self . _args [ 1 ] , ( rfc1905 . UnSpecified , rfc1905 . NoSuchObject , rfc1905 . NoSuchInstance , rfc1905 . EndOfMibView ) ) : self . _state |= self . ST_CLEAM return self syntax = self . _args [ 0 ] . getMibNode ( ) . getSyntax ( ) try : self . _args [ 1 ] = syntax . clone ( self . _args [ 1 ] ) except PyAsn1Error as exc : raise SmiError ( 'MIB object %r having type %r failed to cast value ' '%r: %s' % ( self . _args [ 0 ] . prettyPrint ( ) , syntax . __class__ . __name__ , self . _args [ 1 ] , exc ) ) if rfc1902 . ObjectIdentifier ( ) . isSuperTypeOf ( self . _args [ 1 ] , matchConstraints = False ) : self . _args [ 1 ] = ObjectIdentity ( self . _args [ 1 ] ) . resolveWithMib ( mibViewController ) self . _state |= self . ST_CLEAM debug . logger & debug . FLAG_MIB and debug . logger ( 'resolved %r syntax is %r' % ( self . _args [ 0 ] , self . _args [ 1 ] ) ) return self
Perform MIB variable ID and associated value conversion .
509
11
231,332
def addVarBinds(self, *varBinds):
    """Append extra var-binds to this notification; returns self for chaining.

    Raises SmiError once the object has been MIB-resolved (sealed).
    """
    (debug.logger & debug.FLAG_MIB and
     debug.logger('additional var-binds: %r' % (varBinds,)))

    if self._state & self.ST_CLEAN:
        raise SmiError('%s object is already sealed' % self.__class__.__name__)

    self._additionalVarBinds.extend(varBinds)
    return self
Appends variable - binding to notification .
105
8
231,333
# Resolve the notification's identity against the MIB, then expand its
# NOTIFICATION-TYPE objects into var-binds (snmpTrapOID first), merging in any
# user-supplied additional var-binds -- replacing expanded entries in place
# when the OIDs collide.  Seals the object (ST_CLEAN) when done.
def resolveWithMib ( self , mibViewController ) : if self . _state & self . ST_CLEAN : return self self . _objectIdentity . resolveWithMib ( mibViewController ) self . _varBinds . append ( ObjectType ( ObjectIdentity ( v2c . apiTrapPDU . snmpTrapOID ) , self . _objectIdentity ) . resolveWithMib ( mibViewController ) ) SmiNotificationType , = mibViewController . mibBuilder . importSymbols ( 'SNMPv2-SMI' , 'NotificationType' ) mibNode = self . _objectIdentity . getMibNode ( ) varBindsLocation = { } if isinstance ( mibNode , SmiNotificationType ) : for notificationObject in mibNode . getObjects ( ) : objectIdentity = ObjectIdentity ( * notificationObject + self . _instanceIndex ) objectIdentity . resolveWithMib ( mibViewController ) objectType = ObjectType ( objectIdentity , self . _objects . get ( notificationObject , rfc1905 . unSpecified ) ) objectType . resolveWithMib ( mibViewController ) self . _varBinds . append ( objectType ) varBindsLocation [ objectIdentity ] = len ( self . _varBinds ) - 1 else : debug . logger & debug . FLAG_MIB and debug . logger ( 'WARNING: MIB object %r is not NOTIFICATION-TYPE (MIB not ' 'loaded?)' % ( self . _objectIdentity , ) ) for varBinds in self . _additionalVarBinds : if not isinstance ( varBinds , ObjectType ) : varBinds = ObjectType ( ObjectIdentity ( varBinds [ 0 ] ) , varBinds [ 1 ] ) varBinds . resolveWithMib ( mibViewController ) if varBinds [ 0 ] in varBindsLocation : self . _varBinds [ varBindsLocation [ varBinds [ 0 ] ] ] = varBinds else : self . _varBinds . append ( varBinds ) self . _additionalVarBinds = [ ] self . _state |= self . ST_CLEAN debug . logger & debug . FLAG_MIB and debug . logger ( 'resolved %r into %r' % ( self . _objectIdentity , self . _varBinds ) ) return self
Perform MIB variable ID conversion and notification objects expansion .
536
12
231,334
def withValues(cls, *values):
    """Derive a subclass constrained to the given discrete values."""
    class X(cls):
        subtypeSpec = cls.subtypeSpec + constraint.SingleValueConstraint(*values)

    # Keep the public name of the base type on the derived class.
    X.__name__ = cls.__name__
    return X
Creates a subclass with discreet values constraint .
54
9
231,335
def withRange(cls, minimum, maximum):
    """Derive a subclass constrained to values in [minimum, maximum]."""
    class X(cls):
        subtypeSpec = cls.subtypeSpec + constraint.ValueRangeConstraint(minimum, maximum)

    # Keep the public name of the base type on the derived class.
    X.__name__ = cls.__name__
    return X
Creates a subclass with value range constraint .
56
9
231,336
def withNamedValues(cls, **values):
    """Derive a subclass adding named values plus a matching value constraint."""
    enums = set(cls.namedValues.items())
    enums.update(values.items())

    class X(cls):
        namedValues = namedval.NamedValues(*enums)
        subtypeSpec = cls.subtypeSpec + constraint.SingleValueConstraint(*values.values())

    # Keep the public name of the base type on the derived class.
    X.__name__ = cls.__name__
    return X
Create a subclass with discreet named values constraint .
100
9
231,337
def withSize(cls, minimum, maximum):
    """Derive a subclass constrained to sizes in [minimum, maximum]."""
    class X(cls):
        subtypeSpec = cls.subtypeSpec + constraint.ValueSizeConstraint(minimum, maximum)

    # Keep the public name of the base type on the derived class.
    X.__name__ = cls.__name__
    return X
Creates a subclass with value size constraint .
56
9
231,338
def withNamedBits(cls, **values):
    """Derive a subclass adding the given named bits."""
    enums = set(cls.namedValues.items())
    enums.update(values.items())

    class X(cls):
        namedValues = namedval.NamedValues(*enums)

    # Keep the public name of the base type on the derived class.
    X.__name__ = cls.__name__
    return X
Creates a subclass with discreet named bits constraint .
75
10
231,339
# Load one MIB module as Python code: try each configured MIB source in order,
# skip paths already seen, exec the module with this builder in its globals,
# and record it in _modSeen.  Raises MibLoadError on exec failure (after
# un-marking the path) and MibNotFoundError when no source yields the module.
def loadModule ( self , modName , * * userCtx ) : for mibSource in self . _mibSources : debug . logger & debug . FLAG_BLD and debug . logger ( 'loadModule: trying %s at %s' % ( modName , mibSource ) ) try : codeObj , sfx = mibSource . read ( modName ) except IOError as exc : debug . logger & debug . FLAG_BLD and debug . logger ( 'loadModule: read %s from %s failed: ' '%s' % ( modName , mibSource , exc ) ) continue modPath = mibSource . fullPath ( modName , sfx ) if modPath in self . _modPathsSeen : debug . logger & debug . FLAG_BLD and debug . logger ( 'loadModule: seen %s' % modPath ) break else : self . _modPathsSeen . add ( modPath ) debug . logger & debug . FLAG_BLD and debug . logger ( 'loadModule: evaluating %s' % modPath ) g = { 'mibBuilder' : self , 'userCtx' : userCtx } try : exec ( codeObj , g ) except Exception : self . _modPathsSeen . remove ( modPath ) raise error . MibLoadError ( 'MIB module "%s" load error: ' '%s' % ( modPath , traceback . format_exception ( * sys . exc_info ( ) ) ) ) self . _modSeen [ modName ] = modPath debug . logger & debug . FLAG_BLD and debug . logger ( 'loadModule: loaded %s' % modPath ) break if modName not in self . _modSeen : raise error . MibNotFoundError ( 'MIB file "%s" not found in search path ' '(%s)' % ( modName and modName + ".py[co]" , ', ' . join ( [ str ( x ) for x in self . _mibSources ] ) ) )  return self
Load and execute MIB modules as Python code
449
9
231,340
# Generator performing one or more SNMP GETNEXT queries over the v1arch
# dispatcher: issues cmdgen.nextCmd per iteration, runs the transport
# dispatcher, yields one (errorIndication, errorStatus, errorIndex, varBindRow)
# per table row, honors lexicographicMode/maxRows/maxCalls limits, and accepts
# replacement var-binds sent into the generator.
# NOTE(review): this row is corrupted -- the text breaks mid-expression
# ("snmpDispatcher ." / "cache , nextVarBinds )") across the next record;
# preserved byte-for-byte rather than guessed at.
def nextCmd ( snmpDispatcher , authData , transportTarget , * varBinds , * * options ) : def cbFun ( * args , * * kwargs ) : response [ : ] = args + ( kwargs . get ( 'nextVarBinds' , ( ) ) , ) options [ 'cbFun' ] = cbFun lexicographicMode = options . pop ( 'lexicographicMode' , True ) maxRows = options . pop ( 'maxRows' , 0 ) maxCalls = options . pop ( 'maxCalls' , 0 ) initialVarBinds = VB_PROCESSOR . makeVarBinds ( snmpDispatcher . cache , varBinds ) totalRows = totalCalls = 0 errorIndication , errorStatus , errorIndex , varBindTable = None , 0 , 0 , ( ) response = [ ] while True : if not varBinds : yield ( errorIndication , errorStatus , errorIndex , varBindTable and varBindTable [ 0 ] or [ ] ) return cmdgen . nextCmd ( snmpDispatcher , authData , transportTarget , * [ ( x [ 0 ] , Null ( '' ) ) for x in varBinds ] , * * options ) snmpDispatcher . transportDispatcher . runDispatcher ( ) errorIndication , errorStatus , errorIndex , varBindTable , varBinds = response if errorIndication : yield ( errorIndication , errorStatus , errorIndex , varBindTable and varBindTable [ 0 ] or [ ] ) return elif errorStatus : if errorStatus == 2 : # Hide SNMPv1 noSuchName error which leaks in here # from SNMPv1 Agent through internal pysnmp proxy. errorStatus = errorStatus . clone ( 0 ) errorIndex = errorIndex . clone ( 0 ) yield ( errorIndication , errorStatus , errorIndex , varBindTable and varBindTable [ 0 ] or [ ] ) return else : varBindRow = varBindTable and varBindTable [ - 1 ] if not lexicographicMode : for idx , varBind in enumerate ( varBindRow ) : name , val = varBind if not isinstance ( val , Null ) : if initialVarBinds [ idx ] [ 0 ] . isPrefixOf ( name ) : break else : return for varBindRow in varBindTable : nextVarBinds = ( yield errorIndication , errorStatus , errorIndex , varBindRow ) if nextVarBinds : initialVarBinds = varBinds = VB_PROCESSOR . makeVarBinds ( snmpDispatcher . 
cache , nextVarBinds ) totalRows += 1 totalCalls += 1 if maxRows and totalRows >= maxRows : return if maxCalls and totalCalls >= maxCalls : return
Create a generator to perform one or more SNMP GETNEXT queries .
616
15
231,341
def registerContextEngineId(self, contextEngineId, pduTypes, processPdu):
    """Register a PDU-processing application under (contextEngineId, pduType).

    Raises error.ProtocolError on duplicate registration.
    Section numbers refer to RFC 3412.
    """
    # 4.3.2 -> no-op

    # 4.3.3
    for pduType in pduTypes:
        k = contextEngineId, pduType
        if k in self._appsRegistration:
            raise error.ProtocolError(
                'Duplicate registration %r/%s' % (contextEngineId, pduType))

        # 4.3.4
        self._appsRegistration[k] = processPdu

    debug.logger & debug.FLAG_DSP and debug.logger(
        'registerContextEngineId: contextEngineId %r pduTypes '
        '%s' % (contextEngineId, pduTypes))
Register application with dispatcher
156
4
231,342
def unregisterContextEngineId(self, contextEngineId, pduTypes):
    """Withdraw application registration for (contextEngineId, pduType) pairs.

    A None contextEngineId defaults to the local SNMP engine ID; unknown
    registrations are silently skipped.  Section numbers refer to RFC 3412.
    """
    # 4.3.4
    if contextEngineId is None:
        # Default to local snmpEngineId
        contextEngineId, = self.mibInstrumController.mibBuilder.importSymbols(
            '__SNMP-FRAMEWORK-MIB', 'snmpEngineID')

    for pduType in pduTypes:
        k = contextEngineId, pduType
        if k in self._appsRegistration:
            del self._appsRegistration[k]

    debug.logger & debug.FLAG_DSP and debug.logger(
        'unregisterContextEngineId: contextEngineId %r pduTypes '
        '%s' % (contextEngineId, pduTypes))
Unregister application with dispatcher
166
5
231,343
# Send an SNMP notification through the Twisted-flavored high-level API:
# configures LCD targets, resolves var-binds, fires the NotificationOriginator
# and returns a Deferred that receives (errorStatus, errorIndex, varBinds) --
# trap-type notifications are completed immediately via reactor.callLater.
def sendNotification ( snmpEngine , authData , transportTarget , contextData , notifyType , * varBinds , * * options ) : def __cbFun ( snmpEngine , sendRequestHandle , errorIndication , errorStatus , errorIndex , varBinds , cbCtx ) : lookupMib , deferred = cbCtx if errorIndication : deferred . errback ( Failure ( errorIndication ) ) else : try : varBinds = VB_PROCESSOR . unmakeVarBinds ( snmpEngine . cache , varBinds , lookupMib ) except Exception as e : deferred . errback ( Failure ( e ) ) else : deferred . callback ( ( errorStatus , errorIndex , varBinds ) ) notifyName = LCD . configure ( snmpEngine , authData , transportTarget , notifyType , contextData . contextName ) def __trapFun ( deferred ) : deferred . callback ( ( 0 , 0 , [ ] ) ) varBinds = VB_PROCESSOR . makeVarBinds ( snmpEngine . cache , varBinds ) deferred = Deferred ( ) ntforg . NotificationOriginator ( ) . sendVarBinds ( snmpEngine , notifyName , contextData . contextEngineId , contextData . contextName , varBinds , __cbFun , ( options . get ( 'lookupMib' , True ) , deferred ) ) if notifyType == 'trap' : reactor . callLater ( 0 , __trapFun , deferred ) return deferred
Sends SNMP notification .
325
6
231,344
# Perform a single SNMP GETNEXT query through the Twisted-flavored high-level
# API: configures LCD targets, resolves var-binds, fires NextCommandGenerator
# and returns a Deferred receiving (errorStatus, errorIndex, varBindTable);
# OidNotIncreasing errors are suppressed when ignoreNonIncreasingOid is set.
def nextCmd ( snmpEngine , authData , transportTarget , contextData , * varBinds , * * options ) : def __cbFun ( snmpEngine , sendRequestHandle , errorIndication , errorStatus , errorIndex , varBindTable , cbCtx ) : lookupMib , deferred = cbCtx if ( options . get ( 'ignoreNonIncreasingOid' , False ) and errorIndication and isinstance ( errorIndication , errind . OidNotIncreasing ) ) : errorIndication = None if errorIndication : deferred . errback ( Failure ( errorIndication ) ) else : try : varBindTable = [ VB_PROCESSOR . unmakeVarBinds ( snmpEngine . cache , varBindTableRow , lookupMib ) for varBindTableRow in varBindTable ] except Exception as e : deferred . errback ( Failure ( e ) ) else : deferred . callback ( ( errorStatus , errorIndex , varBindTable ) ) addrName , paramsName = LCD . configure ( snmpEngine , authData , transportTarget , contextData . contextName ) varBinds = VB_PROCESSOR . makeVarBinds ( snmpEngine . cache , varBinds ) deferred = Deferred ( ) cmdgen . NextCommandGenerator ( ) . sendVarBinds ( snmpEngine , addrName , contextData . contextEngineId , contextData . contextName , varBinds , __cbFun , ( options . get ( 'lookupMib' , True ) , deferred ) ) return deferred
Performs SNMP GETNEXT query .
335
9
231,345
def getBranch(self, name, **context):
    """Return the child subtree whose OID prefix matches *name*.

    Tries every registered key length; raises error.NoSuchObjectError when
    no prefix of *name* is registered.
    """
    for prefixLen in self._vars.getKeysLens():
        prefix = name[:prefixLen]
        if prefix in self._vars:
            return self._vars[prefix]

    raise error.NoSuchObjectError(name=name, idx=context.get('idx'))
Return a branch of this tree where the name OID may reside
84
13
231,346
def getNode(self, name, **context):
    """Return the tree node registered under *name* via recursive descent."""
    if name == self.name:
        return self
    # Descend into whichever branch owns the prefix of *name*.
    return self.getBranch(name, **context).getNode(name, **context)
Return tree node found by name
47
6
231,347
def getNextNode(self, name, **context):
    """Return the tree node immediately following *name* in OID order."""
    try:
        nextNode = self.getBranch(name, **context)
    except (error.NoSuchInstanceError, error.NoSuchObjectError):
        # No branch owns *name* -- the answer starts in the next branch.
        return self.getNextBranch(name, **context)

    try:
        return nextNode.getNextNode(name, **context)
    except (error.NoSuchInstanceError, error.NoSuchObjectError):
        # Branch exhausted -- step over to the branch sorting after it.
        try:
            return self._vars[self._vars.nextKey(nextNode.name)]
        except KeyError:
            raise error.NoSuchObjectError(name=name, idx=context.get('idx'))
Return tree node next to name
150
6
231,348
def writeCommit(self, varBind, **context):
    """Commit a prepared write into the Managed Object Instance.

    Var-binds staged as row creation/destruction are routed to the
    create/destroy handlers; otherwise the commit recurses into the subtree
    owning *name*.  Lookup failures go to context['cbFun'] as errors.
    """
    name, val = varBind

    (debug.logger & debug.FLAG_INS and
     debug.logger('%s: writeCommit(%s, %r)' % (self, name, val)))

    cbFun = context['cbFun']

    instances = context['instances'].setdefault(
        self.name, {self.ST_CREATE: {}, self.ST_DESTROY: {}})
    idx = context['idx']

    if idx in instances[self.ST_CREATE]:
        self.createCommit(varBind, **context)
        return

    if idx in instances[self.ST_DESTROY]:
        self.destroyCommit(varBind, **context)
        return

    try:
        node = self.getBranch(name, **context)
    except (error.NoSuchInstanceError, error.NoSuchObjectError) as exc:
        cbFun(varBind, **dict(context, error=exc))
    else:
        node.writeCommit(varBind, **context)
Commit new value of the Managed Object Instance .
247
12
231,349
def readGet(self, varBind, **context):
    """Read this Managed Object Instance.

    Reports noSuchInstance when the bare column OID is queried or when
    access control denies the read; otherwise defers to the base class.
    """
    name, val = varBind

    (debug.logger & debug.FLAG_INS and
     debug.logger('%s: readGet(%s, %r)' % (self, name, val)))

    cbFun = context['cbFun']

    # The column node itself carries no value -- only its instances do.
    if name == self.name:
        cbFun((name, exval.noSuchInstance), **context)
        return

    acFun = context.get('acFun')
    if acFun:
        if (self.maxAccess not in ('readonly', 'readwrite', 'readcreate')
                or acFun('read', (name, self.syntax), **context)):
            cbFun((name, exval.noSuchInstance), **context)
            return

    ManagedMibObject.readGet(self, varBind, **context)
Read Managed Object Instance .
197
7
231,350
def readGetNext(self, varBind, **context):
    """Read the next Managed Object Instance.

    When access control denies the read, reports noSuchInstance (if a next
    name is known) or endOfMibView; otherwise defers to the base class.
    """
    name, val = varBind

    (debug.logger & debug.FLAG_INS and
     debug.logger('%s: readGetNext(%s, %r)' % (self, name, val)))

    acFun = context.get('acFun')
    if acFun:
        if (self.maxAccess not in ('readonly', 'readwrite', 'readcreate')
                or acFun('read', (name, self.syntax), **context)):
            nextName = context.get('nextName')
            if nextName:
                varBind = nextName, exval.noSuchInstance
            else:
                varBind = name, exval.endOfMibView

            cbFun = context['cbFun']
            cbFun(varBind, **context)
            return

    ManagedMibObject.readGetNext(self, varBind, **context)
Read the next Managed Object Instance .
208
9
231,351
def createCommit(self, varBind, **context):
    """Install a newly created Managed Object Instance into the tree.

    The displaced (possibly absent) previous object is parked under the
    negated var-bind index so a later cleanup/rollback can find it.
    """
    name, val = varBind

    (debug.logger & debug.FLAG_INS and
     debug.logger('%s: writeCommit(%s, %r)' % (self, name, val)))

    cbFun = context['cbFun']

    instances = context['instances'].setdefault(
        self.name, {self.ST_CREATE: {}, self.ST_DESTROY: {}})
    idx = context['idx']

    if name in self._vars:
        # Instance already present -- nothing to install.
        cbFun(varBind, **context)
        return

    # NOTE: multiple names are possible in a single PDU, that could collide
    # Therefore let's keep old object indexed by (negative) var-bind index
    newObj = instances[self.ST_CREATE][idx]
    self._vars[name], instances[self.ST_CREATE][-idx - 1] = newObj, self._vars.get(name)

    newObj.writeCommit(varBind, **context)
Create Managed Object Instance .
250
7
231,352
def createCleanup(self, varBind, **context):
    """Finalize creation of a Managed Object Instance.

    Drops the parked previous object and runs write cleanup on the freshly
    installed one.
    """
    name, val = varBind

    (debug.logger & debug.FLAG_INS and
     debug.logger('%s: createCleanup(%s, %r)' % (self, name, val)))

    instances = context['instances'].setdefault(
        self.name, {self.ST_CREATE: {}, self.ST_DESTROY: {}})
    idx = context['idx']

    # Structural change invalidates any cached walks over this branch.
    self.branchVersionId += 1

    instances[self.ST_CREATE].pop(-idx - 1, None)

    self._vars[name].writeCleanup(varBind, **context)
Finalize Managed Object Instance creation .
158
9
231,353
def destroyCommit(self, varBind, **context):
    """Remove a Managed Object Instance, parking it for possible rollback."""
    name, val = varBind

    (debug.logger & debug.FLAG_INS and
     debug.logger('%s: destroyCommit(%s, %r)' % (self, name, val)))

    instances = context['instances'].setdefault(
        self.name, {self.ST_CREATE: {}, self.ST_DESTROY: {}})
    idx = context['idx']

    # NOTE: multiple names are possible in a single PDU, that could collide
    # Therefore let's keep old object indexed by (negative) var-bind index
    try:
        instances[self.ST_DESTROY][-idx - 1] = self._vars.pop(name)
    except KeyError:
        # Nothing registered under *name* -- destruction is a no-op.
        pass

    cbFun = context['cbFun']
    cbFun(varBind, **context)
Destroy Managed Object Instance .
200
7
231,354
def oidToValue ( self , syntax , identifier , impliedFlag = False , parentIndices = None ) : if not identifier : raise error . SmiError ( 'Short OID for index %r' % ( syntax , ) ) if hasattr ( syntax , 'cloneFromName' ) : return syntax . cloneFromName ( identifier , impliedFlag , parentRow = self , parentIndices = parentIndices ) baseTag = syntax . getTagSet ( ) . getBaseTag ( ) if baseTag == Integer . tagSet . getBaseTag ( ) : return syntax . clone ( identifier [ 0 ] ) , identifier [ 1 : ] elif IpAddress . tagSet . isSuperTagSetOf ( syntax . getTagSet ( ) ) : return syntax . clone ( '.' . join ( [ str ( x ) for x in identifier [ : 4 ] ] ) ) , identifier [ 4 : ] elif baseTag == OctetString . tagSet . getBaseTag ( ) : # rfc1902, 7.7 if impliedFlag : return syntax . clone ( tuple ( identifier ) ) , ( ) elif syntax . isFixedLength ( ) : l = syntax . getFixedLength ( ) return syntax . clone ( tuple ( identifier [ : l ] ) ) , identifier [ l : ] else : return syntax . clone ( tuple ( identifier [ 1 : identifier [ 0 ] + 1 ] ) ) , identifier [ identifier [ 0 ] + 1 : ] elif baseTag == ObjectIdentifier . tagSet . getBaseTag ( ) : if impliedFlag : return syntax . clone ( identifier ) , ( ) else : return syntax . clone ( identifier [ 1 : identifier [ 0 ] + 1 ] ) , identifier [ identifier [ 0 ] + 1 : ] # rfc2578, 7.1 elif baseTag == Bits . tagSet . getBaseTag ( ) : return syntax . clone ( tuple ( identifier [ 1 : identifier [ 0 ] + 1 ] ) ) , identifier [ identifier [ 0 ] + 1 : ] else : raise error . SmiError ( 'Unknown value type for index %r' % ( syntax , ) )
Turn SMI table instance identifier into a value object .
449
11
231,355
def valueToOid ( self , value , impliedFlag = False , parentIndices = None ) : if hasattr ( value , 'cloneAsName' ) : return value . cloneAsName ( impliedFlag , parentRow = self , parentIndices = parentIndices ) baseTag = value . getTagSet ( ) . getBaseTag ( ) if baseTag == Integer . tagSet . getBaseTag ( ) : return int ( value ) , elif IpAddress . tagSet . isSuperTagSetOf ( value . getTagSet ( ) ) : return value . asNumbers ( ) elif baseTag == OctetString . tagSet . getBaseTag ( ) : if impliedFlag or value . isFixedLength ( ) : initial = ( ) else : initial = ( len ( value ) , ) return initial + value . asNumbers ( ) elif baseTag == ObjectIdentifier . tagSet . getBaseTag ( ) : if impliedFlag : return tuple ( value ) else : return ( len ( value ) , ) + tuple ( value ) # rfc2578, 7.1 elif baseTag == Bits . tagSet . getBaseTag ( ) : return ( len ( value ) , ) + value . asNumbers ( ) else : raise error . SmiError ( 'Unknown value type for index %r' % ( value , ) )
Turn value object into SMI table instance identifier .
286
10
231,356
def announceManagementEvent ( self , action , varBind , * * context ) : name , val = varBind cbFun = context [ 'cbFun' ] if not self . _augmentingRows : cbFun ( varBind , * * context ) return # Convert OID suffix into index values instId = name [ len ( self . name ) + 1 : ] baseIndices = [ ] indices = [ ] for impliedFlag , modName , symName in self . _indexNames : mibObj , = mibBuilder . importSymbols ( modName , symName ) syntax , instId = self . oidToValue ( mibObj . syntax , instId , impliedFlag , indices ) if self . name == mibObj . name [ : - 1 ] : baseIndices . append ( ( mibObj . name , syntax ) ) indices . append ( syntax ) if instId : exc = error . SmiError ( 'Excessive instance identifier sub-OIDs left at %s: %s' % ( self , instId ) ) cbFun ( varBind , * * dict ( context , error = exc ) ) return if not baseIndices : cbFun ( varBind , * * context ) return count = [ len ( self . _augmentingRows ) ] def _cbFun ( varBind , * * context ) : count [ 0 ] -= 1 if not count [ 0 ] : cbFun ( varBind , * * context ) for modName , mibSym in self . _augmentingRows : mibObj , = mibBuilder . importSymbols ( modName , mibSym ) mibObj . receiveManagementEvent ( action , ( baseIndices , val ) , * * dict ( context , cbFun = _cbFun ) ) debug . logger & debug . FLAG_INS and debug . logger ( 'announceManagementEvent %s to %s' % ( action , mibObj ) )
Announce mass operation on parent table's row .
422
10
231,357
def receiveManagementEvent ( self , action , varBind , * * context ) : baseIndices , val = varBind # The default implementation supports one-to-one rows dependency instId = ( ) # Resolve indices intersection for impliedFlag , modName , symName in self . _indexNames : mibObj , = mibBuilder . importSymbols ( modName , symName ) parentIndices = [ ] for name , syntax in baseIndices : if name == mibObj . name : instId += self . valueToOid ( syntax , impliedFlag , parentIndices ) parentIndices . append ( syntax ) if instId : debug . logger & debug . FLAG_INS and debug . logger ( 'receiveManagementEvent %s for suffix %s' % ( action , instId ) ) self . _manageColumns ( action , ( self . name + ( 0 , ) + instId , val ) , * * context )
Apply mass operation on extending table's row .
203
9
231,358
def registerAugmentation(self, *names):
    """Register other table rows as AUGMENTS-ing this row.

    Each item of *names* is a ``(modName, symName)`` pair identifying the
    augmenting row. Raises error.SmiError on duplicate registration.
    Returns self to allow call chaining.
    """
    for moduleAndSymbol in names:
        if moduleAndSymbol in self._augmentingRows:
            raise error.SmiError(
                'Row %s already augmented by %s::%s' % (
                    self.name, moduleAndSymbol[0], moduleAndSymbol[1]))

        self._augmentingRows.add(moduleAndSymbol)

    return self
Register table extension .
80
4
231,359
def _manageColumns ( self , action , varBind , * * context ) : name , val = varBind ( debug . logger & debug . FLAG_INS and debug . logger ( '%s: _manageColumns(%s, %s, %r)' % ( self , action , name , val ) ) ) cbFun = context [ 'cbFun' ] colLen = len ( self . name ) + 1 # Build a map of index names and values for automatic initialization indexVals = { } instId = name [ colLen : ] indices = [ ] for impliedFlag , modName , symName in self . _indexNames : mibObj , = mibBuilder . importSymbols ( modName , symName ) syntax , instId = self . oidToValue ( mibObj . syntax , instId , impliedFlag , indices ) indexVals [ mibObj . name ] = syntax indices . append ( syntax ) count = [ len ( self . _vars ) ] if name [ : colLen ] in self . _vars : count [ 0 ] -= 1 def _cbFun ( varBind , * * context ) : count [ 0 ] -= 1 if not count [ 0 ] : cbFun ( varBind , * * context ) for colName , colObj in self . _vars . items ( ) : acFun = context . get ( 'acFun' ) if colName in indexVals : colInstanceValue = indexVals [ colName ] # Index column is usually read-only acFun = None elif name [ : colLen ] == colName : # status column is following `write` path continue else : colInstanceValue = None actionFun = getattr ( colObj , action ) colInstanceName = colName + name [ colLen : ] actionFun ( ( colInstanceName , colInstanceValue ) , * * dict ( context , acFun = acFun , cbFun = _cbFun ) ) debug . logger & debug . FLAG_INS and debug . logger ( '_manageColumns: action %s name %s instance %s %svalue %r' % ( action , name , instId , name in indexVals and "index " or "" , indexVals . get ( name , val ) ) )
Apply a management action on all columns
491
7
231,360
def _checkColumns ( self , varBind , * * context ) : name , val = varBind ( debug . logger & debug . FLAG_INS and debug . logger ( '%s: _checkColumns(%s, %r)' % ( self , name , val ) ) ) cbFun = context [ 'cbFun' ] # RowStatus != active if val != 1 : cbFun ( varBind , * * context ) return count = [ len ( self . _vars ) ] def _cbFun ( varBind , * * context ) : count [ 0 ] -= 1 name , val = varBind if count [ 0 ] >= 0 : exc = context . get ( 'error' ) if exc or not val . hasValue ( ) : count [ 0 ] = - 1 # ignore the rest of callbacks exc = error . InconsistentValueError ( msg = 'Inconsistent column %s: %s' % ( name , exc ) ) cbFun ( varBind , * * dict ( context , error = exc ) ) return if not count [ 0 ] : cbFun ( varBind , * * context ) return colLen = len ( self . name ) + 1 for colName , colObj in self . _vars . items ( ) : instName = colName + name [ colLen : ] colObj . readGet ( ( instName , None ) , * * dict ( context , cbFun = _cbFun ) ) debug . logger & debug . FLAG_INS and debug . logger ( '%s: _checkColumns: checking instance %s' % ( self , instName ) )
Check the consistency of all columns .
349
7
231,361
def getIndicesFromInstId(self, instId):
    """Return index values for instance identification.

    Decodes the OID suffix *instId* into a tuple of index value objects
    according to this row's INDEX clause. Successful (and best-effort
    failed) decodes are memoized in ``self._idToIdxCache``.

    Raises:
        error.SmiError: if sub-OIDs remain after all indices are consumed.
    """
    if instId in self._idToIdxCache:
        return self._idToIdxCache[instId]

    # BUG FIX: remember the original suffix. The loop below reassigns
    # `instId` to the remaining tail on every iteration, so caching by
    # `instId` at the end keyed every entry by the (normally empty)
    # leftover tuple -- the cache could never be hit by the lookup above.
    cacheKey = instId

    indices = []

    for impliedFlag, modName, symName in self._indexNames:
        mibObj, = mibBuilder.importSymbols(modName, symName)

        try:
            syntax, instId = self.oidToValue(
                mibObj.syntax, instId, impliedFlag, indices)

        except PyAsn1Error as exc:
            debug.logger & debug.FLAG_INS and debug.logger(
                'error resolving table indices at %s, %s: %s' % (
                    self.__class__.__name__, instId, exc))
            # fall back to exposing the raw, undecoded suffix
            indices = [instId]
            instId = ()
            break

        indices.append(syntax)  # to avoid cyclic refs

    if instId:
        raise error.SmiError(
            'Excessive instance identifier sub-OIDs left at %s: %s' % (
                self, instId))

    indices = tuple(indices)

    self._idToIdxCache[cacheKey] = indices

    return indices
Return index values for instance identification
248
6
231,362
def getInstIdFromIndices ( self , * indices ) : try : return self . _idxToIdCache [ indices ] except TypeError : cacheable = False except KeyError : cacheable = True idx = 0 instId = ( ) parentIndices = [ ] for impliedFlag , modName , symName in self . _indexNames : if idx >= len ( indices ) : break mibObj , = mibBuilder . importSymbols ( modName , symName ) syntax = mibObj . syntax . clone ( indices [ idx ] ) instId += self . valueToOid ( syntax , impliedFlag , parentIndices ) parentIndices . append ( syntax ) idx += 1 if cacheable : self . _idxToIdCache [ indices ] = instId return instId
Return column instance identification from indices
173
6
231,363
def getInstNameByIndex(self, colId, *indices):
    """Build a full column instance OID: row OID + column id + index suffix."""
    suffix = self.getInstIdFromIndices(*indices)
    return self.name + (colId,) + suffix
Build column instance name from components
39
6
231,364
def getInstNamesByIndex(self, *indices):
    """Build instance OIDs for every column of this row at *indices*."""
    return tuple(
        self.getInstNameByIndex(columnName[-1], *indices)
        for columnName in self._vars.keys()
    )
Build column instance names from indices
65
6
231,365
def nextCmd ( snmpEngine , authData , transportTarget , contextData , * varBinds , * * options ) : # noinspection PyShadowingNames def cbFun ( snmpEngine , sendRequestHandle , errorIndication , errorStatus , errorIndex , varBindTable , cbCtx ) : cbCtx [ 'errorIndication' ] = errorIndication cbCtx [ 'errorStatus' ] = errorStatus cbCtx [ 'errorIndex' ] = errorIndex cbCtx [ 'varBindTable' ] = varBindTable lexicographicMode = options . get ( 'lexicographicMode' , True ) ignoreNonIncreasingOid = options . get ( 'ignoreNonIncreasingOid' , False ) maxRows = options . get ( 'maxRows' , 0 ) maxCalls = options . get ( 'maxCalls' , 0 ) cbCtx = { } vbProcessor = CommandGeneratorVarBinds ( ) initialVars = [ x [ 0 ] for x in vbProcessor . makeVarBinds ( snmpEngine . cache , varBinds ) ] totalRows = totalCalls = 0 while True : previousVarBinds = varBinds if varBinds : cmdgen . nextCmd ( snmpEngine , authData , transportTarget , contextData , * [ ( x [ 0 ] , Null ( '' ) ) for x in varBinds ] , cbFun = cbFun , cbCtx = cbCtx , lookupMib = options . get ( 'lookupMib' , True ) ) snmpEngine . transportDispatcher . runDispatcher ( ) errorIndication = cbCtx [ 'errorIndication' ] errorStatus = cbCtx [ 'errorStatus' ] errorIndex = cbCtx [ 'errorIndex' ] if ignoreNonIncreasingOid and errorIndication and isinstance ( errorIndication , errind . OidNotIncreasing ) : errorIndication = None if errorIndication : yield ( errorIndication , errorStatus , errorIndex , varBinds ) return elif errorStatus : if errorStatus == 2 : # Hide SNMPv1 noSuchName error which leaks in here # from SNMPv1 Agent through internal pysnmp proxy. errorStatus = errorStatus . clone ( 0 ) errorIndex = errorIndex . 
clone ( 0 ) yield ( errorIndication , errorStatus , errorIndex , varBinds ) return else : stopFlag = True varBinds = cbCtx [ 'varBindTable' ] and cbCtx [ 'varBindTable' ] [ 0 ] for col , varBind in enumerate ( varBinds ) : name , val = varBind if isinstance ( val , Null ) : varBinds [ col ] = previousVarBinds [ col ] [ 0 ] , endOfMibView if not lexicographicMode and not initialVars [ col ] . isPrefixOf ( name ) : varBinds [ col ] = previousVarBinds [ col ] [ 0 ] , endOfMibView if stopFlag and varBinds [ col ] [ 1 ] is not endOfMibView : stopFlag = False if stopFlag : return totalRows += 1 totalCalls += 1 else : errorIndication = errorStatus = errorIndex = None varBinds = [ ] initialVarBinds = ( yield errorIndication , errorStatus , errorIndex , varBinds ) if initialVarBinds : varBinds = initialVarBinds initialVars = [ x [ 0 ] for x in vbProcessor . makeVarBinds ( snmpEngine . cache , varBinds ) ] if maxRows and totalRows >= maxRows : return if maxCalls and totalCalls >= maxCalls : return
Creates a generator to perform one or more SNMP GETNEXT queries .
828
16
231,366
def _storeAccessContext ( snmpEngine ) : execCtx = snmpEngine . observer . getExecutionContext ( 'rfc3412.receiveMessage:request' ) return { 'securityModel' : execCtx [ 'securityModel' ] , 'securityName' : execCtx [ 'securityName' ] , 'securityLevel' : execCtx [ 'securityLevel' ] , 'contextName' : execCtx [ 'contextName' ] , 'pduType' : execCtx [ 'pdu' ] . getTagSet ( ) }
Copy received message metadata while it lasts
123
7
231,367
def _getManagedObjectsInstances ( self , varBinds , * * context ) : rspVarBinds = context [ 'rspVarBinds' ] varBindsMap = context [ 'varBindsMap' ] rtrVarBinds = [ ] for idx , varBind in enumerate ( varBinds ) : name , val = varBind if ( exval . noSuchObject . isSameTypeWith ( val ) or exval . noSuchInstance . isSameTypeWith ( val ) ) : varBindsMap [ len ( rtrVarBinds ) ] = varBindsMap . pop ( idx , idx ) rtrVarBinds . append ( varBind ) else : rspVarBinds [ varBindsMap . pop ( idx , idx ) ] = varBind if rtrVarBinds : snmpEngine = context [ 'snmpEngine' ] # Need to unwind stack, can't recurse any more def callLater ( * args ) : snmpEngine . transportDispatcher . unregisterTimerCbFun ( callLater ) mgmtFun = context [ 'mgmtFun' ] mgmtFun ( * varBinds , * * context ) snmpEngine . transportDispatcher . registerTimerCbFun ( callLater , 0.01 ) else : return rspVarBinds
Iterate over Managed Objects fulfilling SNMP query .
291
11
231,368
# Clone this NetworkAddress CHOICE, optionally replacing its component.
# A NetworkAddress value is unwrapped to its component; any other
# non-IpAddress value is coerced to IpAddress ("the only supported type,
# perhaps forever because this is SNMPv1"). Values lacking a tagSet are
# rejected with PyAsn1Error.
def clone ( self , value = univ . noValue , * * kwargs ) : cloned = univ . Choice . clone ( self , * * kwargs ) if value is not univ . noValue : if isinstance ( value , NetworkAddress ) : value = value . getComponent ( ) elif not isinstance ( value , IpAddress ) : # IpAddress is the only supported type, perhaps forever because # this is SNMPv1. value = IpAddress ( value ) try : tagSet = value . tagSet except AttributeError : raise PyAsn1Error ( 'component value %r has no tag set' % ( value , ) ) cloned . setComponentByType ( tagSet , value ) return cloned
Clone this instance .
161
5
231,369
def _defaultErrorHandler ( varBinds , * * context ) : errors = context . get ( 'errors' ) if errors : err = errors [ - 1 ] raise err [ 'error' ]
Raise exception on any error if user callback is missing
42
11
231,370
def readMibObjects(self, *varBinds, **context):
    """Read Managed Objects Instances.

    Installs the error-raising default callback when the caller supplies
    none, then drives the read state machine over *varBinds*.
    """
    context.setdefault('cbFun', self._defaultErrorHandler)
    self.flipFlopFsm(self.FSM_READ_VAR, *varBinds, **context)
Read Managed Objects Instances .
69
7
231,371
def readNextMibObjects(self, *varBinds, **context):
    """Read Managed Objects Instances next to the given ones (GETNEXT).

    Installs the error-raising default callback when the caller supplies
    none, then drives the read-next state machine over *varBinds*.
    """
    context.setdefault('cbFun', self._defaultErrorHandler)
    self.flipFlopFsm(self.FSM_READ_NEXT_VAR, *varBinds, **context)
Read Managed Objects Instances next to the given ones .
73
12
231,372
def writeMibObjects(self, *varBinds, **context):
    """Create, destroy or modify Managed Objects Instances.

    Installs the error-raising default callback when the caller supplies
    none, then drives the write state machine over *varBinds*.
    """
    context.setdefault('cbFun', self._defaultErrorHandler)
    self.flipFlopFsm(self.FSM_WRITE_VAR, *varBinds, **context)
Create destroy or modify Managed Objects Instances .
70
10
231,373
def bulkCmd ( snmpDispatcher , authData , transportTarget , nonRepeaters , maxRepetitions , * varBinds , * * options ) : def _cbFun ( snmpDispatcher , stateHandle , errorIndication , rspPdu , _cbCtx ) : if not cbFun : return if errorIndication : cbFun ( errorIndication , pMod . Integer ( 0 ) , pMod . Integer ( 0 ) , None , cbCtx = cbCtx , snmpDispatcher = snmpDispatcher , stateHandle = stateHandle ) return errorStatus = pMod . apiBulkPDU . getErrorStatus ( rspPdu ) errorIndex = pMod . apiBulkPDU . getErrorIndex ( rspPdu ) varBindTable = pMod . apiBulkPDU . getVarBindTable ( reqPdu , rspPdu ) errorIndication , nextVarBinds = pMod . apiBulkPDU . getNextVarBinds ( varBindTable [ - 1 ] , errorIndex = errorIndex ) if options . get ( 'lookupMib' ) : varBindTable = [ VB_PROCESSOR . unmakeVarBinds ( snmpDispatcher . cache , vbs ) for vbs in varBindTable ] nextStateHandle = pMod . getNextRequestID ( ) nextVarBinds = cbFun ( errorIndication , errorStatus , errorIndex , varBindTable , cbCtx = cbCtx , snmpDispatcher = snmpDispatcher , stateHandle = stateHandle , nextStateHandle = nextStateHandle , nextVarBinds = nextVarBinds ) if not nextVarBinds : return pMod . apiBulkPDU . setRequestID ( reqPdu , nextStateHandle ) pMod . apiBulkPDU . setVarBinds ( reqPdu , nextVarBinds ) return snmpDispatcher . sendPdu ( authData , transportTarget , reqPdu , cbFun = _cbFun ) if authData . mpModel < 1 : raise error . PySnmpError ( 'GETBULK PDU is only supported in SNMPv2c and SNMPv3' ) lookupMib , cbFun , cbCtx = [ options . get ( x ) for x in ( 'lookupMib' , 'cbFun' , 'cbCtx' ) ] if lookupMib : varBinds = VB_PROCESSOR . makeVarBinds ( snmpDispatcher . cache , varBinds ) pMod = api . PROTOCOL_MODULES [ authData . mpModel ] reqPdu = pMod . GetBulkRequestPDU ( ) pMod . apiBulkPDU . setDefaults ( reqPdu ) pMod . apiBulkPDU . setNonRepeaters ( reqPdu , nonRepeaters ) pMod . apiBulkPDU . setMaxRepetitions ( reqPdu , maxRepetitions ) pMod . apiBulkPDU . 
setVarBinds ( reqPdu , varBinds ) return snmpDispatcher . sendPdu ( authData , transportTarget , reqPdu , cbFun = _cbFun )
Initiate SNMP GETBULK query over SNMPv2c .
717
17
231,374
def save(self):
    """Write current changes to disk.

    Flushes reference properties and any modified objects tracked by the
    AAFObjectManager. A no-op for read-only modes; raises IOError if the
    file has already been closed.
    """
    if self.mode not in ("wb+", 'rb+'):
        return

    if not self.is_open:
        raise IOError("file closed")

    self.write_reference_properties()
    self.manager.write_objects()
Writes current changes to disk and flushes modified objects in the AAFObjectManager
58
17
231,375
def close(self):
    """Close the file; a closed file cannot be read or written any more.

    Saves pending changes first, then tears down the object manager's
    temporaries, the compound-file layer and the underlying file object.
    """
    self.save()
    self.manager.remove_temp()
    self.cfb.close()
    self.is_open = False
    self.f.close()
Close the file . A closed file cannot be read or written any more .
42
15
231,376
def run_apidoc(_):
    """Sphinx setup hook: generate API docs for the aaf2 package.

    Required by the setup() method below; invokes sphinx-apidoc
    programmatically with the project-specific source/output paths.
    """
    import os

    here = os.path.dirname(__file__)

    # https://github.com/sphinx-doc/sphinx/blob/master/sphinx/ext/apidoc.py
    argv = [
        '--force',
        '--no-toc',
        '--separate',
        '--module-first',
        '--output-dir', os.path.join(here, 'api'),
        os.path.join(here, '../../aaf2'),
        # exclude patterns follow the module path
        os.path.join(here, '../../aaf2/model'),
    ]

    from sphinx.ext import apidoc
    apidoc.main(argv)
This method is required by the setup method below .
179
10
231,377
def from_dict(self, d):
    """Set MobID fields from a dict; missing keys default to zeros."""
    self.length = d.get("length", 0)
    self.instanceHigh = d.get("instanceHigh", 0)
    self.instanceMid = d.get("instanceMid", 0)
    self.instanceLow = d.get("instanceLow", 0)

    empty_material = {'Data1': 0, 'Data2': 0, 'Data3': 0, 'Data4': [0] * 8}
    material = d.get("material", empty_material)

    self.Data1 = material.get('Data1', 0)
    self.Data2 = material.get('Data2', 0)
    self.Data3 = material.get('Data3', 0)
    self.Data4 = material.get("Data4", [0] * 8)

    self.SMPTELabel = d.get("SMPTELabel", [0] * 12)
Set MobID from a dict
225
6
231,378
def to_dict(self):
    """MobID representation as a plain dict (list copies of sequences)."""
    return {
        'material': {
            'Data1': self.Data1,
            'Data2': self.Data2,
            'Data3': self.Data3,
            'Data4': list(self.Data4),
        },
        'length': self.length,
        'instanceHigh': self.instanceHigh,
        'instanceMid': self.instanceMid,
        'instanceLow': self.instanceLow,
        'SMPTELabel': list(self.SMPTELabel),
    }
MobID representation as dict
119
5
231,379
def wave_infochunk(path):
    """Return a bytearray of the WAVE RIFF header and fmt chunk.

    Used to build a WAVEDescriptor Summary. Returns None when *path* is
    not a RIFF/WAVE file or when no fmt chunk is found before EOF.
    """
    with open(path, 'rb') as f:
        if f.read(4) != b"RIFF":
            return None

        data_size = f.read(4)  # RIFF container size, kept verbatim in output

        if f.read(4) != b"WAVE":
            return None

        while True:
            chunkid = f.read(4)
            sizebuf = f.read(4)

            if len(sizebuf) < 4 or len(chunkid) < 4:
                return None

            size = struct.unpack(b'<L', sizebuf)[0]

            if chunkid[0:3] != b"fmt":
                # RIFF chunks are word-aligned: odd-sized chunks carry one
                # pad byte. BUG FIX: the original computed the padded skip
                # but then seeked by the raw `size`, landing mid-stream
                # after any odd-sized chunk and never finding fmt.
                f.seek(size + (size % 2), 1)
            else:
                return bytearray(
                    b"RIFF" + data_size + b"WAVE" +
                    chunkid + sizebuf + f.read(size))
Returns a bytearray of the WAVE RIFF header and fmt chunk for a WAVEDescriptor Summary
203
25
231,380
def pop ( self ) : entry = self parent = self . parent root = parent . child ( ) dir_per_sector = self . storage . sector_size // 128 max_dirs_entries = self . storage . dir_sector_count * dir_per_sector count = 0 if root . dir_id == entry . dir_id : parent . child_id = None else : # find dir entry pointing to self while True : if count > max_dirs_entries : raise CompoundFileBinaryError ( "max dir entries limit reached" ) if entry < root : if root . left_id == entry . dir_id : root . left_id = None break root = root . left ( ) else : if root . right_id == entry . dir_id : # root right is pointing to self root . right_id = None break root = root . right ( ) count += 1 left = entry . left ( ) right = entry . right ( ) # clear from cache if parent . dir_id in self . storage . children_cache : del self . storage . children_cache [ parent . dir_id ] [ entry . name ] if left : del self . storage . children_cache [ parent . dir_id ] [ left . name ] if right : del self . storage . children_cache [ parent . dir_id ] [ right . name ] if left is not None : parent . add_child ( left ) if right is not None : parent . add_child ( right ) # clear parent and left and right self . left_id = None self . right_id = None self . parent = None
Remove self from the binary search tree .
347
6
231,381
def remove(self, path):
    """Remove a stream or storage DirEntry from the file.

    Storage-type entries must be empty (contain no children).

    Raises:
        ValueError: if *path* does not exist, is the root entry, or is a
            non-empty storage.
    """
    entry = self.find(path)
    if not entry:
        # message fix: "does not exists" -> "does not exist"
        raise ValueError("%s does not exist" % path)

    if entry.type == 'root storage':
        # message fix: "can no remove" -> "cannot remove"
        raise ValueError("cannot remove root entry")

    if entry.type == "storage" and entry.child_id is not None:
        raise ValueError("storage contains children")

    # unlink from the directory binary search tree
    entry.pop()

    # release the stream's sector chain (mini-FAT for small streams)
    if entry.type == "stream":
        self.free_fat_chain(entry.sector_id,
                            entry.byte_size < self.min_stream_max_size)

    self.free_dir_entry(entry)
Removes both streams and storage DirEntry types from file . storage type entries need to be empty dirs .
141
22
231,382
def rmtree(self, path):
    """Remove a directory structure recursively, similar to shutil.rmtree.

    Walks the tree bottom-up, freeing stream sector chains and directory
    entries, then removes the (now-empty) root entry itself.
    """
    for root, storages, streams in self.walk(path, topdown=False):
        for stream in streams:
            self.free_fat_chain(
                stream.sector_id,
                stream.byte_size < self.min_stream_max_size)
            self.free_dir_entry(stream)

        for storage in storages:
            self.free_dir_entry(storage)

        root.child_id = None

    # remove root item
    self.remove(path)
Removes directory structure similar to shutil . rmtree .
106
13
231,383
def listdir_dict ( self , path = None ) : if path is None : path = self . root root = self . find ( path ) if root is None : raise ValueError ( "unable to find dir: %s" % str ( path ) ) if not root . isdir ( ) : raise ValueError ( "can only list storage types" ) children = self . children_cache . get ( root . dir_id , None ) if children is not None : return children child = root . child ( ) result = { } if not child : self . children_cache [ root . dir_id ] = result return result dir_per_sector = self . sector_size // 128 max_dirs_entries = self . dir_sector_count * dir_per_sector stack = deque ( [ child ] ) count = 0 while stack : current = stack . pop ( ) result [ current . name ] = current count += 1 if count > max_dirs_entries : raise CompoundFileBinaryError ( "corrupt folder structure" ) left = current . left ( ) if left : stack . append ( left ) right = current . right ( ) if right : stack . append ( right ) self . children_cache [ root . dir_id ] = result return result
Return a dict containing the DirEntry objects in the directory given by path with name of the dir as key .
275
22
231,384
def makedir(self, path, class_id=None):
    """Create a storage-type DirEntry at *path*."""
    return self.create_dir_entry(path, dir_type='storage', class_id=class_id)
Create a storage DirEntry named path
43
7
231,385
def makedirs(self, path):
    """Create storage DirEntries recursively along *path*; return the leaf.

    *path* must be absolute (start with '/'); components that already
    exist are left untouched.
    """
    assert path.startswith('/')

    partial = ""
    for component in path.strip('/').split('/'):
        partial += "/" + component
        if not self.exists(partial):
            self.makedir(partial)

    return self.find(path)
Recursive storage DirEntry creation function .
73
8
231,386
def move ( self , src , dst ) : src_entry = self . find ( src ) if src_entry is None : raise ValueError ( "src path does not exist: %s" % src ) if dst . endswith ( '/' ) : dst += src_entry . name if self . exists ( dst ) : raise ValueError ( "dst path already exist: %s" % dst ) if dst == '/' or src == '/' : raise ValueError ( "cannot overwrite root dir" ) split_path = dst . strip ( '/' ) . split ( '/' ) dst_basename = split_path [ - 1 ] dst_dirname = '/' + '/' . join ( split_path [ : - 1 ] ) # print(dst) # print(dst_basename, dst_dirname) dst_entry = self . find ( dst_dirname ) if dst_entry is None : raise ValueError ( "src path does not exist: %s" % dst_dirname ) if not dst_entry . isdir ( ) : raise ValueError ( "dst dirname cannot be stream: %s" % dst_dirname ) # src_entry.parent.remove_child(src_entry) src_entry . pop ( ) src_entry . parent = None src_entry . name = dst_basename dst_entry . add_child ( src_entry ) self . children_cache [ dst_entry . dir_id ] [ src_entry . name ] = src_entry return src_entry
Moves DirEntry from src to dst
334
8
231,387
def open ( self , path , mode = 'r' ) : entry = self . find ( path ) if entry is None : if mode == 'r' : raise ValueError ( "stream does not exists: %s" % path ) entry = self . create_dir_entry ( path , 'stream' , None ) else : if not entry . isfile ( ) : raise ValueError ( "can only open stream type DirEntry's" ) if mode == 'w' : logging . debug ( "stream: %s exists, overwriting" % path ) self . free_fat_chain ( entry . sector_id , entry . byte_size < self . min_stream_max_size ) entry . sector_id = None entry . byte_size = 0 entry . class_id = None elif mode == 'rw' : pass s = Stream ( self , entry , mode ) return s
Open stream returning Stream object
191
5
231,388
def add2set ( self , pid , key , value ) : prop = self . property_entries [ pid ] current = prop . objects . get ( key , None ) current_local_key = prop . references . get ( key , None ) if current and current is not value : current . detach ( ) if current_local_key is None : prop . references [ key ] = prop . next_free_key prop . next_free_key += 1 prop . objects [ key ] = value if prop . parent . dir : ref = prop . index_ref_name ( key ) dir_entry = prop . parent . dir . get ( ref ) if dir_entry is None : dir_entry = prop . parent . dir . makedir ( ref ) if value . dir != dir_entry : value . attach ( dir_entry ) prop . mark_modified ( )
low level add to StrongRefSetProperty
186
8
231,389
def histogram_info(self) -> dict:
    """Return extra information describing the value histogram."""
    info = {
        key: getattr(self, key)
        for key in ('support_atoms', 'atom_delta', 'vmin', 'vmax')
    }
    info['num_atoms'] = self.atoms
    return info
Return extra information about histogram
72
6
231,390
def sample(self, histogram_logits):
    """Greedy action selection from per-action q-value histograms.

    ``histogram_logits`` is (batch, actions, atoms) of log-probabilities.
    The expected value of each action's histogram is computed against the
    support atoms, and the argmax action per batch element is returned.
    """
    probs = histogram_logits.exp()
    # Broadcast support atoms over the batch and action dimensions
    support = self.support_atoms.view(1, 1, self.atoms)
    expected_q = (probs * support).sum(dim=-1)
    return expected_q.argmax(dim=1)
Sample from a greedy strategy with given q - value histogram
87
12
231,391
def download ( self ) : if not os . path . exists ( self . data_path ) : # Create if it doesn't exist pathlib . Path ( self . data_path ) . mkdir ( parents = True , exist_ok = True ) if not os . path . exists ( self . text_path ) : http = urllib3 . PoolManager ( cert_reqs = 'CERT_REQUIRED' , ca_certs = certifi . where ( ) ) with open ( self . text_path , 'wt' ) as fp : request = http . request ( 'GET' , self . url ) content = request . data . decode ( 'utf8' ) fp . write ( content ) if not os . path . exists ( self . processed_path ) : with open ( self . text_path , 'rt' ) as fp : content = fp . read ( ) alphabet = sorted ( set ( content ) ) index_to_character = { idx : c for idx , c in enumerate ( alphabet , 1 ) } character_to_index = { c : idx for idx , c in enumerate ( alphabet , 1 ) } content_encoded = np . array ( [ character_to_index [ c ] for c in content ] , dtype = np . uint8 ) data_dict = { 'alphabet' : alphabet , 'index_to_character' : index_to_character , 'character_to_index' : character_to_index , 'content_encoded' : content_encoded } with open ( self . processed_path , 'wb' ) as fp : torch . save ( data_dict , fp ) else : with open ( self . processed_path , 'rb' ) as fp : data_dict = torch . load ( fp ) return data_dict
Make sure data file is downloaded and stored properly
400
9
231,392
def explained_variance(returns, values):
    """Fraction of the variance in *returns* accounted for by *values*.

    Computed as ``1 - Var(returns - values) / Var(returns)``: 1.0 means a
    perfect fit, 0.0 means the predictions explain nothing.
    """
    residual_variance = torch.var(returns - values)
    return (1 - residual_variance / torch.var(returns)).item()
Calculate how much variance in returns do the values explain
40
12
231,393
def create ( model_config , path , num_workers , batch_size , augmentations = None , tta = None ) : if not os . path . isabs ( path ) : path = model_config . project_top_dir ( path ) train_path = os . path . join ( path , 'train' ) valid_path = os . path . join ( path , 'valid' ) train_ds = ImageDirSource ( train_path ) val_ds = ImageDirSource ( valid_path ) return TrainingData ( train_ds , val_ds , num_workers = num_workers , batch_size = batch_size , augmentations = augmentations , # test_time_augmentation=tta )
Create an ImageDirSource with supplied arguments
155
8
231,394
def reset_weights(self):
    """Re-initialize weights of all sub-modules to reasonable defaults."""
    for module in (self.input_block, self.backbone, self.q_head):
        module.reset_weights()
Initialize weights to reasonable defaults
39
6
231,395
def result(self):
    """Return accumulated tensors, stacked into one tensor per key."""
    stacked = {}
    for key, tensors in self.accumulants.items():
        stacked[key] = torch.stack(tensors)
    return stacked
Concatenate accumulated tensors
31
7
231,396
def resolve_parameters ( self , func , extra_env = None ) : parameter_list = [ ( k , v . default == inspect . Parameter . empty ) for k , v in inspect . signature ( func ) . parameters . items ( ) ] extra_env = extra_env if extra_env is not None else { } kwargs = { } for parameter_name , is_required in parameter_list : # extra_env is a 'local' object data defined in-place if parameter_name in extra_env : kwargs [ parameter_name ] = self . instantiate_from_data ( extra_env [ parameter_name ] ) continue if parameter_name in self . instances : kwargs [ parameter_name ] = self . instances [ parameter_name ] continue if parameter_name in self . environment : kwargs [ parameter_name ] = self . instantiate_by_name ( parameter_name ) continue if is_required : funcname = f"{inspect.getmodule(func).__name__}.{func.__name__}" raise RuntimeError ( "Required argument '{}' cannot be resolved for function '{}'" . format ( parameter_name , funcname ) ) return kwargs
Resolve parameter dictionary for the supplied function
266
8
231,397
def resolve_and_call(self, func, extra_env=None):
    """Resolve *func*'s arguments from the environment, then invoke it."""
    resolved_kwargs = self.resolve_parameters(func, extra_env=extra_env)
    return func(**resolved_kwargs)
Resolve function arguments and call them possibly filling from the environment
48
14
231,398
def instantiate_from_data ( self , object_data ) : if isinstance ( object_data , dict ) and 'name' in object_data : name = object_data [ 'name' ] module = importlib . import_module ( name ) return self . resolve_and_call ( module . create , extra_env = object_data ) if isinstance ( object_data , dict ) and 'factory' in object_data : factory = object_data [ 'factory' ] module = importlib . import_module ( factory ) params = self . resolve_parameters ( module . create , extra_env = object_data ) return GenericFactory ( module . create , params ) elif isinstance ( object_data , dict ) : return { k : self . instantiate_from_data ( v ) for k , v in object_data . items ( ) } elif isinstance ( object_data , list ) : return [ self . instantiate_from_data ( x ) for x in object_data ] elif isinstance ( object_data , Variable ) : return object_data . resolve ( self . parameters ) else : return object_data
Instantiate object from the supplied data additional args may come from the environment
250
14
231,399
def render_configuration(self, configuration=None):
    """Render variables in a configuration tree without instantiating anything.

    Recursively walks dicts and lists; Variable leaves are resolved
    against self.parameters, any other leaf passes through untouched.
    Defaults to rendering the whole environment.
    """
    if configuration is None:
        configuration = self.environment

    if isinstance(configuration, dict):
        return {key: self.render_configuration(item)
                for key, item in configuration.items()}

    if isinstance(configuration, list):
        return [self.render_configuration(item) for item in configuration]

    if isinstance(configuration, Variable):
        return configuration.resolve(self.parameters)

    return configuration
Render variables in configuration object but don't instantiate anything
105
11