idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
23,800
async def add_unknown_id(self, unknown_id, timeout=OTGW_DEFAULT_TIMEOUT):
    """Tell the gateway the boiler does not support this Data-ID.

    The gateway may then substitute an alternative Data-ID when talking
    to the boiler. Returns the added ID, or None on failure or when the
    ID is outside the valid 1-255 range.
    """
    cmd = OTGW_CMD_UNKNOWN_ID
    data_id = int(unknown_id)
    if not 1 <= data_id <= 255:
        return None
    response = await self._wait_for_cmd(cmd, data_id, timeout)
    if response is None:
        return None
    return int(response)
Inform the gateway that the boiler doesn't support the specified Data-ID, even if the boiler doesn't indicate that by returning an Unknown-DataId response. Using this command allows the gateway to send an alternative Data-ID to the boiler instead. Return the added ID or None on failure.
95
59
23,801
async def del_unknown_id(self, unknown_id, timeout=OTGW_DEFAULT_TIMEOUT):
    """Start forwarding the specified Data-ID to the boiler again.

    Resets the counter the gateway uses to decide whether the boiler
    supports the Data-ID. Returns the ID marked as supported, or None on
    failure or when the ID is outside the valid 1-255 range.
    """
    cmd = OTGW_CMD_KNOWN_ID
    data_id = int(unknown_id)
    if not 1 <= data_id <= 255:
        return None
    response = await self._wait_for_cmd(cmd, data_id, timeout)
    if response is None:
        return None
    return int(response)
Start forwarding the specified Data - ID to the boiler again . This command resets the counter used to determine if the specified Data - ID is supported by the boiler . Return the ID that was marked as supported or None on failure .
94
46
23,802
async def set_max_ch_setpoint(self, temperature, timeout=OTGW_DEFAULT_TIMEOUT):
    """Set the maximum central heating setpoint.

    Only available on boilers that support this function. Returns the
    newly accepted setpoint, or None on failure.
    """
    response = await self._wait_for_cmd(OTGW_CMD_SET_MAX, temperature, timeout)
    if response is None:
        return None
    setpoint = float(response)
    self._update_status({DATA_MAX_CH_SETPOINT: setpoint})
    return setpoint
Set the maximum central heating setpoint . This command is only available with boilers that support this function . Return the newly accepted setpoint or None on failure .
98
32
23,803
async def set_dhw_setpoint(self, temperature, timeout=OTGW_DEFAULT_TIMEOUT):
    """Set the domestic hot water setpoint.

    Only available on boilers that support this function. Returns the
    newly accepted setpoint, or None on failure.
    """
    response = await self._wait_for_cmd(OTGW_CMD_SET_WATER, temperature, timeout)
    if response is None:
        return None
    setpoint = float(response)
    self._update_status({DATA_DHW_SETPOINT: setpoint})
    return setpoint
Set the domestic hot water setpoint . This command is only available with boilers that support this function . Return the newly accepted setpoint or None on failure .
97
32
23,804
async def set_max_relative_mod(self, max_mod, timeout=OTGW_DEFAULT_TIMEOUT):
    """Override the maximum relative modulation from the thermostat.

    Valid values are 0 through 100. Clear the setting by specifying a
    non-numeric value. Returns the newly accepted value, '-' if a
    previous value was cleared, or None on failure.
    """
    # Reject out-of-range integers up front; non-int values (used to
    # clear the override) are passed through to the gateway as-is.
    if isinstance(max_mod, int) and not 0 <= max_mod <= 100:
        return None
    cmd = OTGW_CMD_MAX_MOD
    status = {}
    ret = await self._wait_for_cmd(cmd, max_mod, timeout)
    # '-' means the override was cleared; None means the command failed.
    if ret not in ['-', None]:
        ret = int(ret)
    if ret == '-':
        status[DATA_SLAVE_MAX_RELATIVE_MOD] = None
    else:
        # NOTE(review): on failure (ret is None) this still publishes a
        # None status value before returning — confirm that is intended.
        status[DATA_SLAVE_MAX_RELATIVE_MOD] = ret
    self._update_status(status)
    return ret
Override the maximum relative modulation from the thermostat. Valid values are 0 through 100. Clear the setting by specifying a non-numeric value. Return the newly accepted value, '-' if a previous value was cleared, or None on failure.
156
46
23,805
async def set_control_setpoint(self, setpoint, timeout=OTGW_DEFAULT_TIMEOUT):
    """Manipulate the control setpoint sent to the boiler.

    Set to 0 to pass along the value specified by the thermostat.
    Returns the newly accepted value, or None on failure.
    """
    response = await self._wait_for_cmd(OTGW_CMD_CONTROL_SETPOINT, setpoint, timeout)
    if response is None:
        return None
    accepted = float(response)
    self._update_status({DATA_CONTROL_SETPOINT: accepted})
    return accepted
Manipulate the control setpoint being sent to the boiler . Set to 0 to pass along the value specified by the thermostat . Return the newly accepted value or None on failure .
102
38
23,806
async def _send_report(self, status):
    """Fan a status update out to every subscribed coroutine in _notify.

    Each subscriber receives its own shallow copy of *status* so one
    callback cannot mutate the dict seen by another. The callbacks are
    scheduled concurrently and not awaited here (fire-and-forget).

    Bug fix: dropped the explicit ``loop=self.loop`` argument —
    ``asyncio.gather()`` no longer accepts a *loop* keyword (removed in
    Python 3.10), and inside a coroutine the running loop is used
    automatically.
    """
    if self._notify:
        # Each client gets its own copy of the dict.
        asyncio.gather(*[coro(dict(status)) for coro in self._notify])
Call all subscribed coroutines in _notify whenever a status update occurs .
68
16
23,807
async def _poll_gpio(self, poll, interval=10):
    """Start or stop polling GPIO states.

    When *poll* is truthy and no polling task exists yet, a background
    task is created that queries the gateway's GPIO report every
    *interval* seconds. When *poll* is falsy and a task is running, it
    is cancelled; cancellation resets both GPIO states to 0.
    """
    if poll and self._gpio_task is None:

        async def polling_routine(interval):
            """Poll GPIO state every @interval seconds."""
            while True:
                try:
                    pios = None
                    ret = await self._wait_for_cmd(
                        OTGW_CMD_REPORT, OTGW_REPORT_GPIO_STATES)
                    if ret:
                        # Report payload: first two chars are a prefix,
                        # the rest hold one digit per GPIO pin.
                        pios = ret[2:]
                        status = {
                            OTGW_GPIO_A_STATE: int(pios[0]),
                            OTGW_GPIO_B_STATE: int(pios[1]),
                        }
                        self._update_status(status)
                    await asyncio.sleep(interval)
                except asyncio.CancelledError:
                    # On shutdown, publish both pins as 0 and clear the
                    # task handle so polling can be restarted later.
                    status = {
                        OTGW_GPIO_A_STATE: 0,
                        OTGW_GPIO_B_STATE: 0,
                    }
                    self._update_status(status)
                    self._gpio_task = None
                    break

        self._gpio_task = self.loop.create_task(polling_routine(interval))
    elif not poll and self._gpio_task is not None:
        self._gpio_task.cancel()
Start or stop polling GPIO states .
270
7
23,808
def _update_status(self, update):
    """Merge *update* into the protocol's status dict and queue the result for subscribers."""
    if not isinstance(update, dict):
        return
    protocol = self._protocol
    protocol.status.update(update)
    protocol._updateq.put_nowait(protocol.status)
Update the status dict and push it to subscribers .
55
10
23,809
def write_dockerfile(self, output_dir):
    """Write a standalone Dockerfile for this target.

    Used only to write a Dockerfile that will NOT be built by
    docker-make. The file is named ``Dockerfile.<imagename>`` inside
    *output_dir* (created if missing).
    """
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    dockerfile_lines = []
    for index, step in enumerate(self.steps):
        # Only the first step keeps its FROM line; later steps drop theirs.
        step_lines = step.dockerfile_lines
        dockerfile_lines.extend(step_lines if index == 0 else step_lines[1:])
    path = os.path.join(output_dir, 'Dockerfile.%s' % self.imagename)
    with open(path, 'w') as outfile:
        outfile.write('\n'.join(dockerfile_lines))
    print('Wrote %s' % path)
Used only to write a Dockerfile that will NOT be built by docker - make
155
16
23,810
def build(self, client, nobuild=False, usecache=True, pull=False):
    """Drives the build of the final image - get the list of steps and execute them.

    Args:
        client: docker client used to run the build steps.
        nobuild: when True, only print what would be built.
        usecache: allow the docker layer cache to be used.
        pull: pull newer versions of source images before building.
    """
    if not nobuild:
        self.update_source_images(client, usecache=usecache, pull=pull)
    width = utils.get_console_width()
    cprint('\n' + '=' * width, color='white', attrs=['bold'])
    line = 'STARTING BUILD for "%s" (image definition "%s" from %s)\n' % (
        self.targetname, self.imagename, self.steps[-1].sourcefile)
    cprint(_centered(line, width), color='blue', attrs=['bold'])
    for istep, step in enumerate(self.steps):
        print(colored('* Step', 'blue'),
              colored('%d/%d' % (istep + 1, len(self.steps)), 'blue', attrs=['bold']),
              colored('for image', color='blue'),
              colored(self.imagename, color='blue', attrs=['bold']))
        if not nobuild:
            if step.bust_cache:
                # If this exact stack position was already rebuilt during
                # this invocation, don't bust the cache a second time.
                stackkey = self._get_stack_key(istep)
                if stackkey in _rebuilt:
                    step.bust_cache = False
            step.build(client, usecache=usecache)
            print(colored("* Created intermediate image", 'green'),
                  colored(step.buildname, 'green', attrs=['bold']),
                  end='\n\n')
            if step.bust_cache:
                _rebuilt.add(stackkey)
    # The last executed step holds the final intermediate image name.
    finalimage = step.buildname
    if not nobuild:
        self.finalizenames(client, finalimage)
        line = 'FINISHED BUILDING "%s" (image definition "%s" from %s)' % (
            self.targetname, self.imagename, self.steps[-1].sourcefile)
        cprint(_centered(line, width), color='green', attrs=['bold'])
        cprint('=' * width, color='white', attrs=['bold'], end='\n\n')
Drives the build of the final image - get the list of steps and execute them .
508
18
23,811
def finalizenames(self, client, finalimage):
    """Tag the built image with its final name and untag intermediate containers."""
    client.api.tag(finalimage, *self.targetname.split(':'))
    cprint('Tagged final image as "%s"' % self.targetname, 'green')
    if self.keepbuildtags:
        return
    print('Untagging intermediate containers:', end='')
    for step in self.steps:
        client.api.remove_image(step.buildname, force=True)
        print(step.buildname, end=',')
    print()
Tag the built image with its final name and untag intermediate containers
116
13
23,812
def _resolve_squash_cache(self, client):
    """Work around squashing defeating the docker layer cache.

    Currently doing a squash basically negates the cache for any
    subsequent layers, but we can work around this by A) checking if the
    cache was successful for the _unsquashed_ version of the image and
    B) if so, re-using an older squashed version of the image.
    """
    from .staging import BUILD_CACHEDIR
    history = client.api.history(self.buildname)
    # Squash commits are recorded as "merge <start-sha> to <end-sha>".
    comment = history[0].get('Comment', '').split()
    if len(comment) != 4 or comment[0] != 'merge' or comment[2] != 'to':
        print('WARNING: failed to parse this image\'s pre-squash history. '
              'The build will continue, but all subsequent layers will be rebuilt.')
        return
    squashed_sha = history[0]['Id']
    start_squash_sha = comment[1]
    end_squash_sha = comment[3]
    cprint(' Layers %s to %s were squashed.' % (start_squash_sha, end_squash_sha),
           'yellow')
    # check cache
    squashcache = os.path.join(BUILD_CACHEDIR, 'squashes')
    if not os.path.exists(squashcache):
        os.makedirs(squashcache)
    # Cache entries are keyed by the squashed layer range.
    cachepath = os.path.join(BUILD_CACHEDIR, 'squashes',
                             '%s-%s' % (start_squash_sha, end_squash_sha))
    # on hit, tag the squashedsha as the result of this build step
    if os.path.exists(cachepath):
        self._get_squashed_layer_cache(client, squashed_sha, cachepath)
    else:
        self._cache_squashed_layer(squashed_sha, cachepath)
Currently doing a squash basically negates the cache for any subsequent layers . But we can work around this by A ) checking if the cache was successful for the _unsquashed_ version of the image and B ) if so re - using an older squashed version of the image .
353
57
23,813
def dockerfile_lines(self):
    """Dockerfile lines for a cross-image file copy.

    Used only when printing dockerfiles, not for building. Prints a
    warning that the referenced file must exist in the build context.
    """
    w1 = colored('WARNING: this build includes files that are built in other images!!! The generated'
                 '\n Dockerfile must be built in a directory that contains'
                 ' the file/directory:', 'red', attrs=['bold'])
    w2 = colored(' ' + self.sourcepath, 'red')
    # NOTE(review): this prints self.sourcepath where self.sourceimage
    # looks intended ("from image <path>") — confirm against callers.
    w3 = (colored(' from image ', 'red') +
          colored(self.sourcepath, 'blue', attrs=['bold']))
    print('\n'.join((w1, w2, w3)))
    return ["",
            "# Warning: the file \"%s\" from the image \"%s\""
            " must be present in this build context!!" % (self.sourcepath,
                                                          self.sourceimage),
            "ADD %s %s" % (os.path.basename(self.sourcepath), self.destpath),
            '']
Used only when printing dockerfiles not for building
213
9
23,814
def _check_yaml_and_paths(ymlfilepath, yamldefs):
    """Checks YAML for errors and resolves all paths.

    Mutates *yamldefs* in place: path-like fields are made absolute
    relative to the YAML file's directory, and a '_sourcefile' key is
    recorded on every image definition for logging. Raises a parsing
    error on any structural problem.
    """
    relpath = os.path.relpath(ymlfilepath)
    if '/' not in relpath:
        relpath = './%s' % relpath
    pathroot = os.path.abspath(os.path.dirname(ymlfilepath))
    for imagename, defn in iteritems(yamldefs):
        if imagename == '_SOURCES_':
            # _SOURCES_ is a list of other YAML files, not an image.
            yamldefs['_SOURCES_'] = [os.path.relpath(_get_abspath(pathroot, p))
                                     for p in yamldefs['_SOURCES_']]
            continue
        elif imagename in SPECIAL_FIELDS:
            continue
        # Resolve path-valued fields against the YAML file's location.
        for key in ('build_directory', 'FROM_DOCKERFILE', 'ignorefile'):
            if key in defn:
                defn[key] = _get_abspath(pathroot, defn[key])
        if 'copy_from' in defn:
            if not isinstance(defn['copy_from'], dict):
                raise errors.ParsingFailure(
                    ('Syntax error in file "%s": \n' +
                     'The "copy_from" field in image definition "%s" is not \n'
                     'a key:value list.') % (ymlfilepath, imagename))
            for otherimg, value in defn.get('copy_from', {}).items():
                if not isinstance(value, dict):
                    raise errors.ParsingFailure(
                        ('Syntax error in field:\n'
                         ' %s . copy_from . %s\nin file "%s". \n'
                         'All entries must be of the form "sourcepath: destpath"')
                        % (imagename, otherimg, ymlfilepath))
        # save the file path for logging
        defn['_sourcefile'] = relpath
        if 'ignore' in defn and 'ignorefile' in defn:
            raise errors.MultipleIgnoreError(
                'Image "%s" has both "ignore" AND "ignorefile" fields.' % imagename
                + ' At most ONE of these should be defined')
        # secret_files requires squashing so secrets never persist in a layer.
        if 'secret_files' in defn and not defn.get('squash', True):
            raise errors.ParsingFailure(
                "Step '%s' defines secret_files, so 'squash' cannot be set to 'false'"
                % imagename)
        if defn.get('secret_files', None) and defn.get('copy_from', False):
            raise errors.ParsingFailure(
                '`secret_files` currently is not implmemented to handle `copy_from`'
                ' (step %s)' % imagename)
        for key in defn:
            if key not in RECOGNIZED_KEYS:
                raise errors.UnrecognizedKeyError(
                    'Field "%s" in image "%s" in file "%s" not recognized'
                    % (key, imagename, relpath))
Checks YAML for errors and resolves all paths
684
11
23,815
def generate_build(self, image, targetname, rebuilds=None, cache_repo='',
                   cache_tag='', buildargs=None, **kwargs):
    """Separate the build into a series of one or more intermediate steps.

    Each specified build directory gets its own step. Returns a
    builds.BuildTarget holding the ordered step list plus any builds of
    images this one copies files from.
    """
    from_image = self.get_external_base_image(image)
    if cache_repo or cache_tag:
        cache_from = utils.generate_name(image, cache_repo, cache_tag)
    else:
        cache_from = None
    if from_image is None:
        raise errors.NoBaseError("No base image found in %s's dependencies" % image)
    # An ExternalDockerfile base must itself be built before step 1.
    if isinstance(from_image, ExternalDockerfile):
        build_first = from_image
        base_image = from_image.tag
    else:
        base_image = from_image
        build_first = None
    build_steps = []
    istep = 0
    sourceimages = set()
    if rebuilds is None:
        rebuilds = []
    else:
        rebuilds = set(rebuilds)
    for base_name in self.sort_dependencies(image):
        istep += 1
        buildname = 'dmkbuild_%s_%d' % (image, istep)
        secret_files = self.ymldefs[base_name].get('secret_files', None)
        # Secret files force squashing unless 'squash' is set explicitly.
        squash = self.ymldefs[base_name].get('squash', bool(secret_files))
        build_steps.append(
            dockermake.step.BuildStep(base_name, base_image,
                                      self.ymldefs[base_name], buildname,
                                      bust_cache=base_name in rebuilds,
                                      build_first=build_first,
                                      cache_from=cache_from,
                                      buildargs=buildargs, squash=squash,
                                      secret_files=secret_files))
        base_image = buildname
        build_first = None
        # Each copied file gets its own FileCopyStep layered on top.
        for sourceimage, files in iteritems(
                self.ymldefs[base_name].get('copy_from', {})):
            sourceimages.add(sourceimage)
            for sourcepath, destpath in iteritems(files):
                istep += 1
                buildname = 'dmkbuild_%s_%d' % (image, istep)
                build_steps.append(
                    dockermake.step.FileCopyStep(sourceimage, sourcepath,
                                                 destpath, base_name,
                                                 base_image,
                                                 self.ymldefs[base_name],
                                                 buildname,
                                                 bust_cache=base_name in rebuilds,
                                                 build_first=build_first,
                                                 cache_from=cache_from))
                base_image = buildname
    # Recursively generate builds for every image we copy files from.
    sourcebuilds = [self.generate_build(img, img, cache_repo=cache_repo,
                                        cache_tag=cache_tag, **kwargs)
                    for img in sourceimages]
    return builds.BuildTarget(imagename=image, targetname=targetname,
                              steps=build_steps, sourcebuilds=sourcebuilds,
                              from_image=from_image, **kwargs)
Separate the build into a series of one or more intermediate steps . Each specified build directory gets its own step
657
22
23,816
def sort_dependencies(self, image, dependencies=None):
    """Topologically sort the docker commands by their requirements.

    Post-order depth-first traversal: every image appears after all of
    the images it requires. Returns a view of image names in dependency
    order (returns None for a node revisited during recursion).
    """
    if dependencies is None:
        # OrderedDict doubles as an ordered set; values are never used.
        dependencies = OrderedDict()
    if image in dependencies:
        return
    for requirement in self.ymldefs[image].get('requires', []):
        self.sort_dependencies(requirement, dependencies)
    dependencies[image] = None
    return dependencies.keys()
Topologically sort the docker commands by their requirements
96
9
23,817
def get_external_base_image(self, image, stack=None):
    """Makes sure that this image has exactly one unique external base image.

    Recurses through *image*'s requirements; *stack* tracks the current
    recursion path so circular dependencies can be reported. Returns the
    external base (a FROM string or an ExternalDockerfile), or None when
    neither the image nor its requirements declare one.
    """
    if stack is None:
        stack = list()
    mydef = self.ymldefs[image]
    if image in stack:
        # Append before raising so the cycle is visible in the message.
        stack.append(image)
        raise errors.CircularDependencyError(
            'Circular dependency found:\n' + '->'.join(stack))
    stack.append(image)
    # Deal with FROM and FROM_DOCKERFILE fields
    if 'FROM' in mydef and 'FROM_DOCKERFILE' in mydef:
        raise errors.MultipleBaseError(
            'ERROR: Image "%s" has both a "FROM" and a "FROM_DOCKERFILE" field.' % image
            + ' It should have at most ONE of these fields.')
    if 'FROM' in mydef:
        externalbase = mydef['FROM']
    elif 'FROM_DOCKERFILE' in mydef:
        # External dockerfiles are cached so each path is built once.
        path = mydef['FROM_DOCKERFILE']
        if path not in self._external_dockerfiles:
            self._external_dockerfiles[path] = ExternalDockerfile(path)
        externalbase = self._external_dockerfiles[path]
    else:
        externalbase = None
    requires = mydef.get('requires', [])
    if not isinstance(requires, list):
        raise errors.InvalidRequiresList(
            'Requirements for image "%s" are not a list' % image)
    for base in requires:
        try:
            otherexternal = self.get_external_base_image(base, stack)
        except ValueError:
            continue
        if externalbase is None:
            externalbase = otherexternal
        elif otherexternal is None:
            continue
        elif externalbase != otherexternal:
            raise errors.ConflictingBaseError(
                'Multiple external dependencies: definition "%s" depends on:\n' % image
                + ' %s (FROM: %s), and\n' % (image, externalbase)
                + ' %s (FROM: %s).' % (base, otherexternal))
    # NOTE(review): this sanity check disappears under `python -O`
    # (asserts are stripped); the pop itself must still happen.
    assert stack.pop() == image
    return externalbase
Makes sure that this image has exactly one unique external base image
458
13
23,818
def stage(self, startimage, newimage):
    """Copies the file from source to target.

    Extracts self.sourcepath from self.sourceimage (via a throwaway
    container), caches the tarball on disk, then builds *newimage* on
    top of *startimage* with the tarball ADDed at self.destpath.
    """
    client = utils.get_client()
    cprint(' Copying file from "%s:/%s" \n to "%s://%s/"'
           % (self.sourceimage, self.sourcepath, startimage, self.destpath),
           'blue')
    # copy build artifacts from the container if necessary
    cachedir = self._setcache(client)
    cacherelpath = os.path.relpath(cachedir, TMPDIR)
    # if cached file doesn't exist (presumably purged by OS), trigger it to be recreated
    if os.path.exists(cachedir) and not os.path.exists(
            os.path.join(cachedir, 'content.tar')):
        shutil.rmtree(cachedir)
    if not os.path.exists(cachedir):
        print(' * Creating cache at %s' % cacherelpath)
        container = client.containers.create(self.sourceimage)
        try:
            tarfile_stream, tarfile_stats = container.get_archive(self.sourcepath)
        except docker.errors.NotFound:
            raise errors.MissingFileError(
                'Cannot copy file "%s" from image "%s" - it does not exist!'
                % (self.sourcepath, self.sourceimage))
        # write files to disk (would be nice to stream them, haven't gotten it to work)
        tempdir = tempfile.mkdtemp(dir=BUILD_TEMPDIR)
        with open(os.path.join(tempdir, 'content.tar'), 'wb') as localfile:
            for chunk in tarfile_stream:
                localfile.write(chunk)
        # NOTE(review): mkdir(cachedir) immediately before renaming onto
        # it relies on rename-over-empty-dir semantics — confirm intent.
        os.mkdir(cachedir)
        os.rename(tempdir, cachedir)
    else:
        print(' Using cached files from %s' % cacherelpath)
    # write Dockerfile for the new image and then build it
    dockerfile = 'FROM %s\nADD content.tar %s' % (startimage, self.destpath)
    with open(os.path.join(cachedir, 'Dockerfile'), 'w') as df:
        df.write(dockerfile)
    buildargs = dict(path=cachedir, tag=newimage, decode=True)
    utils.set_build_cachefrom(self.cache_from, buildargs, client)
    # Build and show logs
    stream = client.api.build(**buildargs)
    try:
        utils.stream_docker_logs(stream, newimage)
    except ValueError as e:
        raise errors.BuildError(dockerfile, e.args[0], build_args=buildargs)
Copies the file from source to target
583
8
23,819
def _runargs(argstring):
    """Entrypoint for debugging: parse *argstring* like a command line and run it."""
    import shlex
    parser = cli.make_arg_parser()
    parsed = parser.parse_args(shlex.split(argstring))
    run(parsed)
Entrypoint for debugging
45
4
23,820
def lookup(source, keys, fallback=None):
    """Traverse *source*, indexing with each key in *keys* in turn.

    Returns the final value, or *fallback* (default None) when any step
    fails, instead of raising an exception.
    """
    current = source
    try:
        for key in keys:
            current = current[key]
    except (KeyError, AttributeError, TypeError):
        return fallback
    return current
Traverses the source looking up each key. Returns the fallback (None by default) if it can't find anything, instead of raising an exception.
44
23
23,821
def run(self):
    """Run the thread: drain (name, value, valueType, stamp) tuples and log them.

    A queue item that cannot be unpacked into four values (e.g. a None
    sentinel) stops the loop. task_done() is always called, even for the
    sentinel.
    """
    while True:
        try:
            try:
                entry = self.queue.get()
                name, value, valueType, stamp = entry
            except TypeError:
                # Non-unpackable sentinel -> shut down.
                break
            self.log(name, value, valueType, stamp)
        finally:
            self.queue.task_done()
Run the thread .
58
4
23,822
def connect(self):
    """Connects to the Graphite server if not already connected.

    Retries forever on socket errors with randomized exponential
    backoff, capped at 5 seconds.
    """
    if self.sock is not None:
        return
    backoff = 0.01
    while True:
        try:
            candidate = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            candidate.settimeout(5)
            candidate.connect((self.host, self.port))
        except socket.error:
            # Sleep a random fraction of the doubled backoff window.
            time.sleep(random.uniform(0, 2.0 * backoff))
            backoff = min(backoff * 2.0, 5.0)
        else:
            self.sock = candidate
            return
Connects to the Graphite server if not already connected .
113
12
23,823
def disconnect(self):
    """Disconnect from the Graphite server if connected.

    The socket reference is dropped even when close() fails.
    """
    if self.sock is None:
        return
    try:
        self.sock.close()
    except socket.error:
        pass
    finally:
        self.sock = None
Disconnect from the Graphite server if connected .
36
10
23,824
def _sendMsg(self, msg):
    """Send a line to graphite. Retry with exponential backoff.

    Lazily connects first, encodes text to UTF-8 bytes, and on any
    socket error tears the connection down and reconnects before
    retrying (forever, with randomized backoff capped at 5 seconds).
    """
    if not self.sock:
        self.connect()
    if not isinstance(msg, binary_type):
        msg = msg.encode("UTF-8")
    backoff = 0.001
    while True:
        try:
            self.sock.sendall(msg)
            break
        except socket.error:
            log.warning('Graphite connection error', exc_info=True)
            # Cycle the connection and wait a random slice of the
            # doubled backoff window before retrying.
            self.disconnect()
            time.sleep(random.uniform(0, 2.0 * backoff))
            backoff = min(backoff * 2.0, 5.0)
            self.connect()
Send a line to graphite . Retry with exponential backoff .
132
14
23,825
def log(self, name, value, valueType=None, stamp=None):
    """Log a named numeric value.

    Args:
        name: metric name; passed through self._sanitizeName.
        value: the value; floats are rendered with two decimals, other
            types with plain str formatting.
        valueType: optional suffix such as 'value' or 'count'; a leading
            '.' is prepended when missing. May be None or ''.
        stamp: UNIX timestamp; defaults to the current time when falsy.
    """
    # isinstance instead of an exact type() comparison (the Pythonic
    # check), so float subclasses are also formatted as floats.
    if isinstance(value, float):
        form = "%s%s %2.2f %d\n"
    else:
        form = "%s%s %s %d\n"
    # Truthiness covers both None and the empty string.
    if valueType and valueType[0] != '.':
        valueType = '.' + valueType
    if not stamp:
        stamp = time.time()
    self._sendMsg(form % (self._sanitizeName(name), valueType or '', value, stamp))
Log a named numeric value . The value type may be value count or None .
131
16
23,826
def enqueue(self, name, value, valueType=None, stamp=None):
    """Enqueue a call to log.

    Silently drops the entry when the queue has grown past the
    configured maximum size (0/None disables the bound).
    """
    # If queue is too large, refuse to log.
    if self.maxQueueSize and self.queue.qsize() > self.maxQueueSize:
        return
    self.queue.put((name, value, valueType, stamp))
Enqueue a call to log .
77
7
23,827
def update(self, function):
    """Atomically apply *function* to the value; return (old, new) values."""
    with self.lock:
        previous = self.value
        self.value = function(previous)
        return previous, self.value
Atomically apply function to the value and return the old and new values .
35
16
23,828
def tick(self):
    """Updates rates and decays.

    Folds the interval's event count into an exponentially weighted
    moving average; the first tick seeds the average directly.
    """
    count = self._uncounted.getAndSet(0)
    instant_rate = float(count) / self.interval
    if self._initialized:
        # Standard EWMA step toward the instantaneous rate.
        self.rate += (self.alpha * (instant_rate - self.rate))
    else:
        self.rate = instant_rate
        self._initialized = True
Updates rates and decays
70
6
23,829
def statsId(obj):
    """Gets a unique ID for each object.

    The ID is cached on the object under ID_KEY; the first call mints a
    fresh one from the NEXT_ID counter.
    """
    if not hasattr(obj, ID_KEY):
        setattr(obj, ID_KEY, next(NEXT_ID))
    return getattr(obj, ID_KEY)
Gets a unique ID for each object .
52
9
23,830
def filterCollapsedItems(data):
    """Return a filtered iteration over a list of items.

    Skips values that are collapsed StatContainers; everything else is
    yielded as (key, value) pairs.
    """
    def _visible(value):
        return not (isinstance(value, StatContainer) and value.isCollapsed())

    return ((key, value) for key, value in six.iteritems(data) if _visible(value))
Return a filtered iteration over a list of items .
49
10
23,831
def dumpStatsTo(filename):
    """Write the current stats dict to *filename* as JSON.

    A 'last-updated' timestamp is stamped onto the snapshot before it is
    serialized.

    Bug fix: the original serialized a second, fresh ``getStats()`` call
    instead of the annotated ``latest`` snapshot, silently discarding
    the 'last-updated' key it had just added.
    """
    with open(filename, 'w') as f:
        latest = getStats()
        latest['last-updated'] = time.time()
        json.dump(latest, f, cls=StatContainerEncoder)
Writes the stats dict to the given filename.
59
9
23,832
def collection(path, *stats):
    """Creates a named stats collection object.

    Builds a throwaway class named after *path* whose attributes are the
    given Stat descriptors, instantiates it (registering it under
    *path* via init), and seeds any stat defaults on the instance.
    """
    def initMethod(self):
        """Init method for the underlying stat object's class."""
        init(self, path)
    attributes = {'__init__': initMethod}
    for stat in stats:
        attributes[stat.getName()] = stat
    newClass = type('Stats:%s' % path, (object,), attributes)
    instance = newClass()
    for stat in stats:
        # Consider this method package-protected.
        # pylint: disable=W0212
        default = stat._getInit()
        if default:
            setattr(instance, stat.getName(), default)
    return instance
Creates a named stats collection object .
140
8
23,833
def reset(cls):
    """Resets the static state. Should only be called by tests.

    Also clears the cached aggregators on every live Stat instance.
    """
    cls.stats = StatContainer()
    cls.parentMap = {}
    cls.containerMap = {}
    cls.subId = 0
    # Sweep the heap for Stat objects and wipe their aggregator caches.
    for candidate in gc.get_objects():
        if isinstance(candidate, Stat):
            candidate._aggregators = {}
Resets the static state . Should only be called by tests .
69
13
23,834
def init(cls, obj, context):
    """Implementation of init.

    Returns *obj*'s stat container, creating and caching it under the
    given context path on first use.
    """
    key = statsId(obj)
    if key not in cls.containerMap:
        cls.containerMap[key] = cls.__getStatContainer(context)
    return cls.containerMap[key]
Implementation of init .
56
5
23,835
def initChild(cls, obj, name, subContext, parent=None):
    """Implementation of initChild.

    Registers *obj* as a child stats collection under *parent* (or under
    the caller discovered via stack inspection when *parent* is None)
    and returns its stat container.

    Bug fix: ``subContext is not ''`` compared identity against a string
    literal — implementation-dependent and a SyntaxWarning on Python
    3.8+; replaced with the equality test ``subContext != ''``.
    """
    addr = statsId(obj)
    if addr not in cls.containerMap:
        if not parent:
            # Find out the parent of the calling object by going back through the call stack until a self != this.
            f = inspect.currentframe()
            while not cls.__getSelf(f):
                f = f.f_back
            this = cls.__getSelf(f)
            f = f.f_back
            while cls.__getSelf(f) == this or not cls.__getSelf(f):
                f = f.f_back
            parent = cls.__getSelf(f)
        # Default subcontext to an autoincrementing ID.
        if subContext is None:
            cls.subId += 1
            subContext = cls.subId
        if subContext != '':
            path = '%s/%s' % (name, subContext)
        else:
            path = name
        # Now that we have the name, create an entry for this object.
        cls.parentMap[addr] = parent
        container = cls.getContainerForObject(statsId(parent))
        # TestCase parents may not have been registered yet; do it now.
        if not container and isinstance(parent, unittest.TestCase):
            cls.init(parent, '/test-case')
        cls.containerMap[addr] = cls.__getStatContainer(
            path, cls.getContainerForObject(statsId(parent)))
    return cls.containerMap[addr]
Implementation of initChild .
324
6
23,836
def __getStatContainer(cls, context, parent=None):
    """Get the stat container for the given context under the given parent.

    Walks (and lazily creates) one StatContainer per '/'-separated path
    segment, rooted at *parent* or the global stats tree.
    """
    container = cls.stats if parent is None else parent
    if context is not None:
        for segment in str(context).lstrip('/').split('/'):
            container.setdefault(segment, StatContainer())
            container = container[segment]
    return container
Get the stat container for the given context under the given parent .
80
13
23,837
def getStat(cls, obj, name):
    """Gets the stat for the given object with the given name, or None if no such stat exists."""
    for klass in type(obj).__mro__:
        # Stats are only declared on user classes; stop at object.
        if klass == object:
            break
        for attribute in klass.__dict__.values():
            if isinstance(attribute, Stat) and attribute.getName() == name:
                return attribute
Gets the stat for the given object with the given name or None if no such stat exists .
72
20
23,838
def getAggregator(cls, instanceId, name):
    """Gets the aggregate stat for the given stat.

    Walks up the parent chain; returns (stat, parent) for the first
    ancestor defining *name*, or None when no ancestor does.
    """
    ancestor = cls.parentMap.get(instanceId)
    while ancestor:
        found = cls.getStat(ancestor, name)
        if found:
            return found, ancestor
        ancestor = cls.parentMap.get(statsId(ancestor))
Gets the aggregate stat for the given stat .
66
10
23,839
def _aggregate(self, instanceId, container, value, subKey=None):
    """Performs stat aggregation.

    Looks up (and caches per instance) the aggregator for this stat,
    then forwards the old and new values to it so parent-level
    aggregates stay in sync. No-op when no aggregator exists.
    """
    # Get the aggregator.
    if instanceId not in self._aggregators:
        self._aggregators[instanceId] = _Stats.getAggregator(instanceId, self.__name)
    aggregator = self._aggregators[instanceId]
    # If we are aggregating, get the old value.
    if aggregator:
        oldValue = container.get(self.__name)
        if subKey:
            # Child update: pass the sub-keyed old value along.
            oldValue = oldValue[subKey]
            aggregator[0].update(aggregator[1], oldValue, value, subKey)
        else:
            aggregator[0].update(aggregator[1], oldValue, value)
Performs stat aggregation .
162
5
23,840
def updateItem(self, instance, subKey, value):
    """Updates a child value. Must be called before the update has actually occurred."""
    owner_id = statsId(instance)
    owner_container = _Stats.getContainerForObject(owner_id)
    self._aggregate(owner_id, owner_container, value, subKey)
Updates a child value . Must be called before the update has actually occurred .
51
16
23,841
def incr(self, item, value):
    """Increment *item* by *value*, treating a missing key as 0.0.

    Reads through UserDict.__getitem__ directly, bypassing any
    overridden __getitem__ on subclasses.
    """
    old = UserDict.__getitem__(self, item) if item in self else 0.0
    self[item] = old + value
Increment a key by the given amount .
46
9
23,842
def addSource(self, source, data):
    """Adds the given source's stats into the running aggregate result."""
    self._aggregate(source, self._aggregators, data, self._result)
Adds the given source's stats.
33
7
23,843
def addJsonDirectory(self, directory, test=None):
    """Adds data from json files in the given directory.

    Files rejected by *test* (when given) are skipped; any ValueError
    raised while reading or merging a file (e.g. invalid JSON) silently
    skips that file.
    """
    for entry in os.listdir(directory):
        try:
            fullPath = os.path.join(directory, entry)
            if not test or test(entry, fullPath):
                with open(fullPath) as handle:
                    parsed = json.load(handle)
                sourceName, _ = os.path.splitext(entry)
                self.addSource(sourceName, parsed)
        except ValueError:
            continue
Adds data from json files in the given directory .
100
10
23,844
def mean(self):
    """Return the sample mean, or NaN for an empty sample."""
    if not len(self):
        return float('NaN')
    samples = self.samples()
    return sum(samples) / float(len(samples))
Return the sample mean .
42
5
23,845
def stddev(self):
    """Return the sample standard deviation, or NaN with fewer than 2 samples."""
    if len(self) < 2:
        return float('NaN')
    # The stupidest algorithm, but it works fine.
    try:
        samples = self.samples()
        avg = sum(samples) / len(samples)
        total = sum((sample - avg) ** 2 for sample in samples)
        return sqrt(total / (len(samples) - 1))
    except ZeroDivisionError:
        return float('NaN')
Return the sample standard deviation .
106
6
23,846
def clear(self):
    """Clear the samples by re-running __init__ with the current configuration."""
    self.__init__(size=self.size, alpha=self.alpha, clock=self.clock)
Clear the samples .
30
4
23,847
def update(self, value):
    """Add *value* to the reservoir with an exponentially decayed priority.

    The value is keyed by weight(age) / random(), so newer values tend
    to displace older ones once the reservoir is full; the lowest-
    priority entry is evicted to keep the size bounded.
    """
    super(ExponentiallyDecayingReservoir, self).update(value)
    timestamp = self.clock.time()
    self.__rescaleIfNeeded()
    priority = self.__weight(timestamp - self.startTime) / random.random()
    self.count += 1
    if (self.count <= self.size):
        # Still filling: keep everything.
        self.values[priority] = value
    else:
        first = min(self.values)
        # Only insert when it beats the current minimum and the priority
        # key isn't already taken.
        if first < priority and priority not in self.values:
            self.values[priority] = value
            # Re-find the minimum if it was displaced, then evict it.
            while first not in self.values:
                first = min(self.values)
            del self.values[first]
Adds an old value with a fixed timestamp to the reservoir .
142
12
23,848
def clear(self):
    """Clear the sample: zero every slot in place and reset the count."""
    # Slice assignment keeps the same underlying list object.
    self.sample[:] = [0.0] * len(self.sample)
    self.count = 0
Clear the sample .
34
4
23,849
def update(self, value):
    """Add a value to the sample.

    Reservoir sampling: early values fill the buffer; later values
    replace a random slot with decreasing probability.
    """
    super(UniformSample, self).update(value)
    self.count += 1
    seen = self.count
    if seen < len(self.sample):
        self.sample[seen - 1] = value
    else:
        slot = random.randint(0, seen)
        if slot < len(self.sample):
            self.sample[slot] = value
Add a value to the sample .
81
7
23,850
def _forbidden(self, path, value):
    """Is a stat forbidden?

    Goes through the rules to find one that applies. Chronologically
    newer rules are higher-precedence than older ones. If no rule
    applies, the stat is forbidden by default.
    """
    # Rules match against paths without the leading slash.
    if path[0] == '/':
        path = path[1:]
    for rule in reversed(self.rules):
        # A rule is (allow?, pattern-or-predicate).
        if isinstance(rule[1], six.string_types):
            if fnmatch(path, rule[1]):
                return not rule[0]
        elif rule[1](path, value):
            return not rule[0]
    return True
Is a stat forbidden? Goes through the rules to find one that applies . Chronologically newer rules are higher - precedence than older ones . If no rule applies the stat is forbidden by default .
92
38
23,851
def _pruned(self, path):
    """Is a stat tree node pruned?

    Goes through the prune rules newest-first; string rules are fnmatch
    patterns, anything else is treated as a predicate. With no matching
    rule the node is not pruned.
    """
    normalized = path[1:] if path[0] == '/' else path
    for rule in reversed(self.pruneRules):
        if isinstance(rule, six.string_types):
            if fnmatch(normalized, rule):
                return True
        elif rule(normalized):
            return True
    return False
Is a stat tree node pruned? Goes through the list of prune rules to find one that applies . Chronologically newer rules are higher - precedence than older ones . If no rule applies the stat is not pruned by default .
73
47
23,852
def push(self, statsDict=None, prefix=None, path=None):
    """Push stat values out to Graphite.

    Recurses through nested dicts, building dotted Graphite names from
    *prefix* and '/'-joined stat paths; pruned/forbidden paths are
    skipped and only plain numeric leaves are sent.
    """
    if statsDict is None:
        statsDict = scales.getStats()
    prefix = prefix or self.prefix
    path = path or '/'
    for name, value in list(statsDict.items()):
        name = str(name)
        subpath = os.path.join(path, name)
        if self._pruned(subpath):
            continue
        # Callable stats are evaluated; failures log and push nothing.
        if hasattr(value, '__call__'):
            try:
                value = value()
            except:  # pylint: disable=W0702
                value = None
                log.exception('Error when calling stat function for graphite push')
        if hasattr(value, 'items'):
            # Dict-like: recurse one level deeper with an extended prefix.
            self.push(value, '%s%s.' % (prefix, self._sanitize(name)), subpath)
        elif self._forbidden(subpath, value):
            continue
        # `long` only exists on Python 2.
        if six.PY3:
            type_values = (int, float)
        else:
            type_values = (int, long, float)
        if type(value) in type_values and len(name) < 500:
            self.graphite.log(prefix + self._sanitize(name), value)
Push stat values out to Graphite .
271
8
23,853
def run(self):
    """Loop forever pushing out stats.

    Starts the graphite client, then sleeps for self.period seconds
    between pushes. Any push failure is logged and re-raised, ending
    the loop (and the thread).
    """
    self.graphite.start()
    while True:
        log.debug('Graphite pusher is sleeping for %d seconds', self.period)
        time.sleep(self.period)
        log.debug('Pushing stats to Graphite')
        try:
            self.push()
            log.debug('Done pushing stats to Graphite')
        except:
            # Log with traceback, then propagate to kill the loop.
            log.exception('Exception while pushing stats to Graphite')
            raise
Loop forever pushing out stats .
96
6
23,854
def installStatsLoop(statsFile, statsDelay):
    """Installs an interval loop that dumps stats to a file.

    Once the reactor is running, waits *statsDelay* seconds, then dumps
    stats to *statsFile* and reschedules itself every *statsDelay*
    seconds thereafter.
    """
    def _dumpAndReschedule():
        """Dump the stats, then schedule the next dump."""
        scales.dumpStatsTo(statsFile)
        reactor.callLater(statsDelay, _dumpAndReschedule)

    def _kickoff():
        """Start the dump cycle after the initial delay."""
        reactor.callLater(statsDelay, _dumpAndReschedule)

    reactor.callWhenRunning(_kickoff)
Installs an interval loop that dumps stats to a file .
90
12
23,855
def runQuery ( statDict , query ) : parts = [ x . strip ( ) for x in OPERATOR . split ( query ) ] assert len ( parts ) in ( 1 , 3 ) queryKey = parts [ 0 ] result = { } for key , value in six . iteritems ( statDict ) : if key == queryKey : if len ( parts ) == 3 : op = OPERATORS [ parts [ 1 ] ] try : queryValue = type ( value ) ( parts [ 2 ] ) if value else parts [ 2 ] except ( TypeError , ValueError ) : continue if not op ( value , queryValue ) : continue result [ key ] = value elif isinstance ( value , scales . StatContainer ) or isinstance ( value , dict ) : child = runQuery ( value , query ) if child : result [ key ] = child return result
Filters for the given query .
182
7
23,856
def htmlHeader ( output , path , serverName , query = None ) : if path and path != '/' : output . write ( '<title>%s - Status: %s</title>' % ( serverName , path ) ) else : output . write ( '<title>%s - Status</title>' % serverName ) output . write ( ''' <style> body,td { font-family: monospace } .level div { padding-bottom: 4px; } .level .level { margin-left: 2em; padding: 1px 0; } span { color: #090; vertical-align: top } .key { color: black; font-weight: bold } .int, .float { color: #00c } </style> ''' ) output . write ( '<h1 style="margin: 0">Stats</h1>' ) output . write ( '<h3 style="margin: 3px 0 18px">%s</h3>' % serverName ) output . write ( '<p><form action="#" method="GET">Filter: <input type="text" name="query" size="20" value="%s"></form></p>' % ( query or '' ) )
Writes an HTML header .
267
6
23,857
def htmlFormat ( output , pathParts = ( ) , statDict = None , query = None ) : statDict = statDict or scales . getStats ( ) if query : statDict = runQuery ( statDict , query ) _htmlRenderDict ( pathParts , statDict , output )
Formats as HTML writing to the given object .
68
10
23,858
def _htmlRenderDict ( pathParts , statDict , output ) : keys = list ( statDict . keys ( ) ) keys . sort ( ) links = [ ] output . write ( '<div class="level">' ) for key in keys : keyStr = cgi . escape ( _utf8str ( key ) ) value = statDict [ key ] if hasattr ( value , '__call__' ) : value = value ( ) if hasattr ( value , 'keys' ) : valuePath = pathParts + ( keyStr , ) if isinstance ( value , scales . StatContainer ) and value . isCollapsed ( ) : link = '/status/' + '/' . join ( valuePath ) links . append ( '<div class="key"><a href="%s">%s</a></div>' % ( link , keyStr ) ) else : output . write ( '<div class="key">%s</div>' % keyStr ) _htmlRenderDict ( valuePath , value , output ) else : output . write ( '<div><span class="key">%s</span> <span class="%s">%s</span></div>' % ( keyStr , type ( value ) . __name__ , cgi . escape ( _utf8str ( value ) ) . replace ( '\n' , '<br/>' ) ) ) if links : for link in links : output . write ( link ) output . write ( '</div>' )
Render a dictionary as a table - recursing as necessary .
326
13
23,859
def jsonFormat ( output , statDict = None , query = None , pretty = False ) : statDict = statDict or scales . getStats ( ) if query : statDict = runQuery ( statDict , query ) indent = 2 if pretty else None # At first, assume that strings are in UTF-8. If this fails -- if, for example, we have # crazy binary data -- then in order to get *something* out, we assume ISO-8859-1, # which maps each byte to a unicode code point. try : serialized = json . dumps ( statDict , cls = scales . StatContainerEncoder , indent = indent ) except UnicodeDecodeError : serialized = json . dumps ( statDict , cls = scales . StatContainerEncoder , indent = indent , encoding = 'iso-8859-1' ) output . write ( serialized ) output . write ( '\n' )
Formats as JSON writing to the given object .
203
10
23,860
def RepeatTimer ( interval , function , iterations = 0 , * args , * * kwargs ) : def __repeat_timer ( interval , function , iterations , args , kwargs ) : """Inner function, run in background thread.""" count = 0 while iterations <= 0 or count < iterations : sleep ( interval ) function ( * args , * * kwargs ) count += 1 return start_new_thread ( __repeat_timer , ( interval , function , iterations , args , kwargs ) )
Repeating timer . Returns a thread id .
108
9
23,861
def get_config ( context ) : conf_vars = [ 'disqus_developer' , 'disqus_identifier' , 'disqus_url' , 'disqus_title' , 'disqus_category_id' ] js = '\tvar {} = "{}";' output = [ js . format ( item , context [ item ] ) for item in conf_vars if item in context ] return '\n' . join ( output )
Return the formatted javascript for any disqus config variables .
101
11
23,862
def disqus_show_comments ( context , shortname = '' ) : shortname = getattr ( settings , 'DISQUS_WEBSITE_SHORTNAME' , shortname ) return { 'shortname' : shortname , 'config' : get_config ( context ) , }
Return the HTML code to display DISQUS comments .
63
11
23,863
def add_item ( self , title , link , description , author_email = None , author_name = None , author_link = None , pubdate = None , comments = None , unique_id = None , enclosure = None , categories = ( ) , item_copyright = None , ttl = None , * * kwargs ) : to_unicode = lambda s : force_text ( s , strings_only = True ) if categories : categories = [ to_unicode ( c ) for c in categories ] if ttl is not None : # Force ints to unicode ttl = force_text ( ttl ) item = { 'title' : to_unicode ( title ) , 'link' : iri_to_uri ( link ) , 'description' : to_unicode ( description ) , 'author_email' : to_unicode ( author_email ) , 'author_name' : to_unicode ( author_name ) , 'author_link' : iri_to_uri ( author_link ) , 'pubdate' : pubdate , 'comments' : comments , 'unique_id' : to_unicode ( unique_id ) , 'enclosure' : enclosure , 'categories' : categories or ( ) , 'item_copyright' : to_unicode ( item_copyright ) , 'ttl' : ttl , } item . update ( kwargs ) self . items . append ( item )
Adds an item to the feed . All args are expected to be Python Unicode objects except pubdate which is a datetime . datetime object and enclosure which is an instance of the Enclosure class .
317
40
23,864
def call ( method , data , post = False ) : url = "%s%s" % ( 'http://disqus.com/api/' , method ) if post : # POST request url += "/" data = urlencode ( data ) else : # GET request url += "?%s" % urlencode ( data ) data = '' res = json . load ( urlopen ( url , data ) ) if not res [ 'succeeded' ] : raise CommandError ( "'%s' failed: %s\nData: %s" % ( method , res [ 'code' ] , data ) ) return res [ 'message' ]
Calls method from the DISQUS API with data either in POST or GET . Returns deserialized JSON response .
141
24
23,865
def _get_comments_to_export ( self , last_export_id = None ) : qs = comments . get_model ( ) . objects . order_by ( 'pk' ) . filter ( is_public = True , is_removed = False ) if last_export_id is not None : print ( "Resuming after comment %s" % str ( last_export_id ) ) qs = qs . filter ( id__gt = last_export_id ) return qs
Return comments which should be exported .
110
7
23,866
def _get_last_state ( self , state_file ) : state = None fp = open ( state_file ) try : state = int ( fp . read ( ) ) print ( "Found previous state: %d" % ( state , ) ) finally : fp . close ( ) return state
Checks the given path for the last exported comment s id
66
12
23,867
def _save_state ( self , state_file , last_pk ) : fp = open ( state_file , 'w+' ) try : fp . write ( str ( last_pk ) ) finally : fp . close ( )
Saves the last_pk into the given state_file
55
13
23,868
def _get_request ( self , request_url , request_method , * * params ) : if request_method == 'GET' : if params : request_url += '&%s' % urlencode ( params ) request = Request ( request_url ) elif request_method == 'POST' : request = Request ( request_url , urlencode ( params , doseq = 1 ) ) return request
Return a Request object that has the GET parameters attached to the url or the POST data attached to the object .
89
22
23,869
def call ( self , method , * * params ) : url = self . api_url % method request = self . _get_request ( url , self . METHODS [ method ] , * * params ) try : response = urlopen ( request ) except URLError : raise else : response_json = json . loads ( response . read ( ) ) if not response_json [ 'succeeded' ] : raise DisqusException ( response_json [ 'message' ] ) return response_json [ 'message' ]
Call the DISQUS API and return the json response . URLError is raised when the request failed . DisqusException is raised when the query didn t succeed .
114
35
23,870
def init_app ( app ) : config = EmailsConfig ( app ) # register extension with app app . extensions = getattr ( app , 'extensions' , { } ) app . extensions [ 'emails' ] = config return config
Initialize flask application . It creates EmailsConfig object and saves it in app . extensions .
50
18
23,871
def send ( self , smtp = None , * * kw ) : smtp_options = { } smtp_options . update ( self . config . smtp_options ) if smtp : smtp_options . update ( smtp ) return super ( Message , self ) . send ( smtp = smtp_options , * * kw )
Sends message .
76
4
23,872
def options ( self ) : config = self . _config o = { } o . update ( self . _default_smtp_options ) o . update ( self . _default_message_options ) o . update ( self . _default_backend_options ) o . update ( get_namespace ( config , 'EMAIL_' , valid_keys = o . keys ( ) ) ) o [ 'port' ] = int ( o [ 'port' ] ) o [ 'timeout' ] = float ( o [ 'timeout' ] ) return o
Reads all EMAIL_ options and set default values .
119
13
23,873
def smtp_options ( self ) : o = { } options = self . options for key in self . _default_smtp_options : if key in options : o [ key ] = options [ key ] o [ 'user' ] = o . pop ( 'host_user' , None ) o [ 'password' ] = o . pop ( 'host_password' , None ) o [ 'tls' ] = o . pop ( 'use_tls' , False ) o [ 'ssl' ] = o . pop ( 'use_ssl' , False ) o [ 'debug' ] = o . pop ( 'smtp_debug' , 0 ) for k in ( 'certfile' , 'keyfile' ) : v = o . pop ( 'ssl_' + k , None ) if v : o [ k ] = v return o
Convert config namespace to emails . backend . SMTPBackend namespace Returns dict for SMTPFactory
184
20
23,874
def message_options ( self ) : o = { } options = self . options for key in self . _default_message_options : if key in options : o [ key ] = options [ key ] return o
Convert config namespace to emails . Message namespace
45
9
23,875
def start ( self ) -> None : session = self . client . create_session ( self . kind , self . proxy_user , self . jars , self . py_files , self . files , self . driver_memory , self . driver_cores , self . executor_memory , self . executor_cores , self . num_executors , self . archives , self . queue , self . name , self . spark_conf , ) self . session_id = session . session_id not_ready = { SessionState . NOT_STARTED , SessionState . STARTING } intervals = polling_intervals ( [ 0.1 , 0.2 , 0.3 , 0.5 ] , 1.0 ) while self . state in not_ready : time . sleep ( next ( intervals ) )
Create the remote Spark session and wait for it to be ready .
175
13
23,876
def state ( self ) -> SessionState : if self . session_id is None : raise ValueError ( "session not yet started" ) session = self . client . get_session ( self . session_id ) if session is None : raise ValueError ( "session not found - it may have been shut down" ) return session . state
The state of the managed Spark session .
71
8
23,877
def close ( self ) -> None : if self . session_id is not None : self . client . delete_session ( self . session_id ) self . client . close ( )
Kill the managed Spark session .
39
6
23,878
def run ( self , code : str ) -> Output : output = self . _execute ( code ) if self . echo and output . text : print ( output . text ) if self . check : output . raise_for_status ( ) return output
Run some code in the managed Spark session .
52
9
23,879
def read ( self , dataframe_name : str ) -> pandas . DataFrame : code = serialise_dataframe_code ( dataframe_name , self . kind ) output = self . _execute ( code ) output . raise_for_status ( ) if output . text is None : raise RuntimeError ( "statement had no text output" ) return deserialise_dataframe ( output . text )
Evaluate and retrieve a Spark dataframe in the managed session .
87
14
23,880
def read_sql ( self , code : str ) -> pandas . DataFrame : if self . kind != SessionKind . SQL : raise ValueError ( "not a SQL session" ) output = self . _execute ( code ) output . raise_for_status ( ) if output . json is None : raise RuntimeError ( "statement had no JSON output" ) return dataframe_from_json_output ( output . json )
Evaluate a Spark SQL satatement and retrieve the result .
90
14
23,881
def server_version ( self ) -> Version : if self . _server_version_cache is None : data = self . _client . get ( "/version" ) self . _server_version_cache = Version ( data [ "version" ] ) return self . _server_version_cache
Get the version of Livy running on the server .
62
11
23,882
def list_sessions ( self ) -> List [ Session ] : data = self . _client . get ( "/sessions" ) return [ Session . from_json ( item ) for item in data [ "sessions" ] ]
List all the active sessions in Livy .
49
9
23,883
def create_session ( self , kind : SessionKind , proxy_user : str = None , jars : List [ str ] = None , py_files : List [ str ] = None , files : List [ str ] = None , driver_memory : str = None , driver_cores : int = None , executor_memory : str = None , executor_cores : int = None , num_executors : int = None , archives : List [ str ] = None , queue : str = None , name : str = None , spark_conf : Dict [ str , Any ] = None , ) -> Session : if self . legacy_server ( ) : valid_kinds = VALID_LEGACY_SESSION_KINDS else : valid_kinds = VALID_SESSION_KINDS if kind not in valid_kinds : raise ValueError ( f"{kind} is not a valid session kind for a Livy server of " f"this version (should be one of {valid_kinds})" ) body = { "kind" : kind . value } if proxy_user is not None : body [ "proxyUser" ] = proxy_user if jars is not None : body [ "jars" ] = jars if py_files is not None : body [ "pyFiles" ] = py_files if files is not None : body [ "files" ] = files if driver_memory is not None : body [ "driverMemory" ] = driver_memory if driver_cores is not None : body [ "driverCores" ] = driver_cores if executor_memory is not None : body [ "executorMemory" ] = executor_memory if executor_cores is not None : body [ "executorCores" ] = executor_cores if num_executors is not None : body [ "numExecutors" ] = num_executors if archives is not None : body [ "archives" ] = archives if queue is not None : body [ "queue" ] = queue if name is not None : body [ "name" ] = name if spark_conf is not None : body [ "conf" ] = spark_conf data = self . _client . post ( "/sessions" , data = body ) return Session . from_json ( data )
Create a new session in Livy .
503
8
23,884
def list_statements ( self , session_id : int ) -> List [ Statement ] : response = self . _client . get ( f"/sessions/{session_id}/statements" ) return [ Statement . from_json ( session_id , data ) for data in response [ "statements" ] ]
Get all the statements in a session .
70
8
23,885
def create_statement ( self , session_id : int , code : str , kind : StatementKind = None ) -> Statement : data = { "code" : code } if kind is not None : if self . legacy_server ( ) : LOGGER . warning ( "statement kind ignored on Livy<0.5.0" ) data [ "kind" ] = kind . value response = self . _client . post ( f"/sessions/{session_id}/statements" , data = data ) return Statement . from_json ( session_id , response )
Run a statement in a session .
123
7
23,886
def get_statement ( self , session_id : int , statement_id : int ) -> Statement : response = self . _client . get ( f"/sessions/{session_id}/statements/{statement_id}" ) return Statement . from_json ( session_id , response )
Get information about a statement in a session .
65
9
23,887
def lattice ( lattice , filename , directory , render , view , * * kwargs ) : dot = graphviz . Digraph ( name = lattice . __class__ . __name__ , comment = repr ( lattice ) , filename = filename , directory = directory , node_attr = dict ( shape = 'circle' , width = '.25' , style = 'filled' , label = '' ) , edge_attr = dict ( dir = 'none' , labeldistance = '1.5' , minlen = '2' ) , * * kwargs ) sortkey = SORTKEYS [ 0 ] node_name = NAME_GETTERS [ 0 ] for concept in lattice . _concepts : name = node_name ( concept ) dot . node ( name ) if concept . objects : dot . edge ( name , name , headlabel = ' ' . join ( concept . objects ) , labelangle = '270' , color = 'transparent' ) if concept . properties : dot . edge ( name , name , taillabel = ' ' . join ( concept . properties ) , labelangle = '90' , color = 'transparent' ) dot . edges ( ( name , node_name ( c ) ) for c in sorted ( concept . lower_neighbors , key = sortkey ) ) if render or view : dot . render ( view = view ) # pragma: no cover return dot
Return graphviz source for visualizing the lattice graph .
307
13
23,888
def load ( cls , filename , encoding ) : if encoding is None : encoding = cls . encoding with io . open ( filename , 'r' , encoding = encoding ) as fd : source = fd . read ( ) if cls . normalize_newlines : source = source . replace ( '\r\n' , '\n' ) . replace ( '\r' , '\n' ) return cls . loads ( source )
Load and parse serialized objects properties bools from file .
98
12
23,889
def dump ( cls , filename , objects , properties , bools , encoding ) : if encoding is None : encoding = cls . encoding source = cls . dumps ( objects , properties , bools ) if PY2 : source = unicode ( source ) with io . open ( filename , 'w' , encoding = encoding ) as fd : fd . write ( source )
Write serialized objects properties bools to file .
81
10
23,890
def load_csv ( filename , dialect = 'excel' , encoding = 'utf-8' ) : return Context . fromfile ( filename , 'csv' , encoding , dialect = dialect )
Load and return formal context from CSV file .
41
9
23,891
def ensure_compatible ( left , right ) : conflicts = list ( conflicting_pairs ( left , right ) ) if conflicts : raise ValueError ( 'conflicting values for object/property pairs: %r' % conflicts )
Raise an informative ValueError if the two definitions disagree .
48
12
23,892
def rename_object ( self , old , new ) : self . _objects . replace ( old , new ) pairs = self . _pairs pairs |= { ( new , p ) for p in self . _properties if ( old , p ) in pairs and not pairs . remove ( ( old , p ) ) }
Replace the name of an object by a new one .
67
12
23,893
def rename_property ( self , old , new ) : self . _properties . replace ( old , new ) pairs = self . _pairs pairs |= { ( o , new ) for o in self . _objects if ( o , old ) in pairs and not pairs . remove ( ( o , old ) ) }
Replace the name of a property by a new one .
67
12
23,894
def add_object ( self , obj , properties = ( ) ) : self . _objects . add ( obj ) self . _properties |= properties self . _pairs . update ( ( obj , p ) for p in properties )
Add an object to the definition and add properties as related .
49
12
23,895
def add_property ( self , prop , objects = ( ) ) : self . _properties . add ( prop ) self . _objects |= objects self . _pairs . update ( ( o , prop ) for o in objects )
Add a property to the definition and add objects as related .
49
12
23,896
def remove_object ( self , obj ) : self . _objects . remove ( obj ) self . _pairs . difference_update ( ( obj , p ) for p in self . _properties )
Remove an object from the definition .
42
7
23,897
def remove_property ( self , prop ) : self . _properties . remove ( prop ) self . _pairs . difference_update ( ( o , prop ) for o in self . _objects )
Remove a property from the definition .
42
7
23,898
def set_object ( self , obj , properties ) : self . _objects . add ( obj ) properties = set ( properties ) self . _properties |= properties pairs = self . _pairs for p in self . _properties : if p in properties : pairs . add ( ( obj , p ) ) else : pairs . discard ( ( obj , p ) )
Add an object to the definition and set its properties .
76
11
23,899
def set_property ( self , prop , objects ) : self . _properties . add ( prop ) objects = set ( objects ) self . _objects |= objects pairs = self . _pairs for o in self . _objects : if o in objects : pairs . add ( ( o , prop ) ) else : pairs . discard ( ( o , prop ) )
Add a property to the definition and set its objects .
76
11