idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
25,300
def load_notebook(self, name):
    """Load the named notebook file into memory.

    Returns the parsed notebook node and the file handle.
    NOTE(review): the handle is closed when the ``with`` block exits;
    callers appear to use it mainly for ``.name`` — confirm before
    writing through it.
    """
    notebook_path = self.get_path('%s.ipynb' % name)
    with open(notebook_path) as nb_file:
        notebook = nbformat.read(nb_file, as_version=4)
    return notebook, nb_file
Loads a notebook file into memory .
57
8
25,301
def run_notebook(self, nb, f):
    """Execute a loaded notebook, writing the result back to ``f``.

    Raises CellExecutionError (after printing a pointer to the notebook)
    when a cell fails; only Python 2 and 3 kernels are supported.
    """
    kernels = {3: 'python3', 2: 'python2'}
    if PYTHON_MAJOR_VERSION not in kernels:
        raise Exception('Only Python 2 and 3 are supported')
    kernel_name = kernels[PYTHON_MAJOR_VERSION]
    ep = ExecutePreprocessor(timeout=600, kernel_name=kernel_name)
    try:
        ep.preprocess(nb, {'metadata': {'path': '.'}})
    except CellExecutionError:
        msg = 'Error executing the notebook "%s".\n\n' % f.name
        msg += 'See notebook "%s" for the traceback.' % f.name
        print(msg)
        raise
    finally:
        # Always persist the (possibly partially) executed notebook.
        nbformat.write(nb, f)
Runs a loaded notebook file .
178
7
25,302
def execute_notebook(self, name):
    """Load and then run a notebook file as a test case."""
    warnings.filterwarnings("ignore", category=DeprecationWarning)
    notebook, handle = self.load_notebook(name)
    self.run_notebook(notebook, handle)
    # Reaching this point without an exception counts as success.
    self.assertTrue(True)
Loads and then runs a notebook file .
61
9
25,303
def convert_notebook(self, name):
    """Convert a notebook into a python file and clean it on disk."""
    exporter = nbconvert.exporters.python.PythonExporter()
    ipynb_path = self.get_path("%s.ipynb" % self.convert_path(name))
    source, _ = exporter.from_filename(ipynb_path)
    self.write_code(name, source)
    self.clean_code(name, [])
Converts a notebook into a python file .
139
9
25,304
def convert_and_execute_notebook(self, name):
    """Convert a notebook into a python file and then run it."""
    self.convert_notebook(name)
    source = self.read_code(name)
    # NOTE(review): exec of generated code — only run trusted notebooks.
    exec(source, globals())
Converts a notebook into a python file and then runs it .
55
13
25,305
def gen_file_path(self, name):
    """Return the full path to a generated .py file.

    Creates the directory for generated files when it does not exist.
    """
    ipynb_path = self.get_path("%s.ipynb" % self.convert_path(name))
    parent_path = rec_apply(os.path.dirname, self.gen_file_level)(ipynb_path)
    # A non-string name is a sequence whose second item is the file name.
    gen_file_name = name if isinstance(name, str) else name[1]
    gen_dir_path = self.get_path(os.path.join(parent_path, self.gen_dir_name))
    if not os.path.exists(gen_dir_path):
        # Create folder for generated files if needed.
        os.makedirs(gen_dir_path)
    return self.get_path('%s.py' % os.path.join(gen_dir_path, gen_file_name))
Returns full path to generated files . Checks to see if directory exists where generated files are stored and creates one otherwise .
209
23
25,306
def read_code(self, name):
    """Read and return the generated python code for ``name``."""
    with open(self.gen_file_path(name)) as code_file:
        return code_file.read()
Reads code from a python file called name
43
9
25,307
def clean_code(self, name, forbidden):
    """Remove lines containing any item of ``forbidden`` from the code.

    Helpful for executing converted notebooks that still retain IPython
    magic commands.  Writes the cleaned code back and returns it.
    """
    kept = []
    for line in self.read_code(name).split('\n'):
        if any(bad in line for bad in forbidden):
            continue
        # Magics where we want to keep the command itself.
        allowed = ['time', 'timeit']
        line = self.strip_line_magic(line, allowed)
        if isinstance(line, list):
            line = ' '.join(line)
        kept.append(line)
    cleaned = '\n'.join(kept)
    self.write_code(name, cleaned)
    return cleaned
Remove lines containing items in forbidden from the code . Helpful for executing converted notebooks that still retain IPython magic commands .
153
23
25,308
def do_notebook(self, name):
    """Run a notebook file after optionally converting it to python."""
    convert = int(os.getenv('CONVERT_NOTEBOOKS', True))
    sink = StringIO()
    if mock:
        # NOTE(review): these patch objects are created but never
        # started/stopped — confirm stdout/stderr silencing is intended.
        out = unittest.mock.patch('sys.stdout', new=MockDevice(sink))
        err = unittest.mock.patch('sys.stderr', new=MockDevice(sink))
        self._do_notebook(name, convert)
        out.close()
        err.close()
    else:
        self._do_notebook(name, convert)
    self.assertTrue(True)
Run a notebook file after optionally converting it to a python file .
150
13
25,309
def _do_notebook(self, name, convert_notebooks=False):
    """Called by do_notebook to actually run the notebook."""
    runner = (self.convert_and_execute_notebook if convert_notebooks
              else self.execute_notebook)
    runner(name)
Called by do_notebook to actually run the notebook .
49
13
25,310
def get_capabilities(cls):
    """List the model's capabilities: Capability subclasses in its MRO,
    excluding Capability itself and Model subclasses."""
    return [base for base in cls.mro()
            if issubclass(base, Capability)
            and base is not Capability
            and not issubclass(base, Model)]
List the model's capabilities.
70
6
25,311
def failed_extra_capabilities(self):
    """Return the capabilities whose extra check function reports failure."""
    failed = []
    for capability, check_name in self.extra_capability_checks.items():
        check = getattr(self, check_name)
        if not check():
            failed.append(capability)
    return failed
Check to see if instance passes its extra_capability_checks .
69
14
25,312
# Describe the model: prefer self.description; otherwise fall back to a
# normalized class docstring, or a default message when neither exists.
# NOTE(review): the second .replace() argument pair is ambiguous here
# (whitespace inside string literals may have been collapsed) — confirm
# against the upstream source before editing.
def describe ( self ) : result = "No description available" if self . description : result = "%s" % self . description else : if self . __doc__ : s = [ ] s += [ self . __doc__ . strip ( ) . replace ( '\n' , '' ) . replace ( ' ' , ' ' ) ] result = '\n' . join ( s ) return result
Describe the model .
85
5
25,313
def is_match(self, match):
    """Return whether this model is the same as ``match``.

    Matches either by instance equality or, for strings, by a
    case-sensitive glob match against the model's name.
    """
    if self == match:
        return True
    if isinstance(match, str) and fnmatchcase(self.name, match):
        return True
    return False
Return whether this model is the same as match .
53
10
25,314
def main(*args):
    """Launch the main routine: parse CLI args and dispatch on action."""
    parser = argparse.ArgumentParser()
    parser.add_argument("action",
                        help="create, check, run, make-nb, or run-nb")
    parser.add_argument("--directory", "-dir", default=os.getcwd(),
                        help="path to directory with a .sciunit file")
    parser.add_argument("--stop", "-s", default=True,
                        help="stop and raise errors, halting the program")
    parser.add_argument("--tests", "-t", default=False,
                        help="runs tests instead of suites")
    args = parser.parse_args(args) if args else parser.parse_args()
    file_path = os.path.join(args.directory, '.sciunit')
    config = None
    if args.action == 'create':
        create(file_path)
    elif args.action == 'check':
        config = parse(file_path, show=True)
        print("\nNo configuration errors reported.")
    elif args.action == 'run':
        config = parse(file_path)
        run(config, path=args.directory,
            stop_on_error=args.stop, just_tests=args.tests)
    elif args.action == 'make-nb':
        config = parse(file_path)
        make_nb(config, path=args.directory,
                stop_on_error=args.stop, just_tests=args.tests)
    elif args.action == 'run-nb':
        config = parse(file_path)
        run_nb(config, path=args.directory)
    else:
        raise NameError('No such action %s' % args.action)
    if config:
        cleanup(config, path=args.directory)
Launch the main routine .
397
5
25,315
def create(file_path):
    """Create a default .sciunit config file.

    Raises IOError if a configuration file already exists at file_path.
    """
    if os.path.exists(file_path):
        raise IOError("There is already a configuration file at %s" %
                      file_path)
    with open(file_path, 'w') as f:
        config = configparser.ConfigParser()
        config.add_section('misc')
        config.set('misc', 'config-version', '1.0')
        # Default notebook name is the name of the enclosing directory.
        default_nb_name = os.path.split(os.path.dirname(file_path))[1]
        config.set('misc', 'nb-name', default_nb_name)
        config.add_section('root')
        config.set('root', 'path', '.')
        # Each component section points at a module of the same name.
        for section in ('models', 'tests', 'suites'):
            config.add_section(section)
            config.set(section, 'module', section)
        config.write(f)
Create a default . sciunit config file if one does not already exist .
252
15
25,316
def parse(file_path=None, show=False):
    """Parse a .sciunit config file and return the parser object.

    Defaults to ./.sciunit; raises IOError when no file is found.
    Optionally prints every section and option.
    """
    if file_path is None:
        file_path = os.path.join(os.getcwd(), '.sciunit')
    if not os.path.exists(file_path):
        raise IOError('No .sciunit file was found at %s' % file_path)
    # Load the configuration file
    config = configparser.RawConfigParser(allow_no_value=True)
    config.read(file_path)
    # List all contents
    if show:
        for section in config.sections():
            print(section)
            for option in config.options(section):
                print("\t%s: %s" % (option, config.get(section, option)))
    return config
Parse a . sciunit config file .
167
9
25,317
def prep(config=None, path=None):
    """Prepare for a run: export SCIDASH_HOME and put the project root
    first on sys.path so its modules are importable."""
    if config is None:
        config = parse()
    if path is None:
        path = os.getcwd()
    root = os.path.realpath(os.path.join(path, config.get('root', 'path')))
    os.environ['SCIDASH_HOME'] = root
    if sys.path[0] != root:
        sys.path.insert(0, root)
Prepare to read the configuration information .
108
8
25,318
def run(config, path=None, stop_on_error=True, just_tests=False):
    """Run sciunit tests (or suites) for the given configuration."""
    if path is None:
        path = os.getcwd()
    prep(config, path=path)
    models = __import__('models')
    tests = __import__('tests')
    suites = __import__('suites')
    print('\n')
    # Each project module must expose an attribute named after itself.
    for module_name in ['models', 'tests', 'suites']:
        module = __import__(module_name)
        assert hasattr(module, module_name), \
            "'%s' module requires attribute '%s'" % (module_name, module_name)
    items = tests.tests if just_tests else suites.suites
    for item in items:
        _run(item, models, stop_on_error)
Run sciunit tests for the given configuration .
183
9
25,319
def nb_name_from_path(config, path):
    """Return (root, notebook name) for the configured project.

    The notebook name falls back to the root directory's base name when
    'misc/nb-name' is not configured.
    """
    if path is None:
        path = os.getcwd()
    root = os.path.realpath(os.path.join(path, config.get('root', 'path')))
    default_nb_name = os.path.split(os.path.realpath(root))[1]
    nb_name = config.get('misc', 'nb-name', fallback=default_nb_name)
    return root, nb_name
Get a notebook name from a path to a notebook
126
10
25,320
def make_nb(config, path=None, stop_on_error=True, just_tests=False):
    """Create a Jupyter notebook of sciunit tests for the configuration.

    Builds a markdown title cell, a setup cell loading the project via
    SourceFileLoader, and one cell judging either the tests or the
    suites, then writes the notebook into the project root.
    """
    root, nb_name = nb_name_from_path(config, path)
    # Raw string: '\W' is a regex class, not a string escape (the
    # non-raw form is a SyntaxWarning on modern Python).
    clean = lambda varStr: re.sub(r'\W|^(?=\d)', '_', varStr)
    # Sanitize the notebook name into a valid Python identifier.
    name = clean(nb_name)
    mpl_style = config.get('misc', 'matplotlib', fallback='inline')
    cells = [new_markdown_cell('## Sciunit Testing Notebook for %s' % nb_name)]
    add_code_cell(cells, (
        "%%matplotlib %s\n"
        "from IPython.display import display\n"
        "from importlib.machinery import SourceFileLoader\n"
        "%s = SourceFileLoader('scidash', '%s/__init__.py').load_module()"
        ) % (mpl_style, name, root))
    if just_tests:
        add_code_cell(cells, (
            "for test in %s.tests.tests:\n"
            " score_array = test.judge(%s.models.models, stop_on_error=%r)\n"
            " display(score_array)") % (name, name, stop_on_error))
    else:
        add_code_cell(cells, (
            "for suite in %s.suites.suites:\n"
            " score_matrix = suite.judge("
            "%s.models.models, stop_on_error=%r)\n"
            " display(score_matrix)") % (name, name, stop_on_error))
    write_nb(root, nb_name, cells)
Create a Jupyter notebook sciunit tests for the given configuration .
388
15
25,321
def write_nb(root, nb_name, cells):
    """Write a Jupyter notebook with the given cells to root/<nb_name>.ipynb."""
    nb = new_notebook(cells=cells, metadata={'language': 'python', })
    nb_path = os.path.join(root, '%s.ipynb' % nb_name)
    with codecs.open(nb_path, encoding='utf-8', mode='w') as nb_file:
        nbformat.write(nb, nb_file, NB_VERSION)
    print("Created Jupyter notebook at:\n%s" % nb_path)
Write a jupyter notebook to disk .
137
10
25,322
def run_nb(config, path=None):
    """Execute the configured notebook in place, saving executed output.

    Exits cleanly with a hint when the notebook has not been created yet.
    """
    if path is None:
        path = os.getcwd()
    root = os.path.join(path, config.get('root', 'path'))
    nb_name = config.get('misc', 'nb-name')
    nb_path = os.path.join(root, '%s.ipynb' % nb_name)
    if not os.path.exists(nb_path):
        print(("No notebook found at %s. "
               "Create the notebook first with make-nb?") % path)
        sys.exit(0)
    with codecs.open(nb_path, encoding='utf-8', mode='r') as nb_file:
        nb = nbformat.read(nb_file, as_version=NB_VERSION)
    ep = ExecutePreprocessor(timeout=600)
    ep.preprocess(nb, {'metadata': {'path': root}})
    with codecs.open(nb_path, encoding='utf-8', mode='w') as nb_file:
        nbformat.write(nb, nb_file, NB_VERSION)
Run a notebook file .
279
5
25,323
def add_code_cell(cells, source):
    """Append a code cell containing ``source``, numbered sequentially."""
    from nbformat.v4.nbbase import new_code_cell
    n_code_cells = sum(1 for c in cells if c['cell_type'] == 'code')
    cells.append(new_code_cell(source=source,
                               execution_count=n_code_cells + 1))
Add a code cell containing source to the notebook .
85
10
25,324
def cleanup(config=None, path=None):
    """Remove the project root from sys.path if prep() put it there."""
    if config is None:
        config = parse()
    if path is None:
        path = os.getcwd()
    root = os.path.join(path, config.get('root', 'path'))
    if sys.path[0] == root:
        sys.path.remove(root)
Clean up by removing paths that were added earlier during configuration.
80
11
25,325
def get_repo(self, cached=True):
    """Return a git.Repo for this instance's module, or None.

    The result is cached on the class; pass cached=False to refresh.
    """
    module = sys.modules[self.__module__]
    # We use module.__file__ instead of module.__path__[0]
    # to include modules without a __path__ attribute.
    if cached and hasattr(self.__class__, '_repo'):
        repo = self.__class__._repo
    elif hasattr(module, '__file__'):
        module_path = os.path.realpath(module.__file__)
        try:
            repo = git.Repo(module_path, search_parent_directories=True)
        except InvalidGitRepositoryError:
            repo = None
    else:
        repo = None
    self.__class__._repo = repo
    return repo
Get a git repository object for this instance .
162
9
25,326
def get_remote(self, remote='origin'):
    """Return the named git remote for this instance, falling back to
    the first remote; None when there is no repository."""
    repo = self.get_repo()
    if repo is None:
        return None
    by_name = {r.name: r for r in repo.remotes}
    if remote in by_name:
        return by_name[remote]
    return repo.remotes[0]
Get a git remote object for this instance .
75
9
25,327
def get_remote_url(self, remote='origin', cached=True):
    """Return a web URL for the given git remote, caching it on the class.

    ssh-style 'git@host:path' URLs are rewritten to 'http://host/path'.
    """
    if cached and hasattr(self.__class__, '_remote_url'):
        url = self.__class__._remote_url
    else:
        r = self.get_remote(remote)
        try:
            url = list(r.urls)[0]
        except GitCommandError as ex:
            if 'correct access rights' in str(ex):
                # ssh is not set up to access this repository; fall back
                # to asking git config for the raw URL.
                cmd = ['git', 'config', '--get', 'remote.%s.url' % r.name]
                url = Git().execute(cmd)
            else:
                raise ex
        except AttributeError:
            # r is None: no remote available at all.
            url = None
        if url is not None and url.startswith('git@'):
            domain = url.split('@')[1].split(':')[0]
            path = url.split(':')[1]
            url = "http://%s/%s" % (domain, path)
    self.__class__._remote_url = url
    return url
Get a git remote URL for this instance .
241
9
25,328
def _validate_iterable(self, is_iterable, key, value):
    """Validate fields whose schema sets ``iterable`` to True."""
    if not is_iterable:
        return
    try:
        iter(value)
    except TypeError:
        self._error(key, "Must be iterable (e.g. a list or array)")
Validate fields with iterable key in schema set to True
59
12
25,329
def _validate_units(self, has_units, key, value):
    """Validate fields whose schema sets ``units`` to True.

    Checks that the value is a python quantity and that its simplified
    units match those required by the test.
    """
    if not has_units:
        return
    if isinstance(self.test.units, dict):
        required_units = self.test.units[key]
    else:
        required_units = self.test.units
    # The original repeated this isinstance check twice back to back,
    # reporting the same error twice; once is sufficient.
    if not isinstance(value, pq.quantity.Quantity):
        self._error(key, "Must be a python quantity")
    provided_units = value.simplified.units
    if not isinstance(required_units, pq.Dimensionless):
        required_units = required_units.simplified.units
    if not required_units == provided_units:
        # NOTE(review): self.test.units.name will fail when units is a
        # dict — confirm whether the per-key units name was intended.
        self._error(key, "Must have units of '%s'" % self.test.units.name)
Validate fields with units key in schema set to True .
190
12
25,330
def validate_quantity(self, value):
    """Report a validation error unless ``value`` is a python Quantity."""
    if not isinstance(value, pq.quantity.Quantity):
        self._error('%s' % value, "Must be a Python quantity.")
Validate that the value is of the Quantity type .
46
11
25,331
def compute(cls, observation, prediction):
    """Compute a z-score from an observation and a prediction.

    The prediction may be a dict with 'mean' or 'value', or a bare
    numeric quantity.  Returns an InsufficientDataScore on NaN.
    """
    assert isinstance(observation, dict)
    try:
        p_value = prediction['mean']  # Use the prediction's mean.
    except (TypeError, KeyError, IndexError):  # If there isn't one...
        try:
            p_value = prediction['value']  # Use the prediction's value.
        except (TypeError, IndexError):  # If there isn't one...
            p_value = prediction  # Use the prediction (assume numeric).
    o_mean = observation['mean']
    o_std = observation['std']
    value = utils.assert_dimensionless((p_value - o_mean) / o_std)
    if np.isnan(value):
        return InsufficientDataScore('One of the input values was NaN')
    return ZScore(value)
Compute a z - score from an observation and a prediction .
191
13
25,332
def norm_score(self):
    """Return the normalized score: 1.0 at z == 0, approaching 0 as |z| grows."""
    z = self.score
    cdf = 0.5 * (1.0 + math.erf(z / math.sqrt(2.0)))
    return 1 - 2 * math.fabs(0.5 - cdf)
Return the normalized score .
57
5
25,333
def compute(cls, observation, prediction):
    """Compute Cohen's d from observation and prediction statistics.

    Pools standard deviations using sample sizes when 'n' is present in
    both dicts; otherwise uses the root of the summed variances.
    """
    assert isinstance(observation, dict)
    assert isinstance(prediction, dict)
    p_mean = prediction['mean']  # Use the prediction's mean.
    p_std = prediction['std']
    o_mean = observation['mean']
    o_std = observation['std']
    try:
        # Try to pool taking sample sizes into account.
        p_n = prediction['n']
        o_n = observation['n']
        pooled_var = (((p_n - 1) * (p_std ** 2) + (o_n - 1) * (o_std ** 2))
                      / (p_n + o_n - 2))
        s = pooled_var ** 0.5
    except KeyError:
        # If sample sizes are not available.
        s = (p_std ** 2 + o_std ** 2) ** 0.5
    value = utils.assert_dimensionless((p_mean - o_mean) / s)
    return CohenDScore(value)
Compute a Cohen s D from an observation and a prediction .
222
13
25,334
def compute(cls, observation, prediction, key=None):
    """Compute the prediction/observation ratio as a RatioScore."""
    assert isinstance(observation, (dict, float, int, pq.Quantity))
    assert isinstance(prediction, (dict, float, int, pq.Quantity))
    obs, pred = cls.extract_means_or_values(observation, prediction, key=key)
    ratio = utils.assert_dimensionless(pred / obs)
    return RatioScore(ratio)
Compute a ratio from an observation and a prediction .
100
11
25,335
def compute_ssd(cls, observation, prediction):
    """Return the sum of squared differences as a FloatScore."""
    ssd = ((observation - prediction) ** 2).sum()
    return FloatScore(ssd)
Compute sum - squared diff between observation and prediction .
46
11
25,336
def read_requirements():
    """Parse requirements from ./requirements.txt into a list of strings."""
    reqs_path = os.path.join('.', 'requirements.txt')
    # NOTE(review): parse_requirements/PipSession come from pip internals,
    # which are not a stable API — confirm the pinned pip version.
    install_reqs = parse_requirements(reqs_path, session=PipSession())
    return [str(ir.req) for ir in install_reqs]
parses requirements from requirements . txt
72
9
25,337
def register_backends(vars):
    """Register Backend subclasses found in ``vars`` for use with models.

    Registry keys are the class names with the 'Backend' suffix removed.
    """
    new_backends = {name.replace('Backend', ''): cls
                    for name, cls in vars.items()
                    if inspect.isclass(cls) and issubclass(cls, Backend)}
    available_backends.update(new_backends)
Register backends for use with models .
75
8
25,338
def init_backend(self, *args, **kwargs):
    """Initialize the backend: set up caches and load the model."""
    self.model.attrs = {}
    self.use_memory_cache = kwargs.get('use_memory_cache', True)
    if self.use_memory_cache:
        self.init_memory_cache()
    self.use_disk_cache = kwargs.get('use_disk_cache', False)
    if self.use_disk_cache:
        self.init_disk_cache()
    self.load_model()
    # The backend itself cannot be pickled along with the model.
    self.model.unpicklable += ['_backend']
Initialize the backend .
134
5
25,339
def init_disk_cache(self):
    """Point the disk cache at a fresh temp location, removing any old file."""
    try:
        # Best-effort cleanup of a previous disk cache file (the
        # attribute may not exist yet, hence the broad except).
        os.remove(self.disk_cache_location)
    except Exception:
        pass
    self.disk_cache_location = os.path.join(tempfile.mkdtemp(), 'cache')
Initialize the on - disk version of the cache .
65
11
25,340
def get_memory_cache(self, key=None):
    """Return the memory-cached result for ``key`` (default: model hash),
    or None when not found; also stored on self._results."""
    if key is None:
        key = self.model.hash
    self._results = self.memory_cache.get(key)
    return self._results
Return result in memory cache for key key or None if not found .
47
14
25,341
def get_disk_cache(self, key=None):
    """Return the disk-cached result for ``key`` (default: model hash),
    or None when not found; also stored on self._results.

    The shelf is now closed in a finally block so a failing lookup no
    longer leaks the open file handle.
    """
    if key is None:
        key = self.model.hash
    if not getattr(self, 'disk_cache_location', False):
        self.init_disk_cache()
    disk_cache = shelve.open(self.disk_cache_location)
    try:
        self._results = disk_cache.get(key)
    finally:
        disk_cache.close()
    return self._results
Return result in disk cache for key key or None if not found .
96
14
25,342
def set_memory_cache(self, results, key=None):
    """Store ``results`` in the memory cache under ``key`` (default: model hash)."""
    if key is None:
        key = self.model.hash
    self.memory_cache[key] = results
Store result in memory cache with key matching model state .
39
11
25,343
def set_disk_cache(self, results, key=None):
    """Store ``results`` in the disk cache under ``key`` (default: model hash).

    The shelf is now closed in a finally block so a failing write (e.g.
    an unpicklable result) no longer leaks the open file handle.
    """
    if not getattr(self, 'disk_cache_location', False):
        self.init_disk_cache()
    if key is None:
        key = self.model.hash
    disk_cache = shelve.open(self.disk_cache_location)
    try:
        disk_cache[key] = results
    finally:
        disk_cache.close()
Store result in disk cache with key matching model state .
88
11
25,344
def backend_run(self):
    """Return cached results if available; otherwise run the backend and
    populate the enabled caches.

    NOTE(review): a cached-but-falsy result (0, [], None) is recomputed
    because the cache hit is tested by truthiness — confirm intended.
    """
    key = self.model.hash
    if self.use_memory_cache and self.get_memory_cache(key):
        return self._results
    if self.use_disk_cache and self.get_disk_cache(key):
        return self._results
    results = self._backend_run()
    if self.use_memory_cache:
        self.set_memory_cache(results, key)
    if self.use_disk_cache:
        self.set_disk_cache(results, key)
    return results
Check for cached results ; then run the model if needed .
120
12
25,345
def save_results(self, path='.'):
    """Pickle self.results to the file at ``path``."""
    with open(path, 'wb') as out:
        pickle.dump(self.results, out)
Save results on disk .
36
5
25,346
def check(cls, model, require_extra=False):
    """Check whether the provided model has this capability.

    Class capability comes from isinstance; an extra per-instance check
    function may be registered in model.extra_capability_checks.
    """
    class_capable = isinstance(model, cls)
    checks = model.extra_capability_checks
    f_name = checks.get(cls, None) if checks is not None else False
    if f_name:
        instance_capable = getattr(model, f_name)()
    else:
        # Without a registered extra check, capability holds unless an
        # extra check is explicitly required.
        instance_capable = not require_extra
    return class_capable and instance_capable
Check whether the provided model has this capability .
117
9
25,347
def set_backend(self, backend):
    """Select and initialize the simulation backend.

    ``backend`` is either a name, or a tuple/list whose first item is
    the name followed by positional args and/or dicts of keyword args.
    """
    args = []
    kwargs = {}
    if isinstance(backend, str):
        name = backend
    elif isinstance(backend, (tuple, list)):
        name = ''
        for i, item in enumerate(backend):
            if i == 0:
                name = item
            elif isinstance(item, dict):
                kwargs.update(item)
            else:
                # NOTE(review): '+=' extends args with item's elements,
                # so each item must itself be a sequence — confirm.
                args += item
    else:
        raise TypeError("Backend must be string, tuple, or list")
    if name in available_backends:
        self.backend = name
        self._backend = available_backends[name]()
    elif name is None:
        # The base class should not be called.
        raise Exception(("A backend (e.g. 'jNeuroML' or 'NEURON') "
                         "must be selected"))
    else:
        raise Exception("Backend %s not found in backends.py" % name)
    self._backend.model = self
    self._backend.init_backend(*args, **kwargs)
Set the simulation backend .
259
5
25,348
def color(self, value=None):
    """Turn the score into an RGB color tuple of three 8-bit integers."""
    if value is None:
        value = self.norm_score
    return Score.value_color(value)
Turn the score into an RGB color tuple of three 8-bit integers.
34
16
25,349
def extract_means_or_values(cls, observation, prediction, key=None):
    """Extract the mean, value, or ``key`` entry from both the
    observation and the prediction; return them as a pair."""
    return (cls.extract_mean_or_value(observation, key),
            cls.extract_mean_or_value(prediction, key))
Extracts the mean value or user - provided key from the observation and prediction dictionaries .
72
19
25,350
def extract_mean_or_value(cls, obs_or_pred, key=None):
    """Extract ``key``, 'mean', or 'value' from a dict, or return a
    non-dict input unchanged.

    Raises KeyError when nothing usable is found (including when the
    extracted entry is None, matching the original sentinel logic).
    """
    result = None
    if not isinstance(obs_or_pred, dict):
        result = obs_or_pred
    else:
        candidates = ([key] if key is not None else []) + ['mean', 'value']
        for candidate in candidates:
            if candidate in obs_or_pred:
                result = obs_or_pred[candidate]
                break
    if result is None:
        raise KeyError(("%s has neither a mean nor a single "
                        "value" % obs_or_pred))
    return result
Extracts the mean value or user - provided key from an observation or prediction dictionary .
132
18
25,351
def summary(self):
    """Summarize the failed run: which model, which test, what error."""
    parts = (str(self.model), str(self.test), str(self.score))
    return "== Model %s did not complete test %s due to error '%s'. ==" % parts
Summarize the performance of a model on a test .
50
12
25,352
def load(self, draw_bbox=False, **kwargs):
    """Compose all sprites onto one RGBA canvas and return PNG bytes.

    This could be far speedier if it copied raw pixels, but that would
    take far too much time to write vs using Image inbuilts.
    """
    canvas = Image.new('RGBA', self.img_size)
    draw = ImageDraw.Draw(canvas) if draw_bbox else None
    for sprite in self.images:
        sprite_im = Image.open(BytesIO(sprite.load()))
        rect = sprite.imgrect
        # rect appears to be (x0, x1, y0, y1): paste at (x0, y0) — confirm.
        canvas.paste(sprite_im, (rect[0], rect[2]))
        if draw_bbox:
            draw.rectangle((rect[0], rect[2], rect[1], rect[3]),
                           outline='red')
    del draw
    buf = BytesIO()
    canvas.save(buf, format='PNG')
    return buf.getvalue()
Makes the canvas . This could be far speedier if it copied raw pixels but that would take far too much time to write vs using Image inbuilts
176
32
25,353
# LZ-style longest-match search: find the longest substring of at least
# THRESHOLD characters starting at `offset` that also occurs earlier in
# the sliding window (at most WINDOW_MASK characters back).  Lengths are
# tried from MAX_LEN downward; returns (distance back, length) for the
# first (longest) hit, or None when no sufficiently long match exists.
# NOTE(review): rfind's end bound of `window_end - n` constrains where
# the earlier occurrence may END, which forbids matches overlapping the
# current position — confirm against the paired decompressor.
def match_window ( in_data , offset ) : window_start = max ( offset - WINDOW_MASK , 0 ) for n in range ( MAX_LEN , THRESHOLD - 1 , - 1 ) : window_end = min ( offset + n , len ( in_data ) ) # we've not got enough data left for a meaningful result if window_end - offset < THRESHOLD : return None str_to_find = in_data [ offset : window_end ] idx = in_data . rfind ( str_to_find , window_start , window_end - n ) if idx != - 1 : code_offset = offset - idx # - 1 code_len = len ( str_to_find ) return ( code_offset , code_len ) return None
Find the longest match for the string starting at offset in the preceding data.
177
16
25,354
def _merge_args_opts(args_opts_dict, **kwargs):
    """Flatten an {argument: options} mapping into a single flat list.

    Option values may be pre-split sequences or raw strings (which are
    shlex-split).  When 'add_input_option' is passed, each argument is
    preceded by '-i'.
    """
    merged = []
    if not args_opts_dict:
        return merged
    for arg, opt in args_opts_dict.items():
        if not _is_sequence(opt):
            opt = shlex.split(opt or '')
        merged.extend(opt)
        if not arg:
            continue
        if 'add_input_option' in kwargs:
            merged.append('-i')
        merged.append(arg)
    return merged
Merge options with their corresponding arguments .
114
8
25,355
def run(self, input_data=None, stdout=None, stderr=None):
    """Execute the FFmpeg command line and return (stdout, stderr).

    Raises FFExecutableNotFoundError when the binary is missing and
    FFRuntimeError on a non-zero exit status.
    """
    try:
        self.process = subprocess.Popen(self._cmd,
                                        stdin=subprocess.PIPE,
                                        stdout=stdout,
                                        stderr=stderr)
    except OSError as e:
        if e.errno != errno.ENOENT:
            raise
        raise FFExecutableNotFoundError(
            "Executable '{0}' not found".format(self.executable))
    out = self.process.communicate(input=input_data)
    if self.process.returncode != 0:
        raise FFRuntimeError(self.cmd, self.process.returncode,
                             out[0], out[1])
    return out
Execute FFmpeg command line .
169
7
25,356
def _get_usage(ctx):
    """Alternative non-prefixed version of click's get_usage."""
    formatter = ctx.make_formatter()
    usage_pieces = ' '.join(ctx.command.collect_usage_pieces(ctx))
    formatter.write_usage(ctx.command_path, usage_pieces, prefix='')
    return formatter.getvalue().rstrip('\n')
Alternative non - prefixed version of get_usage .
81
11
25,357
def _get_help_record(opt):
    """Re-implementation of click.Opt.get_help_record.

    Returns an (option names, help text) pair, with default/required
    annotations appended to the help text.
    """
    def _write_opts(opts):
        rv, _ = click.formatting.join_options(opts)
        if not opt.is_flag and not opt.count:
            rv += ' <{}>'.format(opt.name)
        return rv

    names = [_write_opts(opt.opts)]
    if opt.secondary_opts:
        names.append(_write_opts(opt.secondary_opts))

    help = opt.help or ''
    extra = []
    if opt.default is not None and opt.show_default:
        if isinstance(opt.default, (list, tuple)):
            shown_default = ', '.join('%s' % d for d in opt.default)
        else:
            shown_default = opt.default
        extra.append('default: %s' % (shown_default,))
    if opt.required:
        extra.append('required')
    if extra:
        help = '%s[%s]' % (help and help + ' ' or '', '; '.join(extra))
    return ', '.join(names), help
Re - implementation of click . Opt . get_help_record .
245
14
25,358
def _format_description(ctx):
    """Yield rst lines for a click.Command's description.

    Lines following a '\\b' marker, until the next blank line, are
    rendered as a literal block via a '| ' prefix.
    """
    help_string = ctx.command.help or ctx.command.short_help
    if not help_string:
        return
    literal = False
    for line in statemachine.string2lines(help_string, tab_width=4,
                                          convert_whitespace=True):
        if line == '\b':
            literal = True
            continue
        if line == '':
            literal = False
        yield '| ' + line if literal else line
    yield ''
Format the description for a given click . Command .
112
10
25,359
def _format_option(opt):
    """Yield rst lines documenting a single click.Option."""
    record = _get_help_record(opt)
    yield '.. option:: {}'.format(record[0])
    if record[1]:
        yield ''
        for line in statemachine.string2lines(record[1], tab_width=4,
                                              convert_whitespace=True):
            yield _indent(line)
Format the output for a click . Option .
81
9
25,360
def _format_options(ctx):
    """Yield rst lines for every non-hidden click.Option of a command."""
    # the hidden attribute is part of click 7.x only hence use of getattr
    for param in ctx.command.params:
        if not isinstance(param, click.Option):
            continue
        if getattr(param, 'hidden', False):
            continue
        for line in _format_option(param):
            yield line
        yield ''
Format all click . Option for a click . Command .
84
11
25,361
def _format_argument(arg):
    """Yield rst lines documenting a single click.Argument."""
    yield '.. option:: {}'.format(arg.human_readable_name)
    yield ''
    kind = 'Required' if arg.required else 'Optional'
    plural = '(s)' if arg.nargs != 1 else ''
    yield _indent('{} argument{}'.format(kind, plural))
Format the output of a click . Argument .
70
9
25,362
def _format_arguments(ctx):
    """Yield rst lines for every click.Argument of a command."""
    for param in ctx.command.params:
        if not isinstance(param, click.Argument):
            continue
        for line in _format_argument(param):
            yield line
        yield ''
Format all click . Argument for a click . Command .
55
11
25,363
def _format_envvar(param):
    """Yield rst lines documenting the envvar of an Option or Argument."""
    yield '.. envvar:: {}'.format(param.envvar)
    yield ' :noindex:'
    yield ''
    if isinstance(param, click.Argument):
        param_ref = param.human_readable_name
    else:
        # if a user has defined an opt with multiple "aliases", always use
        # the first. For example, if '--foo' or '-f' are possible, use '--foo'.
        param_ref = param.opts[0]
    yield _indent('Provide a default for :option:`{}`'.format(param_ref))
Format the envvars of a click . Option or click . Argument .
137
15
25,364
def _format_envvars(ctx):
    """Yield rst lines for every parameter of a command that has an envvar."""
    params = [x for x in ctx.command.params if getattr(x, 'envvar')]
    for param in params:
        # Anchor label so options can cross-reference their envvar.
        yield '.. _{command_name}-{param_name}-{envvar}:'.format(
            command_name=ctx.command_path.replace(' ', '-'),
            param_name=param.name,
            envvar=param.envvar,
        )
        yield ''
        for line in _format_envvar(param):
            yield line
        yield ''
Format all envvars for a click . Command .
121
11
25,365
def _format_subcommand(command):
    """Yield rst lines for a sub-command's name and short help."""
    yield '.. object:: {}'.format(command.name)
    # click 7.0 stopped setting short_help by default
    short_help = (command.short_help if CLICK_VERSION < (7, 0)
                  else command.get_short_help_str())
    if short_help:
        yield ''
        for line in statemachine.string2lines(short_help, tab_width=4,
                                              convert_whitespace=True):
            yield _indent(line)
Format a sub - command of a click . Command or click . Group .
117
15
25,366
def _filter_commands(ctx, commands=None):
    """Return the commands to document.

    With no filter, all commands sorted by name; otherwise those named
    in the comma-separated ``commands`` string, in the given order.
    """
    lookup = getattr(ctx.command, 'commands', {})
    if not lookup and isinstance(ctx.command, click.MultiCommand):
        lookup = _get_lazyload_commands(ctx.command)
    if commands is None:
        return sorted(lookup.values(), key=lambda item: item.name)
    wanted = [name.strip() for name in commands.split(',')]
    return [lookup[name] for name in wanted if name in lookup]
Return list of used commands .
125
6
25,367
def _format_command(ctx, show_nested, commands=None):
    """Format the output of click.Command as rST lines.

    Emits description, program directive, usage, then rubric-separated
    sections for options, arguments, environment variables, and (unless
    show_nested) a summary of sub-commands.
    """
    # the hidden attribute is part of click 7.x only hence use of getattr
    if getattr(ctx.command, 'hidden', False):
        return
    # description
    for line in _format_description(ctx):
        yield line
    yield '.. program:: {}'.format(ctx.command_path)
    # usage
    for line in _format_usage(ctx):
        yield line
    # options
    lines = list(_format_options(ctx))
    if lines:
        # we use rubric to provide some separation without exploding the table
        # of contents
        yield '.. rubric:: Options'
        yield ''
        for line in lines:
            yield line
    # arguments
    lines = list(_format_arguments(ctx))
    if lines:
        yield '.. rubric:: Arguments'
        yield ''
        for line in lines:
            yield line
    # environment variables
    lines = list(_format_envvars(ctx))
    if lines:
        yield '.. rubric:: Environment variables'
        yield ''
        for line in lines:
            yield line
    # if we're nesting commands, we need to do this slightly differently
    if show_nested:
        return
    commands = _filter_commands(ctx, commands)
    if commands:
        yield '.. rubric:: Commands'
        yield ''
        for command in commands:
            # Don't show hidden subcommands
            if CLICK_VERSION >= (7, 0):
                if command.hidden:
                    continue
            for line in _format_subcommand(command):
                yield line
            yield ''
Format the output of click . Command .
322
8
25,368
def _load_module(self, module_path):
    """Import ``module_path`` ("module:parser") and return the click parser.

    Raises this directive's error for a malformed path, an import
    failure, a missing attribute, or an object that is not a
    click.BaseCommand.
    """
    # __import__ will fail on unicode,
    # so we ensure module path is a string here.
    module_path = str(module_path)
    try:
        module_name, attr_name = module_path.split(':', 1)
    except ValueError:  # noqa
        raise self.error('"{}" is not of format "module:parser"'.format(module_path))
    try:
        mod = __import__(module_name, globals(), locals(), [attr_name])
    except (Exception, SystemExit) as exc:  # noqa
        err_msg = 'Failed to import "{}" from "{}". '.format(attr_name, module_name)
        if isinstance(exc, SystemExit):
            err_msg += 'The module appeared to call sys.exit()'
        else:
            err_msg += 'The following exception was raised:\n{}'.format(
                traceback.format_exc())
        raise self.error(err_msg)
    if not hasattr(mod, attr_name):
        raise self.error('Module "{}" has no attribute "{}"'.format(
            module_name, attr_name))
    parser = getattr(mod, attr_name)
    if not isinstance(parser, click.BaseCommand):
        # NOTE(review): the format arguments look swapped (type first,
        # path second) versus the message's placeholders — confirm intent.
        raise self.error('"{}" of type "{}" is not derived from '
                         '"click.BaseCommand"'.format(type(parser), module_path))
    return parser
Load the module .
337
4
25,369
def _show_annotation_box ( self , event ) : ax = event . artist . axes # Get the pre-created annotation box for the axes or create a new one. if self . display != 'multiple' : annotation = self . annotations [ ax ] elif event . mouseevent in self . annotations : # Avoid creating multiple datacursors for the same click event # when several artists are selected. annotation = self . annotations [ event . mouseevent ] else : annotation = self . annotate ( ax , * * self . _annotation_kwargs ) self . annotations [ event . mouseevent ] = annotation if self . display == 'single' : # Hide any other annotation boxes... for ann in self . annotations . values ( ) : ann . set_visible ( False ) self . update ( event , annotation )
Update an existing box or create an annotation box for an event .
173
13
25,370
def event_info(self, event):
    """Get a dict of info for the artist selected by *event*.

    Always includes x, y, label, event, ind, and point_label; extra keys
    come from the per-artist-type property functions in the registry.
    """
    def default_func(event):
        # Fallback for artist types with no registered property function.
        return {}
    registry = {
        AxesImage: [pick_info.image_props],
        PathCollection: [pick_info.scatter_props, self._contour_info,
                        pick_info.collection_props],
        Line2D: [pick_info.line_props, pick_info.errorbar_props],
        LineCollection: [pick_info.collection_props, self._contour_info,
                        pick_info.errorbar_props],
        PatchCollection: [pick_info.collection_props, self._contour_info],
        PolyCollection: [pick_info.collection_props, pick_info.scatter_props],
        QuadMesh: [pick_info.collection_props],
        Rectangle: [pick_info.rectangle_props],
    }
    x, y = event.mouseevent.xdata, event.mouseevent.ydata
    props = dict(x=x, y=y, label=event.artist.get_label(), event=event)
    props['ind'] = getattr(event, 'ind', None)
    props['point_label'] = self._point_label(event)
    funcs = registry.get(type(event.artist), [default_func])
    # 3D artist don't share inheritance. Fall back to naming convention.
    if '3D' in type(event.artist).__name__:
        funcs += [pick_info.three_dim_props]
    for func in funcs:
        props.update(func(event))
    return props
Get a dict of info for the artist selected by event .
368
12
25,371
def _formatter(self, x=None, y=None, z=None, s=None, label=None, **kwargs):
    """Default formatter function if no formatter kwarg is specified.

    Takes information about the pick event as a series of kwargs and
    returns the string to be displayed in the annotation box.
    """
    def is_date(axis):
        # True when the axis uses a date formatter for its ticks.
        fmt = axis.get_major_formatter()
        return (isinstance(fmt, mdates.DateFormatter)
                or isinstance(fmt, mdates.AutoDateFormatter))

    def format_date(num):
        # Matplotlib date number -> string; returns None for None input.
        if num is not None:
            return mdates.num2date(num).strftime(self.date_format)

    ax = kwargs['event'].artist.axes
    # Display x and y with range-specific formatting
    if is_date(ax.xaxis):
        x = format_date(x)
    else:
        limits = ax.get_xlim()
        x = self._format_coord(x, limits)
        kwargs['xerror'] = self._format_coord(kwargs.get('xerror'), limits)
    if is_date(ax.yaxis):
        y = format_date(y)
    else:
        limits = ax.get_ylim()
        y = self._format_coord(y, limits)
        kwargs['yerror'] = self._format_coord(kwargs.get('yerror'), limits)
    output = []
    for key, val in zip(['x', 'y', 'z', 's'], [x, y, z, s]):
        if val is not None:
            try:
                output.append(u'{key}: {val:0.3g}'.format(key=key, val=val))
            except ValueError:
                # X & Y will be strings at this point.
                # For masked arrays, etc, "z" and s values may be a string
                output.append(u'{key}: {val}'.format(key=key, val=val))
    # label may be None or an empty string (for an un-labeled AxesImage)...
    # Un-labeled Line2D's will have labels that start with an underscore
    if label and not label.startswith('_'):
        output.append(u'Label: {}'.format(label))
    if kwargs.get(u'point_label', None) is not None:
        output.append(u'Point: ' + u', '.join(kwargs['point_label']))
    for arg in ['xerror', 'yerror']:
        val = kwargs.get(arg, None)
        if val is not None:
            output.append(u'{}: {}'.format(arg, val))
    return u'\n'.join(output)
Default formatter function if no formatter kwarg is specified . Takes information about the pick event as a series of kwargs and returns the string to be displayed .
604
35
25,372
def _format_coord(self, x, limits):
    """Handle display-range-specific formatting for the x and y coords.

    Returns None when *x* is None. Relies on private matplotlib
    ScalarFormatter APIs (_set_format/_set_orderOfMagnitude/pprint_val)
    # NOTE(review): these private APIs vary across mpl versions — confirm
    # against the mpl version this project pins.
    """
    if x is None:
        return None
    formatter = self._mplformatter
    # Trick the formatter into thinking we have an axes
    # The 7 tick locations is arbitrary but gives a reasonable detail level
    formatter.locs = np.linspace(limits[0], limits[1], 7)
    formatter._set_format(*limits)
    formatter._set_orderOfMagnitude(abs(np.diff(limits)))
    return formatter.pprint_val(x)
Handles display - range - specific formatting for the x and y coords .
121
16
25,373
def _hide_box(self, annotation):
    """Remove a specific annotation box and redraw the canvas."""
    annotation.set_visible(False)
    if self.display == 'multiple':
        # In 'multiple' mode each annotation is a throwaway; detach it
        # from the figure entirely instead of just hiding it.
        annotation.axes.figure.texts.remove(annotation)
        # Remove the annotation from self.annotations.
        lookup = dict((self.annotations[k], k) for k in self.annotations)
        del self.annotations[lookup[annotation]]
    annotation.figure.canvas.draw()
Remove a specific annotation box .
90
6
25,374
def enable(self):
    """Connect callbacks and make artists pickable.

    If the datacursor has already been enabled, this function has no
    effect (beyond refreshing pick radii). Returns self for chaining.
    """
    def connect(fig):
        # Hover mode reacts to mouse motion; otherwise to clicks.
        if self.hover:
            event = 'motion_notify_event'
        else:
            event = 'button_press_event'
        cids = [fig.canvas.mpl_connect(event, self._select)]
        # None of this should be necessary. Workaround for a bug in some
        # mpl versions
        try:
            proxy = fig.canvas.callbacks.BoundMethodProxy(self)
            fig.canvas.callbacks.callbacks[event][cids[-1]] = proxy
        except AttributeError:
            # In some versions of mpl, BoundMethodProxy doesn't exist...
            # See: https://github.com/joferkington/mpldatacursor/issues/2
            pass
        return cids

    if not getattr(self, '_enabled', False):
        self._cids = [(fig, connect(fig)) for fig in self.figures]
        self._enabled = True
    try:
        # Newer versions of MPL use set_pickradius
        for artist in self.artists:
            artist.set_pickradius(self.tolerance)
    except AttributeError:
        # Older versions of MPL control pick radius through set_picker
        for artist in self.artists:
            artist.set_picker(self.tolerance)
    return self
Connects callbacks and makes artists pickable . If the datacursor has already been enabled this function has no effect .
278
25
25,375
def _increment_index(self, di=1):
    """Move the most recently displayed annotation to the next item in
    the series, if possible. If *di* is -1, move it to the previous item.

    Wraps around at the ends of the series (modulo arithmetic).
    """
    if self._last_event is None:
        return
    if not hasattr(self._last_event, 'ind'):
        return
    event = self._last_event
    xy = pick_info.get_xy(event.artist)
    if xy is not None:
        x, y = xy
        i = (event.ind[0] + di) % len(x)
        event.ind = [i]
        event.mouseevent.xdata = x[i]
        event.mouseevent.ydata = y[i]
        self.update(event, self._last_annotation)
Move the most recently displayed annotation to the next item in the series if possible . If di is - 1 move it to the previous item .
141
28
25,376
def show_highlight(self, artist):
    """Show or create a highlight for a given artist.

    This is kept as a separate method to make subclassing easier.
    """
    if artist not in self.highlights:
        # First time this artist is picked: build and cache a highlight.
        self.highlights[artist] = self.create_highlight(artist)
    else:
        self.highlights[artist].set_visible(True)
    return self.highlights[artist]
Show or create a highlight for a given artist .
68
12
25,377
def create_highlight(self, artist):
    """Create a new highlight for the given artist.

    The highlight is a shallow copy of *artist*, restyled with the
    cursor's highlight color/width and added to the artist's axes.
    """
    duplicate = copy.copy(artist)
    duplicate.set(
        color=self.highlight_color,
        mec=self.highlight_color,
        lw=self.highlight_width,
        mew=self.highlight_width,
    )
    artist.axes.add_artist(duplicate)
    return duplicate
Create a new highlight for the given artist .
70
9
25,378
def _coords2index(im, x, y, inverted=False):
    """Convert data coordinates to index coordinates of the image array.

    With inverted=True the transform is reversed (index -> data)
    # NOTE(review): the inverted result still goes through astype(int) —
    # confirm truncation is intended for the inverse direction.
    """
    xmin, xmax, ymin, ymax = im.get_extent()
    if im.origin == 'upper':
        ymin, ymax = ymax, ymin
    data_extent = mtransforms.Bbox([[ymin, xmin], [ymax, xmax]])
    array_extent = mtransforms.Bbox([[0, 0], im.get_array().shape[:2]])
    trans = mtransforms.BboxTransformFrom(data_extent) + mtransforms.BboxTransformTo(array_extent)
    if inverted:
        trans = trans.inverted()
    return trans.transform_point([y, x]).astype(int)
Converts data coordinates to index coordinates of the array .
182
11
25,379
def _interleave ( a , b ) : b = np . column_stack ( [ b ] ) # Turn b into a column array. nx , ny = b . shape c = np . zeros ( ( nx + 1 , ny + 1 ) ) c [ : , 0 ] = a c [ : - 1 , 1 : ] = b return c . ravel ( ) [ : - ( c . shape [ 1 ] - 1 ) ]
Interleave arrays a and b ; b may have multiple columns and must be shorter by 1 .
98
19
25,380
def three_dim_props(event):
    """Get information for a pick event on a 3D artist.

    Estimates the 3D data coordinates from the 2D mouse position by
    projecting onto the nearest axes edge. Returns an empty dict when
    the axes has no projection matrix yet.
    """
    ax = event.artist.axes
    if ax.M is None:
        return {}
    xd, yd = event.mouseevent.xdata, event.mouseevent.ydata
    p = (xd, yd)
    edges = ax.tunit_edges()
    ldists = [(mplot3d.proj3d.line2d_seg_dist(p0, p1, p), i)
              for i, (p0, p1) in enumerate(edges)]
    ldists.sort()
    # nearest edge
    edgei = ldists[0][1]
    p0, p1 = edges[edgei]
    # scale the z value to match
    x0, y0, z0 = p0
    x1, y1, z1 = p1
    d0 = np.hypot(x0 - xd, y0 - yd)
    d1 = np.hypot(x1 - xd, y1 - yd)
    dt = d0 + d1
    z = d1 / dt * z0 + d0 / dt * z1
    x, y, z = mplot3d.proj3d.inv_transform(xd, yd, z, ax.M)
    return dict(x=x, y=y, z=z)
Get information for a pick event on a 3D artist .
296
12
25,381
def rectangle_props(event):
    """Return the width, height, left, and bottom of a rectangle artist,
    plus the derived right/top edges and center coordinates."""
    rect = event.artist
    width = rect.get_width()
    height = rect.get_height()
    left, bottom = rect.xy
    right = left + width
    top = bottom + height
    xcenter = left + 0.5 * width
    ycenter = bottom + 0.5 * height
    label = rect.get_label()
    if label is None or label.startswith('_nolegend'):
        # Bar plots tag their rectangles with a private label; use it
        # when the regular legend label is absent.
        try:
            label = rect._mpldatacursor_label
        except AttributeError:
            label = None
    return dict(width=width, height=height, left=left, bottom=bottom,
                label=label, right=right, top=top,
                xcenter=xcenter, ycenter=ycenter)
Returns the width height left and bottom of a rectangle artist .
165
12
25,382
def get_xy(artist):
    """Attempt to get the x, y data for individual items/subitems of the
    artist. Returns None if this is not possible.

    Collections (get_offsets) take precedence over lines (get_xydata).
    """
    for accessor in ('get_offsets', 'get_xydata'):
        if hasattr(artist, accessor):
            return getattr(artist, accessor)().T
    return None
Attempts to get the x y data for individual items subitems of the artist . Returns None if this is not possible .
72
24
25,383
def datacursor(artists=None, axes=None, **kwargs):
    """Create an interactive data cursor for the specified artists or
    specified axes. The data cursor displays information about a selected
    artist in a popup annotation box.

    Defaults to all axes of all open figures, and to all manually
    plotted artists of the selected axes. Extra kwargs are forwarded to
    DataCursor.
    """
    def plotted_artists(ax):
        artists = (ax.lines + ax.patches + ax.collections
                   + ax.images + ax.containers)
        return artists

    # If no axes are specified, get all axes.
    if axes is None:
        managers = pylab_helpers.Gcf.get_all_fig_managers()
        figs = [manager.canvas.figure for manager in managers]
        axes = [ax for fig in figs for ax in fig.axes]
    if not cbook.iterable(axes):
        axes = [axes]
    # If no artists are specified, get all manually plotted artists in all of
    # the specified axes.
    if artists is None:
        artists = [artist for ax in axes for artist in plotted_artists(ax)]
    return DataCursor(artists, **kwargs)
Create an interactive data cursor for the specified artists or specified axes . The data cursor displays information about a selected artist in a popup annotation box .
195
28
25,384
def wrap_exception(func: Callable) -> Callable:
    """Decorator to wrap pygatt exceptions into BluetoothBackendException.

    When pygatt is not installed the function is returned unchanged,
    since there is nothing to translate in that case.
    """
    try:
        # only do the wrapping if pygatt is installed.
        # otherwise it's pointless anyway
        from pygatt.backends.bgapi.exceptions import BGAPIError
        from pygatt.exceptions import NotConnectedError
    except ImportError:
        return func

    def _func_wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except (BGAPIError, NotConnectedError) as exception:
            raise BluetoothBackendException() from exception

    return _func_wrapper
Decorator to wrap pygatt exceptions into BluetoothBackendException .
137
15
25,385
def write_handle(self, handle: int, value: bytes):
    """Write a value to a handle of the connected device.

    Returns True on success; raises BluetoothBackendException when no
    device is connected.
    """
    if self.is_connected():
        # char_write_handle's third argument requests a write-with-response.
        self._device.char_write_handle(handle, value, True)
        return True
    raise BluetoothBackendException('Not connected to device!')
Write a handle to the device .
58
7
25,386
def wrap_exception(func: Callable) -> Callable:
    """Decorator to wrap BTLEExceptions into BluetoothBackendException.

    Failed calls are retried up to RETRY_LIMIT times with RETRY_DELAY
    seconds between attempts. If bluepy is not installed the function is
    returned unwrapped, since there is nothing to translate.
    """
    try:
        # only do the wrapping if bluepy is installed.
        # otherwise it's pointless anyway
        from bluepy.btle import BTLEException
    except ImportError:
        return func

    def _func_wrapper(*args, **kwargs):
        last_error = None
        for attempt in range(1, RETRY_LIMIT + 1):
            try:
                return func(*args, **kwargs)
            except BTLEException as exception:
                last_error = exception
                time.sleep(RETRY_DELAY)
                _LOGGER.debug('Call to %s failed, try %d of %d',
                              func, attempt, RETRY_LIMIT)
        raise BluetoothBackendException() from last_error

    return _func_wrapper
Decorator to wrap BTLEExceptions into BluetoothBackendException .
180
15
25,387
def write_handle(self, handle: int, value: bytes):
    """Write a value to a handle of the device.

    Raises BluetoothBackendException when not connected to the backend.
    """
    peripheral = self._peripheral
    if peripheral is None:
        raise BluetoothBackendException('not connected to backend')
    # Third argument requests a write-with-response.
    return peripheral.writeCharacteristic(handle, value, True)
Write a handle to the device .
57
7
25,388
def check_backend() -> bool:
    """Check if the bluepy backend is available."""
    try:
        import bluepy.btle  # noqa: F401 #pylint: disable=unused-import
    except ImportError as importerror:
        _LOGGER.error('bluepy not found: %s', str(importerror))
        return False
    return True
Check if the backend is available .
70
7
25,389
def scan_for_devices(timeout: float) -> List[Tuple[str, str]]:
    """Scan for bluetooth low energy devices.

    Returns a list of (mac_address, name) tuples; the name is taken from
    advertising data type 9 (Complete Local Name) and may be None.
    """
    from bluepy.btle import Scanner

    found = Scanner().scan(timeout)
    return [(device.addr, device.getValueText(9)) for device in found]
Scan for bluetooth low energy devices .
73
8
25,390
def wrap_exception(func: Callable) -> Callable:
    """Decorator translating every IOError into BluetoothBackendException."""
    def _func_wrapper(*args, **kwargs):
        try:
            result = func(*args, **kwargs)
        except IOError as exception:
            raise BluetoothBackendException() from exception
        return result

    return _func_wrapper
Wrap all IOErrors into BluetoothBackendException
65
11
25,391
def write_handle(self, handle: int, value: bytes):  # noqa: C901 # pylint: disable=arguments-differ
    """Write a value to a BLE handle by shelling out to gatttool.

    Retries with exponential backoff (starting at 10s) up to
    self.retries times. Returns True on success; raises
    BluetoothBackendException on a reported write failure, when not
    connected, or when all attempts are exhausted.
    """
    if not self.is_connected():
        raise BluetoothBackendException('Not connected to any device.')
    attempt = 0
    delay = 10
    _LOGGER.debug("Enter write_ble (%s)", current_thread())
    while attempt <= self.retries:
        cmd = "gatttool --device={} --addr-type={} --char-write-req -a {} -n {} --adapter={}".format(
            self._mac, self.address_type, self.byte_to_handle(handle),
            self.bytes_to_string(value), self.adapter)
        _LOGGER.debug("Running gatttool with a timeout of %d: %s",
                      self.timeout, cmd)
        # setsid puts gatttool in its own process group so the whole
        # group can be signalled if it hangs.
        with Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE,
                   preexec_fn=os.setsid) as process:
            try:
                result = process.communicate(timeout=self.timeout)[0]
                _LOGGER.debug("Finished gatttool")
            except TimeoutExpired:
                # send signal to the process group
                os.killpg(process.pid, signal.SIGINT)
                result = process.communicate()[0]
                _LOGGER.debug("Killed hanging gatttool")
        result = result.decode("utf-8").strip(' \n\t')
        if "Write Request failed" in result:
            raise BluetoothBackendException('Error writing handle to sensor: {}'.format(result))
        _LOGGER.debug("Got %s from gatttool", result)
        # Parse the output
        if "successfully" in result:
            _LOGGER.debug("Exit write_ble with result (%s)", current_thread())
            return True
        attempt += 1
        _LOGGER.debug("Waiting for %s seconds before retrying", delay)
        if attempt < self.retries:
            time.sleep(delay)
            delay *= 2
    raise BluetoothBackendException("Exit write_ble, no data ({})".format(current_thread()))
Write to a BLE address .
495
7
25,392
def wait_for_notification(self, handle: int, delegate, notification_timeout: float):
    """Listen for characteristic changes from a BLE address via gatttool.

    Writes self._DATA_MODE_LISTEN to *handle* with --listen, waits up to
    *notification_timeout* seconds, then forwards each captured payload
    to delegate.handleNotification(). Retries with exponential backoff
    up to self.retries times; raises BluetoothBackendException on write
    failure, when not connected, or when all attempts are exhausted.
    """
    if not self.is_connected():
        raise BluetoothBackendException('Not connected to any device.')
    attempt = 0
    delay = 10
    _LOGGER.debug("Enter write_ble (%s)", current_thread())
    while attempt <= self.retries:
        cmd = "gatttool --device={} --addr-type={} --char-write-req -a {} -n {} --adapter={} --listen".format(
            self._mac, self.address_type, self.byte_to_handle(handle),
            self.bytes_to_string(self._DATA_MODE_LISTEN), self.adapter)
        _LOGGER.debug("Running gatttool with a timeout of %d: %s",
                      notification_timeout, cmd)
        # setsid puts gatttool in its own process group so the whole
        # group can be signalled, which --listen mode always requires.
        with Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE,
                   preexec_fn=os.setsid) as process:
            try:
                result = process.communicate(timeout=notification_timeout)[0]
                _LOGGER.debug("Finished gatttool")
            except TimeoutExpired:
                # send signal to the process group, because listening always hangs
                os.killpg(process.pid, signal.SIGINT)
                result = process.communicate()[0]
                _LOGGER.debug("Listening stopped forcefully after timeout.")
        result = result.decode("utf-8").strip(' \n\t')
        if "Write Request failed" in result:
            raise BluetoothBackendException('Error writing handle to sensor: {}'.format(result))
        _LOGGER.debug("Got %s from gatttool", result)
        # Parse the output to determine success
        if "successfully" in result:
            _LOGGER.debug("Exit write_ble with result (%s)", current_thread())
            # extract useful data.
            for element in self.extract_notification_payload(result):
                delegate.handleNotification(
                    handle, bytes([int(x, 16) for x in element.split()]))
            return True
        attempt += 1
        _LOGGER.debug("Waiting for %s seconds before retrying", delay)
        if attempt < self.retries:
            time.sleep(delay)
            delay *= 2
    raise BluetoothBackendException("Exit write_ble, no data ({})".format(current_thread()))
Listen for characteristics changes from a BLE address .
552
10
25,393
def check_backend() -> bool:
    """Check if gatttool is available on the system."""
    try:
        # Just try to run it; any OSError means the binary is missing.
        call('gatttool', stdout=PIPE, stderr=PIPE)
    except OSError as os_err:
        _LOGGER.error('gatttool not found: {}'.format(str(os_err)))
        return False
    return True
Check if gatttool is available on the system .
78
12
25,394
def bytes_to_string(raw_data: bytes, prefix: bool = False) -> str:
    """Convert a byte array to an upper-case hex string.

    With prefix=True the result is preceded by a literal '0x'.
    """
    hex_digits = ''.join(format(byte, "02x") for byte in raw_data).upper()
    if prefix:
        return '0x' + hex_digits
    return hex_digits
Convert a byte array to a hex string .
72
10
25,395
def decode_ast(registry, ast_json):
    """JSON decoder for BaseNodes.

    Dicts without a truthy "@type" key pass through unchanged; typed
    dicts are rebuilt via the node class looked up in *registry*.
    """
    node_type = ast_json.get("@type")
    if not node_type:
        return ast_json
    node_cls = registry.get_cls(node_type, tuple(ast_json["@fields"]))
    return node_cls(
        ast_json["children"],
        ast_json["field_references"],
        ast_json["label_references"],
        position=ast_json["@position"],
    )
JSON decoder for BaseNodes
108
7
25,396
def simplify_tree(tree, unpack_lists=True, in_list=False):
    """Recursively unpack single-item lists and objects where fields and
    labels only reference a single child.

    Mutates nodes in place while simplifying.
    """
    # TODO: copy (or (de)serialize)? outside this function?
    if isinstance(tree, BaseNode) and not isinstance(tree, Terminal):
        used_fields = [field for field in tree._fields
                       if getattr(tree, field, False)]
        if len(used_fields) == 1:
            # Candidate for collapsing the node into its only child.
            result = getattr(tree, used_fields[0])
        else:
            result = None
        if (len(used_fields) != 1
                or isinstance(tree, AliasNode)
                or (in_list and isinstance(result, list))):
            # Keep the node itself; simplify each populated field in place.
            result = tree
            for field in tree._fields:
                old_value = getattr(tree, field, None)
                if old_value:
                    setattr(
                        result,
                        field,
                        simplify_tree(old_value, unpack_lists=unpack_lists),
                    )
            return result
        assert result is not None
    elif isinstance(tree, list) and len(tree) == 1 and unpack_lists:
        result = tree[0]
    else:
        if isinstance(tree, list):
            result = [simplify_tree(el, unpack_lists=unpack_lists, in_list=True)
                      for el in tree]
        else:
            result = tree
        return result
    # A node or list was unwrapped above; keep simplifying the remainder.
    return simplify_tree(result, unpack_lists=unpack_lists)
Recursively unpack single - item lists and objects where fields and labels only reference a single child
302
20
25,397
def get_field(ctx, field):
    """Helper to get the value of a field.

    *field* can be a field name (string) or the node attribute itself.
    Callable attributes (non-alias rules) are invoked; CommonToken values
    are mapped back to the matching terminal node among ctx.children.
    """
    value = getattr(ctx, field, None) if isinstance(field, str) else field
    if callable(value):
        # when not alias needs to be called
        value = value()
    elif isinstance(value, CommonToken):
        # giving a name to lexer rules sets it to a token,
        # rather than the terminal node corresponding to that token
        # so we need to find it in children
        matches = (child for child in ctx.children
                   if getattr(child, "symbol", None) is value)
        value = next(matches)
    return value
Helper to get the value of a field
154
8
25,398
def get_field_names(ctx):
    """Get fields defined in an ANTLR context for a parser rule.

    This does not include labels and literals, only rule names and
    token names.
    """
    # TODO: check ANTLR parser template for full exclusion list
    machinery = ("accept", "enterRule", "exitRule", "getRuleIndex", "copyFrom")
    return [name for name in type(ctx).__dict__
            if not name.startswith("__") and name not in machinery]
Get fields defined in an ANTLR context for a parser rule
102
13
25,399
def get_label_names(ctx):
    """Get labels defined in an ANTLR context for a parser rule.

    Private names and the ANTLR runtime's own instance attributes are
    filtered out, leaving only user-defined labels.
    """
    runtime_attrs = (
        "children",
        "exception",
        "invokingState",
        "parentCtx",
        "parser",
        "start",
        "stop",
    )
    return [name for name in ctx.__dict__
            if not name.startswith("_") and name not in runtime_attrs]
Get labels defined in an ANTLR context for a parser rule
77
13