idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
4,100
def select_data(db_file, slab=None, facet=None):
    """Gather reaction data from a CATHUB-generated SQLite database.

    Parameters
    ----------
    db_file : str
        Path to the SQLite database file.
    slab : str, optional
        Restrict rows to chemical compositions containing this substring.
    facet : str or int, optional
        Restrict rows to this facet (only honored together with ``slab``).

    Returns
    -------
    list of tuple
        Rows of (chemical_composition, facet, reactants, products,
        reaction_energy).
    """
    con = sql.connect(db_file)
    cur = con.cursor()
    columns = ('chemical_composition, facet, reactants, products, '
               'reaction_energy')
    # Use parameterized queries instead of string concatenation to avoid
    # SQL injection through `slab`/`facet`.
    if slab and facet:
        cur.execute('select ' + columns + ' from reaction '
                    'where facet=? and chemical_composition LIKE ?;',
                    (str(facet), '%' + slab + '%'))
    elif slab:
        cur.execute('select ' + columns + ' from reaction '
                    'where chemical_composition LIKE ?;',
                    ('%' + slab + '%',))
    else:
        cur.execute('select ' + columns + ' from reaction;')
    data = cur.fetchall()
    con.close()  # the original leaked the connection handle
    return data
Gathers relevant data from SQL database generated by CATHUB .
184
14
4,101
def file_to_df(file_name):
    """Read a ``.csv`` or ``.tsv`` file into a pandas DataFrame.

    The file must contain a header row; the first column is assumed to be
    an index column and is dropped.

    Parameters
    ----------
    file_name : str
        Path to a ``.csv`` or ``.tsv`` file.

    Returns
    -------
    pandas.DataFrame

    Raises
    ------
    ValueError
        If the file extension is neither ``.csv`` nor ``.tsv``.
    """
    _, file_extension = os.path.splitext(file_name)
    if file_extension == '.csv':
        sep = ','
    elif file_extension == '.tsv':
        sep = '\t'
    else:
        # The original printed a message and then crashed with an
        # UnboundLocalError on `return df`; raise a meaningful error.
        raise ValueError(
            'Please provide valid csv or tsv file format with header names.')
    df = pd.read_csv(file_name, sep=sep, header=0).iloc[:, 1:]
    return df
Read in file and return pandas data_frame .
141
11
4,102
def db_to_df(db_file, slabs=None, facet=None):
    """Transform a CATHUB reaction database into a pandas DataFrame.

    Parameters
    ----------
    db_file : str
        Path to the SQLite database file (see ``select_data``).
    slabs : list of str, optional
        Slab composition substrings; one query is issued per slab.
    facet : str or int, optional
        Facet filter, forwarded to ``select_data``.

    Returns
    -------
    pandas.DataFrame
        Columns: system, facet, reactants, products, reaction_energy,
        labels — sorted by (facet, system) with a fresh index.
    """
    systems = []
    data = []
    if slabs:
        for slab in slabs:
            data_tmp = select_data(db_file, slab=slab, facet=facet)
            data.append(data_tmp)
            # first tuple element is the chemical composition
            subsystem = [tup[0] for i, tup in enumerate(data_tmp)]
            # assumes each slab query returns a single composition —
            # set() keeps only one representative
            systems.append(list(set(subsystem))[0])
    else:
        data_tmp = select_data(db_file)
        data.append(data_tmp)
    df = pd.DataFrame()
    system, facet, reactants, products, reaction_energy = [], [], [], [], []
    for entry in data:
        for reaction in entry:
            system.append(str(reaction[0]))
            facet.append(str(reaction[1]))
            # reactants/products are stored as stringified dicts in the db
            reactants_i = [molecule for molecule
                           in ast.literal_eval(reaction[2]).keys()]
            reactants.append(reactants_i)
            products_i = [molecule for molecule
                          in ast.literal_eval(reaction[3]).keys()]
            products.append(products_i)
            reaction_energy.append(float(reaction[4]))
    df[0] = system
    df[1] = facet
    df[2] = reactants
    df[4] = products
    df[5] = reaction_energy
    df.columns = ['system', 'facet', 'reactants', 'products',
                  'reaction_energy']
    labs = auto_labels(df)
    df['labels'] = labs
    df = df.sort_values(by=['facet', 'system'])
    df = df.reset_index(drop=True)
    return (df)
Transforms database to data frame .
397
7
4,103
def unique_reactions(df):
    """Identify the unique elementary reactions in *df*.

    Each reaction is represented as a flat list of reactant species,
    the separator string ``'-->'``, and the product species. The result
    is sorted by the string representation of each reaction.
    """
    seen = set()
    for row, reactant_species in enumerate(df['reactants']):
        combined = list(reactant_species) + ['-->'] + list(df['products'][row])
        # Deduplicate on the string form, since lists are unhashable.
        seen.add(str(combined))
    return [ast.literal_eval(text) for text in sorted(seen)]
Identifies unique elementary reactions in data frame .
142
9
4,104
def get_helmholtz_energy(self, temperature, electronic_energy=0,
                         verbose=False):
    """Return the Helmholtz free energy of an adsorbed molecule.

    Builds an ASE ``HarmonicThermo`` from ``self.vib_energies`` and
    evaluates it at *temperature*; the result is cached on
    ``self.helmholtz_energy``.

    Parameters
    ----------
    temperature : float
        Temperature at which to evaluate the free energy.
    electronic_energy : float, optional
        Potential (electronic) energy added to the vibrational terms.
    verbose : bool, optional
        Forwarded to ASE to print the individual contributions.
    """
    thermo_object = HarmonicThermo(vib_energies=self.vib_energies,
                                   potentialenergy=electronic_energy)
    self.helmholtz_energy = thermo_object.get_helmholtz_energy(
        temperature=temperature, verbose=verbose)
    return (self.helmholtz_energy)
Returns the Helmholtz energy of an adsorbed molecule .
97
13
4,105
def get_vib_energies(self):
    """Return the molecule's vibrational energies in eV.

    Reads the wavenumbers stored under ``self.molecule_dict[self.name]``
    and converts them using the module-level ``cm2ev`` factor
    (presumably cm^-1 -> eV).
    """
    vibs = self.molecule_dict[self.name]['vibrations']
    vibs = np.array(vibs) * cm2ev
    return (vibs)
Returns a list of vibration in energy units eV .
52
11
4,106
def set_intermediates(self, intermediates, betas=None, transition_states=None):
    """Set up reaction intermediates and their electrochemical metadata.

    Parameters
    ----------
    intermediates : list
        The reaction intermediates, in order.
    betas : list of float, optional
        Symmetry factors per intermediate; defaults to all zeros.
    transition_states : list of bool, optional
        Whether each entry is a transition state; defaults to all False.

    Raises
    ------
    ValueError
        If the per-intermediate lists do not all have the same length.

    Returns
    -------
    bool
        Always True on success.
    """
    self.intermediates = intermediates
    self.betas = betas
    self.transition_states = transition_states
    if self.corrections is None:
        self.net_corrections = [0.0 for _ in intermediates]
    if not self.betas:
        self.betas = [0.0 for _ in intermediates]
    if not self.transition_states:
        self.transition_states = [False for _ in intermediates]
    # All per-intermediate lists must line up one-to-one.
    props = [len(self.intermediates), len(self.net_corrections),
             len(self.transition_states), len(self.betas)]
    if len(set(props)) > 1:
        # Original message read "... and , betas" — the comma sat in the
        # wrong string fragment.
        raise ValueError('intermediates, net_corrections, transition_states '
                         'and betas all have to have the same length')
    self.get_corrections()
    return True
Sets up intermediates and specifies whether it's an electrochemical step. Either provide individual contributions or net contributions. If both are given, only the net contributions are used.
217
34
4,107
def debugger():
    """Return the current debugger instance, creating one if needed.

    A new ``Sdb`` is created (and cached in the module-level ``_current``
    slot) when there is no instance yet or the existing one is inactive.
    """
    sdb = _current[0]
    if sdb is None or not sdb.active:
        sdb = _current[0] = Sdb()
    return sdb
Return the current debugger instance or create if none .
41
10
4,108
def global_matches(self, text):
    """Compute completions for a simple name.

    Returns every name in ``self.namespace`` that starts with *text*,
    excluding the ``__builtins__`` entry.
    """
    prefix_len = len(text)
    return [name for name in self.namespace
            if name[:prefix_len] == text and name != "__builtins__"]
Compute matches when text is a simple name. Return a list of all keywords, built-in functions and names currently defined in self.namespace that match.
54
31
4,109
def dec2str(n):
    """Convert a decimal number to its string representation.

    Zero-pads the hex digits of *n* to an even length and delegates to
    ``hex2str``. The ``rstrip('L')`` strips the long-integer suffix that
    ``hex()`` emitted on Python 2; it is a no-op on Python 3.
    """
    s = hex(int(n))[2:].rstrip('L')
    if len(s) % 2 != 0:
        s = '0' + s
    return hex2str(s)
decimal number to string .
53
6
4,110
def bin2str(b):
    """Convert a binary-digit string to the character string it encodes.

    Every 8-bit group of *b* is interpreted as one character code.
    """
    return ''.join(chr(int(b[i:i + 8], 2)) for i in range(0, len(b), 8))
Binary to string .
56
5
4,111
def n2s(n):
    """Convert a non-negative integer to its big-endian byte string.

    Parameters
    ----------
    n : int
        Non-negative integer to convert.

    Returns
    -------
    bytes
        Big-endian representation, at least one byte long.
    """
    s = hex(n)[2:].rstrip("L")
    if len(s) % 2 != 0:
        s = "0" + s
    # str.decode("hex") only exists on Python 2; bytes.fromhex is the
    # Python 3 equivalent.
    return bytes.fromhex(s)
Number to string .
52
4
4,112
def s2b(s):
    """Convert a character string to its binary representation.

    Each character becomes an 8-digit, zero-padded binary group.
    """
    return "".join(format(ord(ch), "08b") for ch in s)
String to binary .
46
4
4,113
def long_to_bytes(n, blocksize=0):
    """Convert an integer to a big-endian byte string.

    The result uses the minimal number of bytes (at least one), and is
    left-padded with NUL bytes up to a multiple of *blocksize* when
    *blocksize* is positive.
    """
    n = int(n)
    if n <= 0:
        # Mirrors the original: zero (and negatives) encode as one NUL.
        result = b'\000'
    else:
        # int.to_bytes does the chunking loop in C.
        result = n.to_bytes((n.bit_length() + 7) // 8, 'big')
    if blocksize > 0 and len(result) % blocksize:
        padding = blocksize - len(result) % blocksize
        result = padding * b'\000' + result
    return result
Convert an integer to a byte string .
199
9
4,114
def make_request(endpoint, **kwargs):
    """Send an HTTP request to the NApps server.

    Keyword arguments:
        json: payload sent as JSON (or as form data when uploading).
        package: file object to upload as multipart ``file``.
        method: HTTP verb, default ``GET``.

    Exits the process when the server cannot be reached.
    """
    data = kwargs.get('json', [])
    package = kwargs.get('package', None)
    method = kwargs.get('method', 'GET')
    # Resolve e.g. requests.get / requests.post from the verb name.
    function = getattr(requests, method.lower())
    try:
        if package:
            response = function(endpoint, data=data, files={'file': package})
        else:
            response = function(endpoint, json=data)
    except requests.exceptions.ConnectionError:
        LOG.error("Couldn't connect to NApps server %s.", endpoint)
        sys.exit(1)
    return response
Send a request to server .
144
6
4,115
def get_napps(self):
    """Return the list of all NApps registered on the NApps server.

    Exits the process when the server answers with a non-200 status.
    """
    endpoint = os.path.join(self._config.get('napps', 'api'), 'napps', '')
    res = self.make_request(endpoint)
    if res.status_code != 200:
        msg = 'Error getting NApps from server (%s) - %s'
        LOG.error(msg, res.status_code, res.reason)
        sys.exit(1)
    return json.loads(res.content.decode('utf-8'))['napps']
Get all NApps from the server .
122
8
4,116
def get_napp(self, username, name):
    """Return metadata for ``username/name`` or None when not found.

    Raises
    ------
    KytosException
        For any server response other than 200 or 404.
    """
    endpoint = os.path.join(self._config.get('napps', 'api'), 'napps',
                            username, name, '')
    res = self.make_request(endpoint)
    if res.status_code == 404:  # We need to know if NApp is not found
        return None
    if res.status_code != 200:
        msg = 'Error getting %s/%s from server: (%d) - %s'
        raise KytosException(msg % (username, name,
                                    res.status_code, res.reason))
    return json.loads(res.content)
Return napp metadata or None if not found .
143
10
4,117
def reload_napps(self, napps=None):
    """Reload the kytos core code plus the given NApps.

    Parameters
    ----------
    napps : list of tuple, optional
        (username, name) pairs; each is reloaded individually after the
        global ``reload/all`` call.

    Raises
    ------
    KytosException
        When a per-NApp reload endpoint answers with a non-200 status.

    Returns the body of the last reload response.
    """
    if napps is None:
        napps = []
    # Hoisted out of the loop: the API base does not change per NApp.
    api = self._config.get('kytos', 'api')
    endpoint = os.path.join(api, 'api', 'kytos', 'core', 'reload', 'all')
    response = self.make_request(endpoint)
    for napp in napps:
        endpoint = os.path.join(api, 'api', 'kytos', 'core', 'reload',
                                napp[0], napp[1])
        response = self.make_request(endpoint)
        if response.status_code != 200:
            # was: "Module not founded" (typo)
            raise KytosException('Error reloading the napp: Module not found'
                                 ' or could not be imported')
    return response.content
Reload a specific NApp or all Napps .
203
11
4,118
def upload_napp(self, metadata, package):
    """Upload a NApp package to the NApps server.

    Sends *metadata* plus the *package* file via POST, authenticating
    with the configured token. On failure the token is cleared and the
    process exits.
    """
    endpoint = os.path.join(self._config.get('napps', 'api'), 'napps', '')
    metadata['token'] = self._config.get('auth', 'token')
    request = self.make_request(endpoint, json=metadata, package=package,
                                method="POST")
    if request.status_code != 201:
        KytosConfig().clear_token()
        LOG.error("%s: %s", request.status_code, request.reason)
        sys.exit(1)
    # WARNING: this will change in future versions, when 'author' will get
    # removed.
    username = metadata.get('username', metadata.get('author'))
    name = metadata.get('name')
    print("SUCCESS: NApp {}/{} uploaded.".format(username, name))
Upload the napp from the current directory to the napps server .
210
14
4,119
def register(self, user_dict):
    """Register a new user on the NApps server.

    Sends *user_dict* via POST to the ``users`` endpoint and returns the
    decoded response body.
    """
    api = self._config.get('napps', 'api')
    endpoint = os.path.join(api, 'users', '')
    response = self.make_request(endpoint, method='POST', json=user_dict)
    return response.content.decode('utf-8')
Send an user_dict to NApps server using POST request .
76
13
4,120
def on_message(self, message):
    """Process a received Velbus message addressed to this module.

    Channel-name parts are dispatched to the name handler, module-type
    messages to the type handler; everything else goes to the subclass
    hook ``_on_message``.
    """
    if message.address != self._address:
        # Ignore traffic for other modules on the bus.
        return
    if isinstance(message, velbus.ChannelNamePart1Message) or isinstance(message, velbus.ChannelNamePart1Message2):
        self._process_channel_name_message(1, message)
    elif isinstance(message, velbus.ChannelNamePart2Message) or isinstance(message, velbus.ChannelNamePart2Message2):
        self._process_channel_name_message(2, message)
    elif isinstance(message, velbus.ChannelNamePart3Message) or isinstance(message, velbus.ChannelNamePart3Message2):
        self._process_channel_name_message(3, message)
    elif isinstance(message, velbus.ModuleTypeMessage):
        self._process_module_type_message(message)
    else:
        self._on_message(message)
Process received message
203
3
4,121
def load(self, callback):
    """Start retrieving the module status and channel names.

    Parameters
    ----------
    callback : callable or None
        Invoked when loading completes; ``None`` becomes a no-op.

    The status/name requests are only issued for the first caller;
    later callers simply queue their callback.
    """
    if callback is None:
        def callback():
            """No-op"""
    if len(self._loaded_callbacks) == 0:
        self._request_module_status()
        self._request_channel_name()
    # The original printed a "+++..." debug banner in the else branch;
    # that leftover has been removed.
    self._loaded_callbacks.append(callback)
    self._load()
Retrieve names of channels
91
5
4,122
def _name_messages_complete(self):
    """Return True when all channel-name messages have been received.

    Every channel must have all three name parts stored as strings in
    ``self._name_data``; missing keys count as incomplete.
    """
    for channel in range(1, self.number_of_channels() + 1):
        try:
            for name_index in range(1, 4):
                if not isinstance(self._name_data[channel][name_index], str):
                    return False
        except Exception:
            # Missing channel/part keys mean data is still incomplete.
            return False
    return True
Check if all name messages have been received
79
8
4,123
def as_json(self):
    # type: () -> dict
    """Represent the effect as a JSON-ready dict.

    Folds the current ``applyCss`` flag into the config and stores the
    config under the ``config`` key of the JSON payload.
    """
    config = self._config
    config['applyCss'] = self.applyCss
    self._json['config'] = config
    return self._json
Represent effect as JSON dict .
50
6
4,124
def _delete_stale(self):
    """Delete files left in ``self._stale_files`` plus empty parent dirs.

    A file is deleted only when its current hash still matches the
    recorded one; a mismatch means it was modified after download, which
    aborts the run with a GrablibError instead of destroying user edits.
    """
    for name, hash_ in self._stale_files.items():
        path = self.download_root.joinpath(name)
        if not path.exists():
            continue
        current_hash = self._path_hash(path)
        if current_hash == hash_:
            progress_logger.info('deleting: %s which is stale...', name)
            path.unlink()
            self._stale_deleted += 1
            # Walk up, removing now-empty directories until we reach the
            # download root or a non-empty directory.
            while True:
                path = path.parent
                if path == self.download_root or list(path.iterdir()):
                    break
                progress_logger.info('deleting: %s which is stale..',
                                     path.relative_to(self.download_root))
                path.rmdir()
        else:
            progress_logger.error(
                'Not deleting "%s" which is in the lock file but not the definition '
                'file, however appears to have been modified since it was downloaded. '
                'Please check and delete the file manually.', name)
            raise GrablibError('stale file modified')
Delete files left in self . _stale_files . Also delete their directories if empty .
242
19
4,125
def _file_path(self, src_path, dest, regex):
    """Match *src_path* against *regex* and build the destination path.

    Named groups from the regex (or the last positional group, as
    ``{filename}``) are substituted into *dest*. Raises GrablibError
    when the destination resolves to nothing.

    NOTE(review): assumes the regex always matches src_path — `m` would
    be None otherwise and `.groupdict()` would raise; confirm callers
    pre-filter.
    """
    m = re.search(regex, src_path)
    if dest.endswith('/') or dest == '':
        dest += '(unknown)'
    names = m.groupdict()
    if not names and m.groups():
        names = {'filename': m.groups()[-1]}
    for name, value in names.items():
        dest = dest.replace('{%s}' % name, value)
    # remove starting slash so path can't be absolute
    dest = dest.strip(' /')
    if not dest:
        progress_logger.error('destination path must not resolve to be null')
        raise GrablibError('bad path')
    new_path = self.download_root.joinpath(dest)
    # raises ValueError if the path would escape download_root
    new_path.relative_to(self.download_root)
    return new_path
check src_path complies with regex and generate new filename
200
12
4,126
def _lock(self, url: str, name: str, hash_: str):
    """Record a downloaded file in the new lock data.

    Also removes *name* from ``_stale_files``; whatever remains there at
    the end of the run is stale and can be deleted.
    """
    self._new_lock.append({
        'url': url,
        'name': name,
        'hash': hash_,
    })
    self._stale_files.pop(name, None)
Add details of the files downloaded to _new_lock so they can be saved to the lock file . Also remove path from _stale_files whatever remains at the end therefore is stale and can be deleted .
65
43
4,127
def setup_smtp(self, host, port, user, passwd, recipients, **kwargs):
    """Configure the crash reporter to send reports via SMTP email.

    Extra keyword arguments (e.g. ``timeout``, ``from``) are stored in
    the SMTP settings; an unparsable timeout falls back to None.
    """
    self._smtp = kwargs
    self._smtp.update({'host': host, 'port': port, 'user': user,
                       'passwd': passwd, 'recipients': recipients})
    try:
        self._smtp['timeout'] = int(kwargs.get('timeout',
                                               SMTP_DEFAULT_TIMEOUT))
    except Exception as e:
        logging.error(e)
        self._smtp['timeout'] = None
    # Sender defaults to the authenticating user.
    self._smtp['from'] = kwargs.get('from', user)
Set up the crash reporter to send reports via email using SMTP
160
13
4,128
def enable(self):
    """Enable the crash reporter by installing the exception hook.

    Also tries to submit any stored offline reports; if some remain and
    the watcher is enabled, the background watcher thread is started.
    Creates the report directory when missing.
    """
    if not CrashReporter.active:
        CrashReporter.active = True
        # Store this function so we can set it back if the CrashReporter
        # is deactivated
        self._excepthook = sys.excepthook
        sys.excepthook = self.exception_handler
        self.logger.info('CrashReporter: Enabled')
        if self.report_dir:
            if os.path.exists(self.report_dir):
                if self.get_offline_reports():
                    # First attempt to send the reports, if that fails
                    # then start the watcher
                    self.submit_offline_reports()
                    remaining_reports = len(self.get_offline_reports())
                    if remaining_reports and self.watcher_enabled:
                        self.start_watcher()
            else:
                os.makedirs(self.report_dir)
Enable the crash reporter . CrashReporter is defaulted to be enabled on creation .
190
17
4,129
def disable(self):
    """Disable the crash reporter; no reports will be sent or saved.

    Restores the exception hook saved by :meth:`enable` and stops the
    offline-report watcher thread.
    """
    if CrashReporter.active:
        CrashReporter.active = False
        # Restore the original excepthook
        sys.excepthook = self._excepthook
        self.stop_watcher()
        self.logger.info('CrashReporter: Disabled')
Disable the crash reporter . No reports will be sent or saved .
65
13
4,130
def start_watcher(self):
    """Start the thread that periodically retries offline report uploads.

    If a watcher thread is already alive it is simply kept running;
    otherwise a new daemon thread is created and started.
    """
    # Bug fix: the original tested `self._watcher.is_alive` without
    # calling it — a bound method is always truthy, so a dead watcher
    # was never restarted.
    if self._watcher and self._watcher.is_alive():
        self._watcher_running = True
    else:
        self.logger.info('CrashReporter: Starting watcher.')
        self._watcher = Thread(target=self._watcher_thread,
                               name='offline_reporter')
        self._watcher.daemon = True  # setDaemon() is deprecated
        self._watcher_running = True
        self._watcher.start()
Start the watcher that periodically checks for offline reports and attempts to upload them .
112
16
4,131
def stop_watcher(self):
    """Signal the offline-report watcher thread to stop.

    Does nothing when no watcher thread exists.
    """
    if not self._watcher:
        return
    self._watcher_running = False
    self.logger.info('CrashReporter: Stopping watcher.')
Stop the watcher thread that tries to send offline reports .
42
12
4,132
def subject(self):
    """Return the subject line used for crash-report emails.

    Includes the application name and version when both are set.
    """
    name = self.application_name
    version = self.application_version
    if name and version:
        return 'Crash Report - {name} (v{version})'.format(name=name,
                                                           version=version)
    return 'Crash Report'
Return a string to be used as the email subject line .
59
12
4,133
def store_report(self, payload):
    """Save *payload* as a JSON crash report in a FIFO ring of files.

    Existing reports are renamed upward (1 -> 2, 2 -> 3, ...) so the
    newest report is always number 1; anything past
    ``offline_report_limit`` is discarded.

    Returns the path of the newly written report.
    """
    offline_reports = self.get_offline_reports()
    if offline_reports:
        # Increment the name of all existing reports 1 --> 2, 2 --> 3 etc.
        for ii, report in enumerate(reversed(offline_reports)):
            rpath, ext = os.path.splitext(report)
            # last run of digits embedded in the filename is the index
            n = int(re.findall('(\d+)', rpath)[-1])
            new_name = os.path.join(self.report_dir,
                                    self._report_name % (n + 1)) + ext
            shutil.copy2(report, new_name)
            os.remove(report)
        # Delete the oldest report
        if len(offline_reports) >= self.offline_report_limit:
            oldest = glob.glob(os.path.join(
                self.report_dir,
                self._report_name % (self.offline_report_limit + 1) + '*'))[0]
            os.remove(oldest)
    new_report_path = os.path.join(self.report_dir,
                                   self._report_name % 1 + '.json')
    # Write a new report
    with open(new_report_path, 'w') as _f:
        json.dump(payload, _f)
    return new_report_path
Save the crash report to a file, keeping the last offline_report_limit files in a cyclical FIFO buffer. The newest crash report is always named 01.
290
34
4,134
def _watcher_thread(self):
    """Thread body: periodically retry uploading offline crash reports.

    Exits when every report has been submitted or when
    ``stop_watcher()`` clears the running flag.
    """
    while 1:
        time.sleep(self.check_interval)
        if not self._watcher_running:
            break
        self.logger.info('CrashReporter: Attempting to send offline reports.')
        self.submit_offline_reports()
        remaining_reports = len(self.get_offline_reports())
        if remaining_reports == 0:
            break
    self._watcher = None
    self.logger.info('CrashReporter: Watcher stopped.')
Periodically attempt to upload the crash reports . If any upload method is successful delete the saved reports .
112
21
4,135
def colorize(style, msg, resp):
    """Apply runserver-style coloring for *resp*'s HTTP status to *msg*.

    Taken and modified from django.utils.log.ServerFormatter.format.
    """
    code = resp.status.split(maxsplit=1)[0]
    leading = code[0]
    if leading == '2':
        # 2XX first, since it should be the common case.
        return style.HTTP_SUCCESS(msg)
    if leading == '1':
        return style.HTTP_INFO(msg)
    if code == '304':
        return style.HTTP_NOT_MODIFIED(msg)
    if leading == '3':
        return style.HTTP_REDIRECT(msg)
    if code == '404':
        return style.HTTP_NOT_FOUND(msg)
    if leading == '4':
        return style.HTTP_BAD_REQUEST(msg)
    # Any 5XX, or any other response.
    return style.HTTP_SERVER_ERROR(msg)
Taken and modified from django.utils.log.ServerFormatter.format to mimic runserver's styling.
199
25
4,136
def access(self, resp, req, environ, request_time):
    """Write an access-log entry, applying the custom styling.

    Skips logging entirely when no access log is configured. Logging
    failures are reported through the error log instead of propagating.
    """
    if not (self.cfg.accesslog or self.cfg.logconfig or self.cfg.syslog):
        return
    msg = self.make_access_message(resp, req, environ, request_time)
    try:
        self.access_log.info(msg)
    except Exception:
        # was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt
        self.error(traceback.format_exc())
Override to apply styling on access logs .
94
8
4,137
def update(cls, args):
    """Ask the kytos server to update the Web UI.

    args: docopt-style dict; an optional ``<version>`` value selects a
    specific release. Connection failures are logged and swallowed.
    """
    kytos_api = KytosConfig().config.get('kytos', 'api')
    url = f"{kytos_api}api/kytos/core/web/update"
    version = args["<version>"]
    if version:
        url += f"/{version}"
    try:
        result = requests.post(url)
    except (HTTPError, URLError, requests.exceptions.ConnectionError):
        LOG.error("Can't connect to server: %s", kytos_api)
        return
    if result.status_code != 200:
        LOG.info("Error while updating web ui: %s", result.content)
    else:
        LOG.info("Web UI updated.")
Call the method to update the Web UI .
169
9
4,138
def disable(cls, args):
    """Disable subcommand: disable the given NApps.

    ``args['all']`` disables every currently enabled NApp; otherwise the
    (username, name) tuples in ``args['<napp>']`` are disabled.
    """
    mgr = NAppsManager()
    if args['all']:
        napps = mgr.get_enabled()
    else:
        napps = args['<napp>']
    for napp in napps:
        mgr.set_napp(*napp)
        LOG.info('NApp %s:', mgr.napp_id)
        cls.disable_napp(mgr)
Disable subcommand .
99
4
4,139
def disable_napp(mgr):
    """Disable the NApp currently selected in *mgr*, if it is enabled."""
    if mgr.is_enabled():
        LOG.info(' Disabling...')
        mgr.disable()
        LOG.info(' Disabled.')
    else:
        LOG.error(" NApp isn't enabled.")
Disable a NApp .
58
5
4,140
def enable(cls, args):
    """Enable subcommand: enable the given NApps.

    ``args['all']`` enables every currently disabled NApp; otherwise the
    tuples in ``args['<napp>']`` are enabled.
    """
    mgr = NAppsManager()
    if args['all']:
        napps = mgr.get_disabled()
    else:
        napps = args['<napp>']
    cls.enable_napps(napps)
Enable subcommand .
61
4
4,141
def enable_napp(cls, mgr):
    """Enable the NApp currently selected in *mgr*, logging failures."""
    try:
        if not mgr.is_enabled():
            LOG.info(' Enabling...')
            mgr.enable()
            LOG.info(' Enabled.')
    except (FileNotFoundError, PermissionError) as exception:
        LOG.error(' %s', exception)
Install one NApp using NAppManager object .
75
10
4,142
def enable_napps(cls, napps):
    """Enable each (username, name) NApp tuple in *napps*."""
    mgr = NAppsManager()
    for napp in napps:
        mgr.set_napp(*napp)
        LOG.info('NApp %s:', mgr.napp_id)
        cls.enable_napp(mgr)
Enable a list of NApps .
70
7
4,143
def uninstall(cls, args):
    """Uninstall and delete the given NApps, disabling them first."""
    mgr = NAppsManager()
    for napp in args['<napp>']:
        mgr.set_napp(*napp)
        LOG.info('NApp %s:', mgr.napp_id)
        if mgr.is_installed():
            if mgr.is_enabled():
                cls.disable_napp(mgr)
            LOG.info(' Uninstalling...')
            mgr.uninstall()
            LOG.info(' Uninstalled.')
        else:
            LOG.error(" NApp isn't installed.")
Uninstall and delete NApps .
133
7
4,144
def install_napps(cls, napps):
    """Install local or remote NApps, recursing into dependencies.

    Each (username, name) tuple is installed (local copy first, then the
    NApps server) and enabled; its dependencies are installed the same
    way. NApps that fail with KytosException are skipped.
    """
    mgr = NAppsManager()
    for napp in napps:
        mgr.set_napp(*napp)
        LOG.info(' NApp %s:', mgr.napp_id)
        if mgr.is_installed():
            # Bug fix: this branch used to log "Napp already enabled.",
            # which describes the wrong condition.
            LOG.warning(' Napp already installed.')
            continue
        try:
            cls.install_napp(mgr)
            if not mgr.is_enabled():
                cls.enable_napp(mgr)
                napp_dependencies = mgr.dependencies()
                if napp_dependencies:
                    LOG.info('Installing Dependencies:')
                    cls.install_napps(napp_dependencies)
            else:
                LOG.warning(' Napp already enabled.')
        except KytosException:
            continue
Install local or remote NApps .
182
7
4,145
def install_napp(cls, mgr):
    """Install a NApp, trying a local copy first, then the NApps server.

    Raises
    ------
    KytosException
        When the NApp is found neither locally nor remotely.
    """
    try:
        LOG.info(' Searching local NApp...')
        mgr.install_local()
        LOG.info(' Found and installed.')
    except FileNotFoundError:
        LOG.info(' Not found. Downloading from NApps Server...')
        try:
            mgr.install_remote()
            LOG.info(' Downloaded and installed.')
            return
        except HTTPError as exception:
            if exception.code == 404:
                LOG.error(' NApp not found.')
            else:
                LOG.error(' NApps Server error: %s', exception)
        except URLError as exception:
            LOG.error(' NApps Server error: %s', str(exception.reason))
        raise KytosException("NApp not found.")
Install a NApp .
175
5
4,146
def search(cls, args):
    """Search the NApps server for NApps matching a shell-style pattern."""
    # Translate shell wildcard '*' into regex '.*', escaping the rest.
    safe_shell_pat = re.escape(args['<pattern>']).replace(r'\*', '.*')
    pat_str = '.*{}.*'.format(safe_shell_pat)
    pattern = re.compile(pat_str, re.IGNORECASE)
    remote_json = NAppsManager.search(pattern)
    remote = set()
    for napp in remote_json:
        # WARNING: This will be changed in future versions, when 'author'
        # will be removed.
        username = napp.get('username', napp.get('author'))
        remote.add(((username, napp.get('name')), napp.get('description')))
    cls._print_napps(remote)
Search for NApps in NApps server matching a pattern .
184
12
4,147
def _print_napps(cls, napp_list):
    """Format ((user, name), description) pairs and print them.

    Prepends an ``[ie]``-style status column reflecting whether each
    NApp is installed and/or enabled locally.
    """
    mgr = NAppsManager()
    enabled = mgr.get_enabled()
    installed = mgr.get_installed()
    napps = []
    for napp, desc in sorted(napp_list):
        status = 'i' if napp in installed else '-'
        status += 'e' if napp in enabled else '-'
        status = '[{}]'.format(status)
        name = '{}/{}'.format(*napp)
        napps.append((status, name, desc))
    cls.print_napps(napps)
Format the NApp list to be printed .
140
9
4,148
def list(cls, args):  # pylint: disable=unused-argument
    """List installed NApps with status, id[:version] and description."""
    mgr = NAppsManager()
    # Add status
    napps = [napp + ('[ie]',) for napp in mgr.get_enabled()]
    napps += [napp + ('[i-]',) for napp in mgr.get_disabled()]
    # Sort, add description and reorder columns
    napps.sort()
    napps_ordered = []
    for user, name, status in napps:
        description = mgr.get_description(user, name)
        version = mgr.get_version(user, name)
        napp_id = f'{user}/{name}'
        if version:
            napp_id += f':{version}'
        napps_ordered.append((status, napp_id, description))
    cls.print_napps(napps_ordered)
List all installed NApps and inform whether they are enabled .
202
12
4,149
def print_napps(napps):
    """Pretty-print (status, napp_id, description) rows as a table.

    Column widths adapt to the longest entry and the terminal width;
    over-long descriptions are truncated with an ellipsis.
    """
    import shutil  # local import: only needed for the terminal size

    if not napps:
        print('No NApps found.')
        return
    stat_w = 6  # We already know the size of Status col
    name_w = max(len(n[1]) for n in napps)
    desc_w = max(len(n[2]) for n in napps)
    # Bug fix: `os.popen('stty size')` raised IndexError whenever stdout
    # was not a tty (pipes, CI); shutil.get_terminal_size has a fallback.
    term_w = shutil.get_terminal_size().columns
    remaining = max(0, int(term_w) - stat_w - name_w - 6)
    desc_w = min(desc_w, remaining)
    widths = (stat_w, name_w, desc_w)
    header = '\n{:^%d} | {:^%d} | {:^%d}' % widths
    row = '{:^%d} | {:<%d} | {:<%d}' % widths
    print(header.format('Status', 'NApp ID', 'Description'))
    print('=+='.join('=' * w for w in widths))
    for status, name, desc in napps:
        desc = (desc[:desc_w - 3] + '...') if len(desc) > desc_w else desc
        print(row.format(status, name, desc))
    print('\nStatus: (i)nstalled, (e)nabled\n')
Print status name and description .
325
6
4,150
def delete(args):
    """Delete NApps from the NApps server, logging any server errors."""
    mgr = NAppsManager()
    for napp in args['<napp>']:
        mgr.set_napp(*napp)
        LOG.info('Deleting NApp %s from server...', mgr.napp_id)
        try:
            mgr.delete()
            LOG.info(' Deleted.')
        except requests.HTTPError as exception:
            # 405 means the server does not allow deletion yet.
            if exception.response.status_code == 405:
                LOG.error('Delete Napp is not allowed yet.')
            else:
                msg = json.loads(exception.response.content)
                LOG.error(' Server error: %s - ', msg['error'])
Delete NApps from server .
149
6
4,151
def reload(cls, args):
    """Reload NApps code ('all' reloads every NApp)."""
    LOG.info('Reloading NApps...')
    mgr = NAppsManager()
    try:
        if args['all']:
            mgr.reload(None)
        else:
            napps = args['<napp>']
            mgr.reload(napps)
        LOG.info('\tReloaded.')
    except requests.HTTPError as exception:
        if exception.response.status_code != 200:
            msg = json.loads(exception.response.content)
            LOG.error('\tServer error: %s - ', msg['error'])
Reload NApps code .
132
6
4,152
def choices(self):
    """Retrieve the parts that may be referenced by this ReferenceProperty.

    Walks from this instance's part to its model, reads the referenced
    model id stored in the model-side property, and lists that model's
    parts. Performs three API calls in total.
    """
    # from the reference property (instance) we need to get the value of
    # the reference property in the model; there the value is set to the
    # ID of the model from which we can choose parts
    model_parent_part = self.part.model()  # makes single part call
    property_model = model_parent_part.property(self.name)
    referenced_model = self._client.model(pk=property_model._value['id'])  # makes single part call
    possible_choices = self._client.parts(model=referenced_model)  # makes multiple parts call
    return possible_choices
Retrieve the parts that you can reference for this ReferenceProperty .
144
13
4,153
def _generate_notebook_by_tag_body(notebook_object, dict_by_tag):
    """Generate the body of the page where notebooks are grouped by tag.

    For each non-signal-type tag (sorted alphabetically) an HTML table is
    built with one row per notebook and appended to *notebook_object* as
    a markdown cell.

    NOTE(review): notebook paths are split on backslashes — assumes
    Windows-style paths with the layout ``...\\<type>\\<name>&<title>``;
    confirm against the caller.
    """
    tag_keys = list(dict_by_tag.keys())
    tag_keys.sort()
    for tag in tag_keys:
        if tag.lower() not in SIGNAL_TYPE_LIST:
            markdown_cell = group_tag_code.TAG_TABLE_HEADER
            # The header template contains the placeholder text "Tag i".
            markdown_cell = markdown_cell.replace("Tag i", tag)
            for notebook_file in dict_by_tag[tag]:
                split_path = notebook_file.split("\\")
                notebook_type = split_path[-2]
                notebook_name = split_path[-1].split("&")[0]
                notebook_title = split_path[-1].split("&")[1]
                markdown_cell += "\t<tr>\n\t\t<td width='20%' class='header_image_color_" + str(NOTEBOOK_KEYS[notebook_type]) + "'><img " "src='../../images/icons/" + notebook_type.title() + ".png' width='15%'>\n\t\t</td>"
                markdown_cell += "\n\t\t<td width='60%' class='center_cell open_cell_light'>" + notebook_title + "\n\t\t</td>"
                markdown_cell += "\n\t\t<td width='20%' class='center_cell'>\n\t\t\t<a href='" "../" + notebook_type.title() + "/" + notebook_name + "'><div class='file_icon'></div></a>\n\t\t</td>\n\t</tr>"
            markdown_cell += "</table>"
            # ==================== Insertion of HTML table in a new Notebook cell ======================
            notebook_object["cells"].append(nb.v4.new_markdown_cell(markdown_cell))
Internal function that is used for generation of the page where notebooks are organized by tag values .
459
18
4,154
def add_markdown_cell(self, content, tags=None):
    """Append a markdown cell with *content* to the Notebook object.

    *tags* is stored in the cell metadata (may be None).
    """
    self.notebook["cells"].append(
        nb.v4.new_markdown_cell(content, **{"metadata": {"tags": tags}}))
Class method responsible for adding a markdown cell with content content to the Notebook object .
60
18
4,155
def add_code_cell(self, content, tags=None):
    """Append a code cell with *content* to the Notebook object.

    *tags* is stored in the cell metadata (may be None).
    """
    self.notebook["cells"].append(
        nb.v4.new_code_cell(content, **{"metadata": {"tags": tags}}))
Class method responsible for adding a code cell with content content to the Notebook object .
58
17
4,156
def is_tracking_shield_displayed(self):
    """Return True when the Tracking Protection shield is displayed.

    Firefox 63+ exposes the shield via the ``active`` attribute of a
    dedicated element; older versions use the ``state`` attribute of the
    ``tracking-protection-icon`` element.
    """
    with self.selenium.context(self.selenium.CONTEXT_CHROME):
        if self.window.firefox_version >= 63:  # Bug 1471713, 1476218
            el = self.root.find_element(*self._tracking_protection_shield_locator)
            return el.get_attribute("active") is not None
        el = self.root.find_element(By.ID, "tracking-protection-icon")
        return bool(el.get_attribute("state"))
Tracking Protection shield .
129
5
4,157
def validate_registration(self):
    """Check whether this device/username is registered with the bridge.

    Returns False when the bridge answers with an error payload, True
    otherwise.
    """
    endpoint = '/api/%s' % self.username
    response = self.make_request('GET', endpoint)
    return 'error' not in response
Checks if the device + username have already been registered with the bridge .
46
15
4,158
def connect(self):
    """Register this device + username with the Hue bridge.

    Returns True when the pair is already registered. On an error
    response an Exception is raised with the bridge's message (error
    type 101 means the link button was not pressed).
    """
    # Don't try to register if we already have
    if self.validate_registration():
        return True
    body = {
        'devicetype': self.device_type,
        'username': self.username,
    }
    response = self.make_request('POST', '/api', body)
    if 'error' in response:
        if response['error']['type'] == 101:
            msg = 'Please press the link button and try again'
        else:
            msg = response['error']['description']
        raise Exception(msg)
Registers a new device + username with the bridge
125
10
4,159
def get_lights(self):
    """List all lights available on the bridge, sorted by light id.

    The result is also cached on ``self._lights``. Raises Exception
    with the bridge's message on an error response.
    """
    url = '/api/%s/lights' % self.username
    response = self.make_request('GET', url)
    lights = []
    # Did we get a success response back?
    # error responses look like:
    # [{'error': {'address': '/lights',
    #             'description': 'unauthorized user',
    #             'type': 1}}]
    if 'error' in response:
        raise Exception(response['error']['description'])
    for id_, data in response.items():
        lights.append(Light(id_, data['modelid'], data['name'],
                            data['state'], data['swversion'], data['type'],
                            data['uniqueid']))
    lights = sorted(lights, key=lambda x: x.light_id)
    self._lights = lights
    return lights
Lists all available lights on the bridge .
199
9
4,160
def set_color(self, light_id, hex_value, brightness=None):
    """Set a light's color from a hex RGB value.

    The hex value is converted to CIE xy coordinates; *brightness* is
    included in the state update only when given.
    """
    light = self.get_light(light_id)
    xy = get_xy_from_hex(hex_value)
    data = {
        'xy': [xy.x, xy.y],
    }
    if brightness is not None:
        data['bri'] = brightness
    return self.set_state(light.light_id, **data)
This will set the light color based on a hex value
99
11
4,161
def plotfft(s, fmax, doplot=False):
    """Compute the one-sided FFT magnitude spectrum of a signal.

    Parameters
    ----------
    s : array_like
        Input signal.
    fmax : float
        Sampling frequency; the frequency axis spans [0, fmax/2].
    doplot : bool, optional
        Kept for backward compatibility; plotting is disabled.

    Returns
    -------
    tuple of numpy.ndarray
        (frequencies, magnitudes) with the DC bin dropped.
    """
    half = len(s) // 2
    fs = abs(np.fft.fft(s))
    # Bug fix: the original passed the float len(s)/2 as the sample count
    # (a TypeError on modern numpy) and relied on a bare pylab `linspace`.
    f = np.linspace(0, fmax / 2, half)
    if doplot:
        # Plotting was already disabled in the original implementation.
        pass
    return (f[1:half].copy(), fs[1:half].copy())
This functions computes the fft of a signal returning the frequency and their magnitude values .
131
18
4,162
def discover(service, timeout=5, retries=5):
    """Discover services on a network using the SSDP protocol.

    Sends a multicast M-SEARCH for ``service`` and collects responses,
    de-duplicated by their location header, retrying up to ``retries``
    times until at least one response arrives.

    Returns the collected SSDPResponse objects (dict values view).
    """
    group = ('239.255.255.250', 1900)
    message = '\r\n'.join([
        'M-SEARCH * HTTP/1.1',
        'HOST: {0}:{1}',
        'MAN: "ssdp:discover"',
        'ST: {st}',
        'MX: 3',
        '', ''])
    responses = {}
    for _ in range(retries):
        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM,
                             socket.IPPROTO_UDP)
        try:
            # FIX: per-socket timeout instead of socket.setdefaulttimeout(),
            # which mutated process-wide state for every future socket.
            sock.settimeout(timeout)
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 2)
            data = message.format(*group, st=service)
            sock.sendto(data.encode('utf-8'), group)
            while True:
                try:
                    response = SSDPResponse(sock.recv(1024))
                    responses[response.location] = response
                except socket.timeout:
                    break
        finally:
            # FIX: the socket was previously leaked once per retry.
            sock.close()
        if responses:
            break
    return responses.values()
Discovers services on a network using the SSDP Protocol .
277
13
4,163
def _isinstance ( self , model , raise_error = True ) : rv = isinstance ( model , self . __model__ ) if not rv and raise_error : raise ValueError ( '%s is not of type %s' % ( model , self . __model__ ) ) return rv
Checks if the specified model instance matches the class model . By default this method will raise a ValueError if the model is not of expected type .
67
30
4,164
def _preprocess_params(cls, kwargs):
    """Return a preprocessed dict of parameters for new/create/build.

    - Strips the primary key when _prevent_primary_key_initialization_ is set.
    - Converts empty strings to None.
    - On relationship keys (unless in _no_overwrite_), converts nested
      lists/dicts of kwargs into related objects via update_or_new(_all).
    """
    # kwargs.pop('csrf_token', None)
    # FIX: iterate over a snapshot -- the loop deletes keys from kwargs,
    # which raises RuntimeError on Python 3 when iterating the live view.
    for attr, val in list(kwargs.items()):
        if cls.is_the_primary_key(attr) and cls._prevent_primary_key_initialization_:
            del kwargs[attr]
            continue
        if val == "":
            # Making an assumption that there is no good usecase
            # for setting an empty string. This will help prevent
            # cases where empty string is sent because of client
            # not clearing form fields to null
            kwargs[attr] = None
            continue
        if attr in class_mapper(cls).relationships and attr not in cls._no_overwrite_:
            rel = class_mapper(cls).relationships[attr]
            if rel.uselist:
                if isinstance(val, list):
                    if all(isinstance(v, dict) for v in val):
                        rel_cls = cls.mapped_rel_class(attr)
                        kwargs[attr] = rel_cls.update_or_new_all(
                            list_of_kwargs=val,
                            keys=[rel_cls.primary_key_name()])
                elif isinstance(val, dict):
                    # Mapped-collection relationship: the dict keys come from
                    # the collection's keyfunc column.
                    rel_cls = cls.mapped_rel_class(attr)
                    mapping_col = rel.collection_class().keyfunc.name
                    list_of_kwargs = [
                        merge(v, {mapping_col: k}) for k, v in val.items()]
                    kwargs[attr] = {
                        getattr(obj, mapping_col): obj
                        for obj in rel_cls.update_or_new_all(
                            list_of_kwargs=list_of_kwargs,
                            keys=[rel_cls.primary_key_name()])}
            elif isinstance(val, dict):
                rel_cls = cls.mapped_rel_class(attr)
                kwargs[attr] = rel_cls.update_or_new(
                    **merge(val, {'keys': [rel_cls.primary_key_name()]}))
    return kwargs
Returns a preprocessed dictionary of parameters . Use this to filter the kwargs passed to new create build methods .
498
24
4,165
def update(self, **kwargs):
    """Update this instance's attributes and commit.

    Skips attributes missing on the class, class-level properties, and keys
    listed in _no_overwrite_. After assignment, reorders OrderingList
    collections (including those behind association proxies). Rolls back
    and re-raises on commit failure; returns self on success.
    """
    kwargs = self._preprocess_params(kwargs)
    kwargs = self.preprocess_kwargs_before_update(kwargs)
    cls = type(self)
    # FIX: dict.iteritems() is Python-2-only and raised AttributeError on
    # Python 3; .items() works on both.
    for key, value in kwargs.items():
        if not hasattr(cls, key) or isinstance(getattr(cls, key), property):
            continue
        if key not in self._no_overwrite_:
            setattr(self, key, value)
        if isinstance(getattr(self, key), OrderingList):
            getattr(self, key).reorder()
        elif isinstance(getattr(cls, key), AssociationProxyInstance):
            target_name = getattr(cls, key).target_collection
            target_rel = getattr(self, target_name)
            if isinstance(target_rel, OrderingList):
                target_rel.reorder()
    try:
        self.session.commit()
        return self
    except Exception as e:
        self.session.rollback()
        raise e
Updates an instance .
242
5
4,166
def filter_by(cls, **kwargs):
    """Same as SQLAlchemy's filter_by, plus two special keyword arguments:
    'limit' caps the result count and 'reverse' orders by descending id.
    """
    limit = kwargs.pop('limit', None)
    reverse = kwargs.pop('reverse', False)
    query = cls.query.filter_by(**kwargs)
    if reverse:
        query = query.order_by(cls.id.desc())
    return query.limit(limit) if limit else query
Same as SQLAlchemy s filter_by . Additionally this accepts two special keyword arguments limit and reverse for limiting the results and reversing the order respectively .
93
30
4,167
def count(cls, *criterion, **kwargs):
    """Return the number of instances matching the filter criterion/kwargs
    (all instances when no filter is given)."""
    if not (criterion or kwargs):
        return cls.query.count()
    return cls.filter(*criterion, **kwargs).count()
Returns a count of the instances meeting the specified filter criterion and kwargs .
53
16
4,168
def new(cls, **kwargs):
    """Return a new, unsaved instance of the model class.

    Honours single-table polymorphism: when the kwargs carry a known
    discriminator value, the mapped subclass is instantiated instead.
    """
    kwargs = cls.preprocess_kwargs_before_new(kwargs)
    poly_on = cls.__mapper__.polymorphic_on
    if poly_on is not None:
        discriminator = kwargs.get(poly_on.name)
        poly_map = cls.__mapper__.polymorphic_map
        if discriminator is not None and discriminator in poly_map:
            target_cls = poly_map[discriminator].class_
            return target_cls(**subdict(
                target_cls._preprocess_params(kwargs),
                target_cls.all_settable_keys()))
    return cls(**subdict(cls._preprocess_params(kwargs),
                         cls.all_settable_keys()))
Returns a new unsaved instance of the model class .
217
11
4,169
def add(cls, model, commit=True):
    """Add a model instance to the session, committing when ``commit``.

    Raises ValueError for a wrong-typed model; rolls back and re-raises
    any commit failure; returns the model on success.
    """
    if not isinstance(model, cls):
        raise ValueError('%s is not of type %s' % (model, cls))
    cls.session.add(model)
    try:
        if commit:
            cls.session.commit()
    except:
        # Roll back on any failure and re-raise it unchanged.
        cls.session.rollback()
        raise
    return model
Adds a model instance to session and commits the transaction .
82
11
4,170
def add_all(cls, models, commit=True, check_type=False):
    """Batch-add model instances to the db in one transaction.

    ``models`` may contain None entries (they are skipped when adding but
    kept in the returned list). When ``check_type`` is set, every element
    must be an instance of ``cls``. Rolls back and re-raises on failure.
    """
    if check_type:
        for model in models:
            if not isinstance(model, cls):
                # FIX: the '%' operator was missing, so this line raised
                # TypeError ('str' object is not callable) instead of the
                # intended ValueError.
                raise ValueError('%s is not of type %s' % (model, cls))
    if None in models:
        cls.session.add_all([m for m in models if m is not None])
    else:
        cls.session.add_all(models)
    try:
        if commit:
            cls.session.commit()
        return models
    except:
        cls.session.rollback()
        raise
Batch method for adding a list of model instances to the db in one transaction .
130
21
4,171
def get(cls, keyval, key='id', user_id=None):
    """Fetch the single instance whose ``key`` attribute equals ``keyval``.

    Returns None for a None keyval. Primary-key lookups go through
    query.get() (identity-map aware); everything else is a filtered
    first(). ``user_id`` is accepted for interface compatibility only.
    """
    if keyval is None:
        return None
    columns = cls.__table__.columns
    if key in columns and columns[key].primary_key:
        return cls.query.get(keyval)
    return cls.query.filter(getattr(cls, key) == keyval).first()
Fetches a single instance which has value keyval for the attribute key .
180
16
4,172
def get_all(cls, keyvals, key='id', user_id=None):
    """Map ``keyvals`` to instances, preserving the input order.

    Missing keys yield None in the corresponding position. ``user_id`` is
    accepted for interface compatibility only.
    """
    if len(keyvals) == 0:
        return []
    # Query each distinct value once, then restore the caller's order.
    distinct_vals = list(set(keyvals))
    fetched = cls.query.filter(getattr(cls, key).in_(distinct_vals)).all()
    by_key = {getattr(obj, key): obj for obj in fetched}
    return [by_key.get(kv) for kv in keyvals]
Works like a map function from keyvals to instances .
221
11
4,173
def create(cls, **kwargs):
    """Create a new instance, add it to the db and commit the transaction.

    Rolls back the session and re-raises on any failure.
    """
    try:
        instance = cls.new(**kwargs)
        return cls.add(instance)
    except:
        cls.session.rollback()
        raise
Initializes a new instance adds it to the db and commits the transaction .
45
15
4,174
def find_or_create(cls, **kwargs):
    """Return the first instance matching kwargs restricted to ``keys``;
    create a new instance from the full kwargs when none exists."""
    keys = kwargs.pop('keys', [])
    found = cls.first(**subdict(kwargs, keys))
    return found or cls.create(**kwargs)
Checks if an instance already exists by filtering with the kwargs . If yes returns that instance . If not creates a new instance with kwargs and returns it
70
34
4,175
def update_or_create(cls, **kwargs):
    """Update the instance matching kwargs restricted to ``keys`` with the
    remaining kwargs; create a new instance when no match (or no keys).

    Attributes listed in ``keys`` or in _no_overwrite_ are never
    overwritten. Rolls back and re-raises on commit failure.
    """
    keys = kwargs.pop('keys') if 'keys' in kwargs else []
    filter_kwargs = subdict(kwargs, keys)
    if filter_kwargs == {}:
        obj = None
    else:
        obj = cls.first(**filter_kwargs)
    if obj is not None:
        # FIX: dict.iteritems() is Python-2-only (AttributeError on py3).
        for key, value in kwargs.items():
            if key not in keys and key not in cls._no_overwrite_:
                setattr(obj, key, value)
        try:
            cls.session.commit()
        except:
            cls.session.rollback()
            raise
    else:
        obj = cls.create(**kwargs)
    return obj
Checks if an instance already exists by filtering with the kwargs . If yes updates the instance with new kwargs and returns that instance . If not creates a new instance with kwargs and returns it .
171
44
4,176
def create_all(cls, list_of_kwargs):
    """Batch-create instances from a list of kwargs dicts.

    None entries pass through unchanged. Rolls back and re-raises on
    failure.
    """
    try:
        instances = [None if kw is None else cls.new(**kw)
                     for kw in list_of_kwargs]
        return cls.add_all(instances)
    except:
        cls.session.rollback()
        raise
Batch method for creating a list of instances
72
9
4,177
def find_or_create_all(cls, list_of_kwargs, keys=[]):
    """Batch find-or-create: look up each kwargs dict by ``keys`` and create
    the missing ones.

    Duplicate key-dicts map back to the same object; the returned list
    mirrors the order of ``list_of_kwargs``.
    """
    deduped, markers = remove_and_mark_duplicate_dicts(list_of_kwargs, keys)
    added = cls.add_all([
        cls.first(**subdict(kw, keys)) or cls.new(**kw) for kw in deduped])
    added_iter = iter(added)
    results = []
    for idx in range(len(list_of_kwargs)):
        if idx in markers:
            # Duplicate entry: reuse the object created for its original.
            results.append(added[markers[idx]])
        else:
            results.append(next(added_iter))
    return results
Batch method for querying for a list of instances and creating them if required
212
16
4,178
def update_or_create_all(cls, list_of_kwargs, keys=[]):
    """Batch update_or_create: update instances matching each kwargs dict
    (restricted to ``keys``), build missing ones, then add and commit all
    in one batch.

    Attributes listed in ``keys`` or in _no_overwrite_ are never
    overwritten. Rolls back and re-raises on failure.
    """
    objs = []
    for kwargs in list_of_kwargs:
        filter_kwargs = subdict(kwargs, keys)
        if filter_kwargs == {}:
            obj = None
        else:
            obj = cls.first(**filter_kwargs)
        if obj is not None:
            # FIX: dict.iteritems() is Python-2-only (AttributeError on py3).
            for key, value in kwargs.items():
                if key not in keys and key not in cls._no_overwrite_:
                    setattr(obj, key, value)
        else:
            obj = cls.new(**kwargs)
        objs.append(obj)
    try:
        return cls.add_all(objs)
    except:
        cls.session.rollback()
        raise
Batch method for updating a list of instances and creating them if required
181
14
4,179
def build(cls, **kwargs):
    """Like create, but the transaction is left uncommitted."""
    instance = cls.new(**kwargs)
    return cls.add(instance, commit=False)
Similar to create . But the transaction is not committed
35
10
4,180
def find_or_build(cls, **kwargs):
    """Return an existing instance matching kwargs restricted to ``keys``,
    else build a new (uncommitted) one from the full kwargs."""
    keys = kwargs.pop('keys', [])
    found = cls.first(**subdict(kwargs, keys))
    return found or cls.build(**kwargs)
Checks if an instance already exists in db with these kwargs else returns a new saved instance of the service s model class .
70
27
4,181
def build_all(cls, list_of_kwargs):
    """Like create_all, but the transaction is not committed."""
    instances = [cls.new(**kw) for kw in list_of_kwargs]
    return cls.add_all(instances, commit=False)
Similar to create_all . But transaction is not committed .
53
12
4,182
def find_or_build_all(cls, list_of_kwargs):
    """Like find_or_create_all, but the transaction is not committed."""
    instances = [cls.first(**kw) or cls.new(**kw) for kw in list_of_kwargs]
    return cls.add_all(instances, commit=False)
Similar to find_or_create_all . But transaction is not committed .
69
16
4,183
def update_all(cls, *criterion, **kwargs):
    """Update every instance matching ``criterion`` with ``kwargs`` and
    commit; uses the 'fetch' synchronize strategy. Rolls back and
    re-raises on failure; returns the affected row count.
    """
    try:
        rows = cls.query.filter(*criterion).update(kwargs, 'fetch')
        cls.session.commit()
        return rows
    except:
        cls.session.rollback()
        raise
Batch method for updating all instances obeying the criterion
66
11
4,184
def peakdelta(v, delta, x=None):
    """Detect local maxima and minima of ``v``.

    A candidate extremum is confirmed once the series moves away from it
    by more than ``delta``. Returns two arrays (maxtab, mintab) of
    (position, value) rows, with positions taken from ``x`` (defaults to
    the sample indices). Exits via sys.exit on invalid arguments,
    matching the original MATLAB-style contract.
    """
    maxima, minima = [], []
    if x is None:
        x = arange(len(v))
    v = asarray(v)
    if len(v) != len(x):
        sys.exit('Input vectors v and x must have same length')
    if not isscalar(delta):
        sys.exit('Input argument delta must be a scalar')
    if delta <= 0:
        sys.exit('Input argument delta must be positive')
    cur_min, cur_max = Inf, -Inf
    min_pos, max_pos = NaN, NaN
    seeking_max = True
    for idx in arange(len(v)):
        sample = v[idx]
        if sample > cur_max:
            cur_max, max_pos = sample, x[idx]
        if sample < cur_min:
            cur_min, min_pos = sample, x[idx]
        if seeking_max:
            if sample < cur_max - delta:
                # Dropped far enough below the candidate: confirmed maximum.
                maxima.append((max_pos, cur_max))
                cur_min, min_pos = sample, x[idx]
                seeking_max = False
        elif sample > cur_min + delta:
            # Risen far enough above the candidate: confirmed minimum.
            minima.append((min_pos, cur_min))
            cur_max, max_pos = sample, x[idx]
            seeking_max = True
    return array(maxima), array(minima)
Returns two arrays
297
3
4,185
def on_status_update(self, channel, callback):
    """Register ``callback`` to run on status updates of ``channel``."""
    self._callbacks.setdefault(channel, []).append(callback)
Callback to execute on status of update of channel
48
9
4,186
def temp_chdir(cwd=None):
    """Context manager: chdir into a fresh temporary directory.

    Yields the temporary directory path (or '' if it vanished), restores
    the original working directory on exit, and removes the directory.

    ``cwd`` overrides the directory to restore (defaults to os.getcwd()).
    """
    if six.PY3:
        from tempfile import TemporaryDirectory
        with TemporaryDirectory() as tempwd:
            origin = cwd or os.getcwd()
            os.chdir(tempwd)
            try:
                yield tempwd if os.path.exists(tempwd) else ''
            finally:
                os.chdir(origin)
    else:
        import shutil
        from tempfile import mkdtemp
        tempwd = mkdtemp()
        origin = cwd or os.getcwd()
        os.chdir(tempwd)
        try:
            yield tempwd if os.path.exists(tempwd) else ''
        finally:
            os.chdir(origin)
            # FIX: the Python-2 branch previously leaked the temp directory
            # (mkdtemp is never auto-removed, unlike TemporaryDirectory).
            shutil.rmtree(tempwd, ignore_errors=True)
Create and return a temporary directory which you can use as a context manager .
143
15
4,187
def parse_datetime(value):
    """Convert an ISO-8601-style datetime string to a datetime object.

    Returns None when ``value`` is None, and (implicitly) when the string
    does not match the expected pattern. A 'Z' or '+HH[:]MM' suffix yields
    a timezone-aware datetime via pytz; otherwise the result is naive.
    """
    if value is None:
        # do not process the value
        return None

    def _get_fixed_timezone(offset):
        """Return a tzinfo instance with a fixed offset from UTC."""
        if isinstance(offset, timedelta):
            offset = offset.seconds // 60
        sign = '-' if offset < 0 else '+'
        hhmm = '%02d%02d' % divmod(abs(offset), 60)
        name = sign + hhmm
        return pytz.FixedOffset(offset, name)

    # Matches 'YYYY-MM-DD[T ]HH:MM[:SS[.ffffff]][Z|±HH[[:]MM]]'.
    DATETIME_RE = re.compile(
        r'(?P<year>\d{4})-(?P<month>\d{1,2})-(?P<day>\d{1,2})'
        r'[T ](?P<hour>\d{1,2}):(?P<minute>\d{1,2})'
        r'(?::(?P<second>\d{1,2})(?:\.(?P<microsecond>\d{1,6})\d{0,6})?)?'
        r'(?P<tzinfo>Z|[+-]\d{2}(?::?\d{2})?)?$'
    )
    match = DATETIME_RE.match(value)
    if match:
        kw = match.groupdict()
        if kw['microsecond']:
            # Right-pad to 6 digits so e.g. '.5' means 500000 microseconds.
            kw['microsecond'] = kw['microsecond'].ljust(6, '0')
        tzinfo = kw.pop('tzinfo')
        if tzinfo == 'Z':
            tzinfo = pytz.UTC
        elif tzinfo is not None:
            # A bare '+HH' has no minutes part; '+HH:MM' / '+HHMM' do.
            offset_mins = int(tzinfo[-2:]) if len(tzinfo) > 3 else 0
            offset = 60 * int(tzinfo[1:3]) + offset_mins
            if tzinfo[0] == '-':
                offset = -offset
            tzinfo = _get_fixed_timezone(offset)
        kw = {k: int(v) for k, v in six.iteritems(kw) if v is not None}
        kw['tzinfo'] = tzinfo
        return datetime(**kw)
Convert datetime string to datetime object .
509
10
4,188
def _save_customization(self, widgets):
    """Persist the complete widget customization onto the activity.

    An empty widget list clears the customization; otherwise only the
    'ext' part is replaced. Validates before saving and refreshes the
    cached activity afterwards. Raises APIError when the PUT fails.
    """
    if widgets:
        # Replace only the 'ext' portion, keeping any other customization.
        customization = self.activity._json_data.get('customization', dict())
        if customization:
            customization['ext'] = dict(widgets=widgets)
        else:
            customization = dict(ext=dict(widgets=widgets))
    else:
        customization = None
    if customization:
        validate(customization, widgetconfig_json_schema)
    url = self._client._build_url('activity',
                                  activity_id=str(self.activity.id))
    response = self._client._request('PUT', url,
                                     json=dict(customization=customization))
    if response.status_code != requests.codes.ok:  # pragma: no cover
        raise APIError("Could not save customization ({})".format(response))
    else:
        # Refresh our cached copy of the activity.
        self.activity = self._client.activity(pk=self.activity.id)
Save the complete customization to the activity .
249
8
4,189
def _add_widget(self, widget):
    """Append a single widget to the customization and persist it."""
    widget_list = self.widgets()
    # Keep in-place extension (+=) to mirror the original list semantics.
    widget_list += [widget]
    self._save_customization(widget_list)
Add a widget to the customization .
33
7
4,190
def widgets(self):
    """Return the Ext JS widget list from the activity customization
    (an empty list when no 'ext' customization exists)."""
    customization = self.activity._json_data.get('customization')
    if not customization or "ext" not in customization:
        return []
    return customization['ext']['widgets']
Get the Ext JS specific customization from the activity .
55
10
4,191
def delete_widget(self, index):
    """Remove the widget at ``index`` and persist the change.

    Raises ValueError when the customization has no widgets.
    """
    current = self.widgets()
    if not current:
        raise ValueError("This customization has no widgets")
    current.pop(index)
    self._save_customization(current)
Delete widgets by index .
53
5
4,192
def add_json_widget(self, config):
    """Validate ``config`` against the JSON-widget schema and add an Ext
    JSON widget to the customization."""
    validate(config, component_jsonwidget_schema)
    widget = dict(config=config, name=WidgetNames.JSONWIDGET)
    self._add_widget(widget)
Add an Ext Json Widget to the customization .
49
11
4,193
def add_property_grid_widget(self, part_instance, max_height=None, custom_title=False, show_headers=True, show_columns=None):
    """Add a KE-chain Property Grid widget to the customization.

    :param part_instance: a Part object or a part-instance UUID string
    :param max_height: optional fixed height; None means automatic height
    :param custom_title: False -> use the part's name; None -> no title;
        any other value -> used (stringified) as the custom title
    :param show_headers: whether the grid headers are shown
    :param show_columns: iterable of ShowColumnTypes values to display
    :raises IllegalArgumentError: when part_instance is neither a Part
        nor a valid UUID string
    """
    height = max_height
    # Check whether the parent_part_instance is uuid type or class `Part`
    if isinstance(part_instance, Part):
        part_instance_id = part_instance.id
    elif isinstance(part_instance, text_type) and is_uuid(part_instance):
        part_instance_id = part_instance
        # Resolve the id to a Part so its name can serve as default title.
        part_instance = self._client.part(id=part_instance_id)
    else:
        raise IllegalArgumentError("When using the add_property_grid_widget, part_instance must be a "
                                   "Part or Part id. Type is: {}".format(type(part_instance)))
    if not show_columns:
        show_columns = list()
    # Set the display_columns for the config
    possible_columns = [ShowColumnTypes.DESCRIPTION, ShowColumnTypes.UNIT]
    display_columns = dict()
    for possible_column in possible_columns:
        if possible_column in show_columns:
            display_columns[possible_column] = True
        else:
            display_columns[possible_column] = False
    # Declare property grid config
    config = {
        "xtype": ComponentXType.PROPERTYGRID,
        "category": Category.INSTANCE,
        "filter": {
            "activity_id": str(self.activity.id),
            "part": part_instance_id
        },
        "hideHeaders": not show_headers,
        "viewModel": {
            "data": {
                "displayColumns": display_columns
            }
        },
    }
    # Add max height and custom title
    if height:
        config['height'] = height
    if custom_title is False:
        show_title_value = "Default"
        title = part_instance.name
    elif custom_title is None:
        show_title_value = "No title"
        title = str()
    else:
        show_title_value = "Custom title"
        title = str(custom_title)
    config["title"] = title
    config["showTitleValue"] = show_title_value
    # Declare the meta info for the property grid
    meta = {
        "activityId": str(self.activity.id),
        "customHeight": height if height else None,
        "customTitle": title,
        "partInstanceId": part_instance_id,
        "showColumns": show_columns,
        "showHeaders": show_headers,
        "showHeightValue": "Set height" if height else "Automatic height",
        "showTitleValue": show_title_value
    }
    self._add_widget(dict(config=config, meta=meta, name=WidgetNames.PROPERTYGRIDWIDGET))
Add a KE - chain Property Grid widget to the customization .
632
12
4,194
def add_text_widget(self, text=None, custom_title=None, collapsible=True, collapsed=False):
    """Add a KE-chain Text (HTML panel) widget to the customization.

    :param text: optional HTML body of the widget
    :param custom_title: optional title; falsy means no title
    :param collapsible: whether the panel can be collapsed
    :param collapsed: initial collapsed state (ignored unless collapsible)
    """
    config = {
        "xtype": ComponentXType.HTMLPANEL,
        "filter": {"activity_id": str(self.activity.id)},
    }
    if text:
        config['html'] = text
    if custom_title:
        title = custom_title
        show_title_value = "Custom title"
    else:
        title = None
        show_title_value = "No title"
    config['collapsible'] = collapsible
    # A widget can only start collapsed if it is collapsible at all.
    config['collapsed'] = collapsed if collapsible else False
    config['title'] = title
    # Meta records the raw arguments (note: 'collapsed' is the requested
    # value, not the possibly-forced-False config value).
    meta = {
        "activityId": str(self.activity.id),
        "customTitle": title,
        "collapsible": collapsible,
        "collapsed": collapsed,
        "html": text,
        "showTitleValue": show_title_value,
    }
    self._add_widget(dict(config=config, meta=meta, name=WidgetNames.HTMLWIDGET))
Add a KE - chain Text widget to the customization .
282
11
4,195
def enable_mp_crash_reporting():
    """Monkey-patch multiprocessing.Process with CrashReportingProcess.

    Any subsequent use of multiprocessing.Process resolves to the
    crash-reporting subclass, and the module-level flag records that
    patching has happened.
    """
    global mp_crash_reporting_enabled
    # Patch both aliases so `multiprocessing.Process` and
    # `multiprocessing.process.Process` refer to the same class.
    multiprocessing.Process = multiprocessing.process.Process = CrashReportingProcess
    mp_crash_reporting_enabled = True
Monkey - patch the multiprocessing . Process class with our own CrashReportingProcess . Any subsequent imports of multiprocessing . Process will reference CrashReportingProcess instead .
50
36
4,196
def feed(self, data):
    """Append incoming bytes to the buffer and process complete packets.

    Each next_packet() call is expected to consume data from the buffer;
    6 bytes is the smallest chunk worth attempting to parse.
    """
    self.buffer += data
    while True:
        if len(self.buffer) < 6:
            break
        self.next_packet()
Add new incoming data to buffer and try to process
31
10
4,197
def valid_header_waiting(self):
    """Check whether a valid 4-byte Velbus header is waiting in the buffer.

    Validates the start byte, the priority byte and the encoded message
    size. NOTE(review): because the checks chain on a False result, a
    failure in one check also triggers the warnings of the later checks
    for the same (already rejected) data.
    """
    if len(self.buffer) < 4:
        self.logger.debug("Buffer does not yet contain full header")
        result = False
    else:
        result = True
        result = result and self.buffer[0] == velbus.START_BYTE
        if not result:
            self.logger.warning("Start byte not recognized")
        result = result and (self.buffer[1] in velbus.PRIORITY)
        if not result:
            self.logger.warning("Priority not recognized")
        # The low nibble of byte 3 encodes the data length; max 8 bytes.
        result = result and (self.buffer[3] & 0x0F <= 8)
        if not result:
            self.logger.warning("Message size not recognized")
    self.logger.debug("Valid Header Waiting: %s(%s)", result, str(self.buffer))
    return result
Check if a valid header is waiting in buffer
181
9
4,198
def valid_body_waiting(self):
    """Check whether a complete, valid message body is waiting in the buffer.

    Derives the expected packet size from the header's length nibble, then
    validates the end byte and the checksum byte (which covers everything
    before itself in the packet).
    """
    # Example packet: 0f f8 be 04 00 08 00 00 2f 04
    packet_size = velbus.MINIMUM_MESSAGE_SIZE + (self.buffer[3] & 0x0F)
    if len(self.buffer) < packet_size:
        self.logger.debug("Buffer does not yet contain full message")
        result = False
    else:
        result = True
        result = result and self.buffer[packet_size - 1] == velbus.END_BYTE
        if not result:
            self.logger.warning("End byte not recognized")
        # Checksum byte sits just before the end byte.
        result = result and velbus.checksum(self.buffer[0:packet_size - 2])[0] == self.buffer[packet_size - 2]
        if not result:
            self.logger.warning("Checksum not recognized")
    self.logger.debug("Valid Body Waiting: %s (%s)", result, str(self.buffer))
    return result
Check if a valid body is waiting in buffer
214
9
4,199
def next_packet(self):
    """Extract and dispatch the next complete packet from the buffer.

    Discards everything up to the first start byte (the whole buffer when
    no start byte is present); when a valid header and body are waiting,
    extracts the packet, parses it and forwards velbus.Message results to
    the controller.
    """
    try:
        start = self.buffer.index(velbus.START_BYTE)
    except ValueError:
        # No start byte anywhere: the buffered data is all garbage.
        self.buffer = bytes([])
        return
    if start >= 0:
        self.buffer = self.buffer[start:]
    if self.valid_header_waiting() and self.valid_body_waiting():
        packet = self.extract_packet()
        self.buffer = self.buffer[len(packet):]
        parsed = self.parse(packet)
        if isinstance(parsed, velbus.Message):
            self.controller.new_message(parsed)
Process next packet if present
155
5