idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
3,700
def validate ( self ) : for key , req in self . stored ( ) . items ( ) : val = getattr ( self , key ) req . validate ( val ) # Run checks defined in __checkers__ self . _run_checks ( )
Validate myself .
53
4
3,701
def validate ( method ) : @ wraps ( method ) def mod_run ( self , rinput ) : self . validate_input ( rinput ) # result = method ( self , rinput ) # self . validate_result ( result ) return result return mod_run
Decorate the run method so that inputs and outputs are validated
56
9
3,702
def as_list ( callable ) : @ wraps ( callable ) def wrapper ( value_iter ) : return [ callable ( value ) for value in value_iter ] return wrapper
Convert a scalar validator into a list validator
39
12
3,703
def range_validator ( minval = None , maxval = None ) : def checker_func ( value ) : if minval is not None and value < minval : msg = "must be >= {}" . format ( minval ) raise ValidationError ( msg ) if maxval is not None and value > maxval : msg = "must be <= {}" . format ( maxval ) raise ValidationError ( msg ) return value return checker_func
Generates a function that validates that a number is within range
99
13
3,704
def run ( path , tasks ) : # Test if the pylpfile exists readable_path = make_readable_path ( path ) if not os . path . isfile ( path ) : logger . log ( logger . red ( "Can't read pylpfile " ) , logger . magenta ( readable_path ) ) sys . exit ( - 1 ) else : logger . log ( "Using pylpfile " , logger . magenta ( readable_path ) ) # Run the pylpfile try : runpy . run_path ( path , None , "pylpfile" ) except Exception as e : traceback . print_exc ( file = sys . stdout ) logger . log ( logger . red ( "\nAn error has occurred during the execution of the pylpfile" ) ) sys . exit ( - 1 ) # Start the tasks for name in tasks : pylp . start ( name ) # Wait until all task are executed loop = asyncio . get_event_loop ( ) loop . run_until_complete ( wait_and_quit ( loop ) )
Run a pylpfile .
233
7
3,705
async def wait_and_quit ( loop ) : from pylp . lib . tasks import running if running : await asyncio . wait ( map ( lambda runner : runner . future , running ) )
Wait until all tasks are executed .
43
7
3,706
def is_published ( self ) : citeable = 'publication_info' in self . record and is_citeable ( self . record [ 'publication_info' ] ) submitted = 'dois' in self . record and any ( 'journal_title' in el for el in force_list ( self . record . get ( 'publication_info' ) ) ) return citeable or submitted
Return True if a record is published .
87
8
3,707
def get_page_artid_for_publication_info ( publication_info , separator ) : if 'artid' in publication_info : return publication_info [ 'artid' ] elif 'page_start' in publication_info and 'page_end' in publication_info : page_start = publication_info [ 'page_start' ] page_end = publication_info [ 'page_end' ] return text_type ( '{}{}{}' ) . format ( page_start , text_type ( separator ) , page_end ) return ''
Return the page range or the article id of a publication_info entry .
126
15
3,708
def get_page_artid ( self , separator = '-' ) : publication_info = get_value ( self . record , 'publication_info[0]' , default = { } ) return LiteratureReader . get_page_artid_for_publication_info ( publication_info , separator )
Return the page range or the article id of a record .
68
12
3,709
def chunkreverse ( integers , dtype = 'L' ) : if dtype in ( 'B' , 8 ) : return map ( RBYTES . __getitem__ , integers ) fmt = '{0:0%db}' % NBITS [ dtype ] return ( int ( fmt . format ( chunk ) [ : : - 1 ] , 2 ) for chunk in integers )
Yield integers of dtype bit - length reverting their bit - order .
84
16
3,710
def pack ( chunks , r = 32 ) : if r < 1 : raise ValueError ( 'pack needs r > 0' ) n = shift = 0 for c in chunks : n += c << shift shift += r return n
Return integer concatenating integer chunks of r > 0 bit - length .
47
15
3,711
def unpack ( n , r = 32 ) : if r < 1 : raise ValueError ( 'unpack needs r > 0' ) mask = ( 1 << r ) - 1 while n : yield n & mask n >>= r
Yield r > 0 bit - length integers splitting n into chunks .
49
14
3,712
def packbools ( bools , dtype = 'L' ) : r = NBITS [ dtype ] atoms = ATOMS [ dtype ] for chunk in zip_longest ( * [ iter ( bools ) ] * r , fillvalue = False ) : yield sum ( compress ( atoms , chunk ) )
Yield integers concatenating bools in chunks of dtype bit - length .
69
17
3,713
def unpackbools ( integers , dtype = 'L' ) : atoms = ATOMS [ dtype ] for chunk in integers : for a in atoms : yield not not chunk & a
Yield booleans unpacking integers of dtype bit - length .
41
15
3,714
def select_data_for_fit ( list_of_wvfeatures ) : nlines_arc = len ( list_of_wvfeatures ) nfit = 0 ifit = [ ] xfit = np . array ( [ ] ) yfit = np . array ( [ ] ) wfit = np . array ( [ ] ) for i in range ( nlines_arc ) : if list_of_wvfeatures [ i ] . line_ok : ifit . append ( i ) xfit = np . append ( xfit , [ list_of_wvfeatures [ i ] . xpos ] ) yfit = np . append ( yfit , [ list_of_wvfeatures [ i ] . reference ] ) wfit = np . append ( wfit , [ list_of_wvfeatures [ i ] . funcost ] ) nfit += 1 return nfit , ifit , xfit , yfit , wfit
Select information from valid arc lines to facilitate posterior fits .
202
11
3,715
def gen_triplets_master ( wv_master , geometry = None , debugplot = 0 ) : nlines_master = wv_master . size # Check that the wavelengths in the master table are sorted wv_previous = wv_master [ 0 ] for i in range ( 1 , nlines_master ) : if wv_previous >= wv_master [ i ] : raise ValueError ( 'Wavelengths:\n--> ' + str ( wv_previous ) + '\n--> ' + str ( wv_master [ i ] ) + '\nin master table are duplicated or not sorted' ) wv_previous = wv_master [ i ] # Generate all the possible triplets with the numbers of the lines # in the master table. Each triplet is defined as a tuple of three # numbers corresponding to the three line indices in the master # table. The collection of tuples is stored in an ordinary python # list. iter_comb_triplets = itertools . combinations ( range ( nlines_master ) , 3 ) triplets_master_list = [ val for val in iter_comb_triplets ] # Verify that the number of triplets coincides with the expected # value. ntriplets_master = len ( triplets_master_list ) if ntriplets_master == comb ( nlines_master , 3 , exact = True ) : if abs ( debugplot ) >= 10 : print ( '>>> Total number of lines in master table:' , nlines_master ) print ( '>>> Number of triplets in master table...:' , ntriplets_master ) else : raise ValueError ( 'Invalid number of combinations' ) # For each triplet, compute the relative position of the central # line. ratios_master = np . zeros ( ntriplets_master ) for index , value in enumerate ( triplets_master_list ) : i1 , i2 , i3 = value delta1 = wv_master [ i2 ] - wv_master [ i1 ] delta2 = wv_master [ i3 ] - wv_master [ i1 ] ratios_master [ index ] = delta1 / delta2 # Compute the array of indices that index the above ratios in # sorted order. isort_ratios_master = np . argsort ( ratios_master ) # Simultaneous sort of position ratios and triplets. 
ratios_master_sorted = ratios_master [ isort_ratios_master ] triplets_master_sorted_list = [ triplets_master_list [ i ] for i in isort_ratios_master ] if abs ( debugplot ) in [ 21 , 22 ] : # compute and plot histogram with position ratios bins_in = np . linspace ( 0.0 , 1.0 , 41 ) hist , bins_out = np . histogram ( ratios_master , bins = bins_in ) # from numina . array . display . matplotlib_qt import plt fig = plt . figure ( ) ax = fig . add_subplot ( 111 ) width_hist = 0.8 * ( bins_out [ 1 ] - bins_out [ 0 ] ) center = ( bins_out [ : - 1 ] + bins_out [ 1 : ] ) / 2 ax . bar ( center , hist , align = 'center' , width = width_hist ) ax . set_xlabel ( 'distance ratio in each triplet' ) ax . set_ylabel ( 'Number of triplets' ) ax . set_title ( "Number of lines/triplets: " + str ( nlines_master ) + "/" + str ( ntriplets_master ) ) # set window geometry set_window_geometry ( geometry ) pause_debugplot ( debugplot , pltshow = True , tight_layout = True ) return ntriplets_master , ratios_master_sorted , triplets_master_sorted_list
Compute information associated to triplets in master table .
853
11
3,716
def arccalibration ( wv_master , xpos_arc , naxis1_arc , crpix1 , wv_ini_search , wv_end_search , wvmin_useful , wvmax_useful , error_xpos_arc , times_sigma_r , frac_triplets_for_sum , times_sigma_theil_sen , poly_degree_wfit , times_sigma_polfilt , times_sigma_cook , times_sigma_inclusion , geometry = None , debugplot = 0 ) : ntriplets_master , ratios_master_sorted , triplets_master_sorted_list = gen_triplets_master ( wv_master = wv_master , geometry = geometry , debugplot = debugplot ) list_of_wvfeatures = arccalibration_direct ( wv_master = wv_master , ntriplets_master = ntriplets_master , ratios_master_sorted = ratios_master_sorted , triplets_master_sorted_list = triplets_master_sorted_list , xpos_arc = xpos_arc , naxis1_arc = naxis1_arc , crpix1 = crpix1 , wv_ini_search = wv_ini_search , wv_end_search = wv_end_search , wvmin_useful = wvmin_useful , wvmax_useful = wvmax_useful , error_xpos_arc = error_xpos_arc , times_sigma_r = times_sigma_r , frac_triplets_for_sum = frac_triplets_for_sum , times_sigma_theil_sen = times_sigma_theil_sen , poly_degree_wfit = poly_degree_wfit , times_sigma_polfilt = times_sigma_polfilt , times_sigma_cook = times_sigma_cook , times_sigma_inclusion = times_sigma_inclusion , geometry = geometry , debugplot = debugplot ) return list_of_wvfeatures
Performs arc line identification for arc calibration .
489
9
3,717
def match_wv_arrays ( wv_master , wv_expected_all_peaks , delta_wv_max ) : # initialize the output array to zero wv_verified_all_peaks = np . zeros_like ( wv_expected_all_peaks ) # initialize to True array to indicate that no peak has already # been verified (this flag avoids duplication) wv_unused = np . ones_like ( wv_expected_all_peaks , dtype = bool ) # initialize to np.infty array to store minimum distance to already # identified line minimum_delta_wv = np . ones_like ( wv_expected_all_peaks , dtype = float ) minimum_delta_wv *= np . infty # since it is likely that len(wv_master) < len(wv_expected_all_peaks), # it is more convenient to execute the search in the following order for i in range ( len ( wv_master ) ) : j = np . searchsorted ( wv_expected_all_peaks , wv_master [ i ] ) if j == 0 : delta_wv = abs ( wv_master [ i ] - wv_expected_all_peaks [ j ] ) if delta_wv < delta_wv_max : if wv_unused [ j ] : wv_verified_all_peaks [ j ] = wv_master [ i ] wv_unused [ j ] = False minimum_delta_wv [ j ] = delta_wv else : if delta_wv < minimum_delta_wv [ j ] : wv_verified_all_peaks [ j ] = wv_master [ i ] minimum_delta_wv [ j ] = delta_wv elif j == len ( wv_expected_all_peaks ) : delta_wv = abs ( wv_master [ i ] - wv_expected_all_peaks [ j - 1 ] ) if delta_wv < delta_wv_max : if wv_unused [ j - 1 ] : wv_verified_all_peaks [ j - 1 ] = wv_master [ i ] wv_unused [ j - 1 ] = False else : if delta_wv < minimum_delta_wv [ j - 1 ] : wv_verified_all_peaks [ j - 1 ] = wv_master [ i ] else : delta_wv1 = abs ( wv_master [ i ] - wv_expected_all_peaks [ j - 1 ] ) delta_wv2 = abs ( wv_master [ i ] - wv_expected_all_peaks [ j ] ) if delta_wv1 < delta_wv2 : if delta_wv1 < delta_wv_max : if wv_unused [ j - 1 ] : wv_verified_all_peaks [ j - 1 ] = wv_master [ i ] wv_unused [ j - 1 ] = False else : if delta_wv1 < minimum_delta_wv [ j - 1 ] : wv_verified_all_peaks [ j - 1 ] = wv_master [ i ] else : if delta_wv2 < delta_wv_max : if wv_unused [ j ] : wv_verified_all_peaks [ j ] = wv_master [ 
i ] wv_unused [ j ] = False else : if delta_wv2 < minimum_delta_wv [ j ] : wv_verified_all_peaks [ j ] = wv_master [ i ] return wv_verified_all_peaks
Match two lists with wavelengths .
841
6
3,718
def set_window_geometry ( geometry ) : if geometry is not None : x_geom , y_geom , dx_geom , dy_geom = geometry mngr = plt . get_current_fig_manager ( ) if 'window' in dir ( mngr ) : try : mngr . window . setGeometry ( x_geom , y_geom , dx_geom , dy_geom ) except AttributeError : pass else : pass
Set window geometry .
107
4
3,719
def parse_fixed_width ( types , lines ) : values = [ ] line = [ ] for width , parser in types : if not line : line = lines . pop ( 0 ) . replace ( '\n' , '' ) values . append ( parser ( line [ : width ] ) ) line = line [ width : ] return values
Parse a fixed width line .
71
7
3,720
def _parse_curves ( block , * * kwargs ) : count = int ( block . pop ( 0 ) ) curves = [ ] for i in range ( count ) : for param in [ 'mod_reduc' , 'damping' ] : length , name = parse_fixed_width ( [ ( 5 , int ) , ( 65 , to_str ) ] , block ) curves . append ( site . NonlinearProperty ( name , parse_fixed_width ( length * [ ( 10 , float ) ] , block ) , parse_fixed_width ( length * [ ( 10 , float ) ] , block ) , param ) ) length = int ( block [ 0 ] [ : 5 ] ) soil_types = parse_fixed_width ( ( length + 1 ) * [ ( 5 , int ) ] , block ) [ 1 : ] # Group soil type number and curves together return { ( soil_types [ i // 2 ] , c . param ) : c for i , c in enumerate ( curves ) }
Parse nonlinear curves block .
217
7
3,721
def _parse_soil_profile ( block , units , curves , * * kwargs ) : wt_layer , length , _ , name = parse_fixed_width ( 3 * [ ( 5 , int ) ] + [ ( 55 , to_str ) ] , block ) layers = [ ] soil_types = [ ] for i in range ( length ) : index , soil_idx , thickness , shear_mod , damping , unit_wt , shear_vel = parse_fixed_width ( [ ( 5 , int ) , ( 5 , int ) , ( 15 , to_float ) ] + 4 * [ ( 10 , to_float ) ] , block ) st = site . SoilType ( soil_idx , unit_wt , curves [ ( soil_idx , 'mod_reduc' ) ] , curves [ ( soil_idx , 'damping' ) ] , ) try : # Try to find previously added soil type st = soil_types [ soil_types . index ( st ) ] except ValueError : soil_types . append ( st ) layers . append ( site . Layer ( st , thickness , shear_vel ) ) if units == 'english' : # Convert from English to metric for st in soil_types : st . unit_wt *= 0.00015708746 for l in layers : l . thickness *= 0.3048 l . shear_vel *= 0.3048 p = site . Profile ( layers ) p . update_layers ( ) p . wt_depth = p [ wt_layer - 1 ] . depth return p
Parse soil profile block .
347
6
3,722
def _parse_motion ( block , * * kwargs ) : _ , fa_length , time_step , name , fmt = parse_fixed_width ( [ ( 5 , int ) , ( 5 , int ) , ( 10 , float ) , ( 30 , to_str ) , ( 30 , to_str ) ] , block ) scale , pga , _ , header_lines , _ = parse_fixed_width ( 3 * [ ( 10 , to_float ) ] + 2 * [ ( 5 , int ) ] , block ) m = re . search ( r'(\d+)\w(\d+)\.\d+' , fmt ) count_per_line = int ( m . group ( 1 ) ) width = int ( m . group ( 2 ) ) fname = os . path . join ( os . path . dirname ( kwargs [ 'fname' ] ) , name ) accels = np . genfromtxt ( fname , delimiter = ( count_per_line * [ width ] ) , skip_header = header_lines , ) if np . isfinite ( scale ) : pass elif np . isfinite ( pga ) : scale = pga / np . abs ( accels ) . max ( ) else : scale = 1. accels *= scale m = motion . TimeSeriesMotion ( fname , '' , time_step , accels , fa_length ) return m
Parse motion specification block .
311
7
3,723
def _parse_input_loc ( block , profile , * * kwargs ) : layer , wave_field = parse_fixed_width ( 2 * [ ( 5 , int ) ] , block ) return profile . location ( motion . WaveField [ wave_field ] , index = ( layer - 1 ) , )
Parse input location block .
67
6
3,724
def _parse_run_control ( block ) : _ , max_iterations , strain_ratio , _ , _ = parse_fixed_width ( 2 * [ ( 5 , int ) ] + [ ( 10 , float ) ] + 2 * [ ( 5 , int ) ] , block ) return propagation . EquivalentLinearCalculation ( strain_ratio , max_iterations , tolerance = 10. )
Parse run control block .
88
6
3,725
def blockgen1d ( block , size ) : def numblock ( blk , x ) : """Compute recursively the numeric intervals """ a , b = x if b - a <= blk : return [ x ] else : result = [ ] d = int ( b - a ) // 2 for i in imap ( numblock , [ blk , blk ] , [ ( a , a + d ) , ( a + d , b ) ] ) : result . extend ( i ) return result return [ slice ( * l ) for l in numblock ( block , ( 0 , size ) ) ]
Compute 1d block intervals to be used by combine .
130
12
3,726
def blockgen ( blocks , shape ) : iterables = [ blockgen1d ( l , s ) for ( l , s ) in zip ( blocks , shape ) ] return product ( * iterables )
Generate a list of slice tuples to be used by combine .
43
14
3,727
def blk_coverage_1d ( blk , size ) : rem = size % blk maxpix = size - rem return maxpix , rem
Return the part of a 1d array covered by a block .
35
13
3,728
def max_blk_coverage ( blk , shape ) : return tuple ( blk_coverage_1d ( b , s ) [ 0 ] for b , s in zip ( blk , shape ) )
Return the maximum shape of an array covered by a block .
47
12
3,729
def blk_nd_short ( blk , shape ) : internals = ( blk_1d_short ( b , s ) for b , s in zip ( blk , shape ) ) return product ( * internals )
Iterate through the blocks that strictly cover an array .
50
11
3,730
def blk_nd ( blk , shape ) : internals = ( blk_1d ( b , s ) for b , s in zip ( blk , shape ) ) return product ( * internals )
Iterate through the blocks that cover an array .
46
10
3,731
def block_view ( arr , block = ( 3 , 3 ) ) : # simple shape and strides computations may seem at first strange # unless one is able to recognize the 'tuple additions' involved ;-) shape = ( arr . shape [ 0 ] // block [ 0 ] , arr . shape [ 1 ] // block [ 1 ] ) + block strides = ( block [ 0 ] * arr . strides [ 0 ] , block [ 1 ] * arr . strides [ 1 ] ) + arr . strides return ast ( arr , shape = shape , strides = strides )
Provide a 2D block view to 2D array .
117
12
3,732
def is_citeable ( publication_info ) : def _item_has_pub_info ( item ) : return all ( key in item for key in ( 'journal_title' , 'journal_volume' ) ) def _item_has_page_or_artid ( item ) : return any ( key in item for key in ( 'page_start' , 'artid' ) ) has_pub_info = any ( _item_has_pub_info ( item ) for item in publication_info ) has_page_or_artid = any ( _item_has_page_or_artid ( item ) for item in publication_info ) return has_pub_info and has_page_or_artid
Check some fields in order to define if the article is citeable .
159
14
3,733
def add_abstract ( self , abstract , source = None ) : self . _append_to ( 'abstracts' , self . _sourced_dict ( source , value = abstract . strip ( ) , ) )
Add abstract .
48
3
3,734
def add_arxiv_eprint ( self , arxiv_id , arxiv_categories ) : self . _append_to ( 'arxiv_eprints' , { 'value' : arxiv_id , 'categories' : arxiv_categories , } ) self . set_citeable ( True )
Add arxiv eprint .
78
7
3,735
def add_doi ( self , doi , source = None , material = None ) : if doi is None : return try : doi = idutils . normalize_doi ( doi ) except AttributeError : return if not doi : return dois = self . _sourced_dict ( source , value = doi ) if material is not None : dois [ 'material' ] = material self . _append_to ( 'dois' , dois )
Add doi .
96
3
3,736
def make_author ( self , full_name , affiliations = ( ) , roles = ( ) , raw_affiliations = ( ) , source = None , ids = ( ) , emails = ( ) , alternative_names = ( ) ) : builder = SignatureBuilder ( ) builder . set_full_name ( full_name ) for affiliation in affiliations : builder . add_affiliation ( affiliation ) for role in roles : builder . add_inspire_role ( role ) for raw_affiliation in raw_affiliations : builder . add_raw_affiliation ( raw_affiliation , source or self . source ) for id_schema , id_value in ids : if id_schema and id_value : builder . set_uid ( id_value , schema = id_schema ) for email in emails : builder . add_email ( email ) for alternative_name in alternative_names : builder . add_alternative_name ( alternative_name ) return builder . obj
Make a subrecord representing an author .
214
8
3,737
def add_book ( self , publisher = None , place = None , date = None ) : imprint = { } if date is not None : imprint [ 'date' ] = normalize_date ( date ) if place is not None : imprint [ 'place' ] = place if publisher is not None : imprint [ 'publisher' ] = publisher self . _append_to ( 'imprints' , imprint )
Make a dictionary that is representing a book .
87
9
3,738
def add_inspire_categories ( self , subject_terms , source = None ) : for category in subject_terms : category_dict = self . _sourced_dict ( source , term = category , ) self . _append_to ( 'inspire_categories' , category_dict )
Add inspire categories .
65
4
3,739
def add_keyword ( self , keyword , schema = None , source = None ) : keyword_dict = self . _sourced_dict ( source , value = keyword ) if schema is not None : keyword_dict [ 'schema' ] = schema self . _append_to ( 'keywords' , keyword_dict )
Add a keyword .
70
4
3,740
def add_private_note ( self , private_notes , source = None ) : self . _append_to ( '_private_notes' , self . _sourced_dict ( source , value = private_notes , ) )
Add private notes .
50
4
3,741
def add_publication_info ( self , year = None , cnum = None , artid = None , page_end = None , page_start = None , journal_issue = None , journal_title = None , journal_volume = None , pubinfo_freetext = None , material = None , parent_record = None , parent_isbn = None , ) : # If only journal title is present, and no other fields, assume the # paper was submitted, but not yet published if journal_title and all ( not field for field in ( cnum , artid , journal_issue , journal_volume , page_start , page_end ) ) : self . add_public_note ( 'Submitted to {}' . format ( journal_title ) ) return publication_item = { } for key in ( 'cnum' , 'artid' , 'page_end' , 'page_start' , 'journal_issue' , 'journal_title' , 'journal_volume' , 'year' , 'pubinfo_freetext' , 'material' ) : if locals ( ) [ key ] is not None : publication_item [ key ] = locals ( ) [ key ] if parent_record is not None : parent_item = { '$ref' : parent_record } publication_item [ 'parent_record' ] = parent_item if parent_isbn is not None : publication_item [ 'parent_isbn' ] = normalize_isbn ( parent_isbn ) if page_start and page_end : try : self . add_number_of_pages ( int ( page_end ) - int ( page_start ) + 1 ) except ( TypeError , ValueError ) : pass self . _append_to ( 'publication_info' , publication_item ) if is_citeable ( self . record [ 'publication_info' ] ) : self . set_citeable ( True )
Add publication info .
421
4
3,742
def add_thesis ( self , defense_date = None , degree_type = None , institution = None , date = None ) : self . record . setdefault ( 'thesis_info' , { } ) thesis_item = { } for key in ( 'defense_date' , 'date' ) : if locals ( ) [ key ] is not None : thesis_item [ key ] = locals ( ) [ key ] if degree_type is not None : thesis_item [ 'degree_type' ] = degree_type . lower ( ) if institution is not None : thesis_item [ 'institutions' ] = [ { 'name' : institution } ] self . record [ 'thesis_info' ] = thesis_item
Add thesis info .
158
4
3,743
def add_license ( self , url = None , license = None , material = None , imposing = None ) : hep_license = { } try : license_from_url = get_license_from_url ( url ) if license_from_url is not None : license = license_from_url except ValueError : pass for key in ( 'url' , 'license' , 'material' , 'imposing' ) : if locals ( ) [ key ] is not None : hep_license [ key ] = locals ( ) [ key ] self . _append_to ( 'license' , hep_license )
Add license .
131
3
3,744
def add_public_note ( self , public_note , source = None ) : self . _append_to ( 'public_notes' , self . _sourced_dict ( source , value = public_note , ) )
Add public note .
49
4
3,745
def add_title ( self , title , subtitle = None , source = None ) : title_entry = self . _sourced_dict ( source , title = title , ) if subtitle is not None : title_entry [ 'subtitle' ] = subtitle self . _append_to ( 'titles' , title_entry )
Add title .
70
3
3,746
def add_title_translation ( self , title , language , source = None ) : title_translation = self . _sourced_dict ( source , title = title , language = language , ) self . _append_to ( 'title_translations' , title_translation )
Add title translation .
59
4
3,747
def add_report_number ( self , report_number , source = None ) : self . _append_to ( 'report_numbers' , self . _sourced_dict ( source , value = report_number , ) )
Add report numbers .
50
4
3,748
def add_collaboration ( self , collaboration ) : collaborations = normalize_collaboration ( collaboration ) for collaboration in collaborations : self . _append_to ( 'collaborations' , { 'value' : collaboration } )
Add collaboration .
49
3
3,749
def add_copyright ( self , material = None , holder = None , statement = None , url = None , year = None ) : copyright = { } for key in ( 'holder' , 'statement' , 'url' ) : if locals ( ) [ key ] is not None : copyright [ key ] = locals ( ) [ key ] if material is not None : copyright [ 'material' ] = material . lower ( ) if year is not None : copyright [ 'year' ] = int ( year ) self . _append_to ( 'copyright' , copyright )
Add Copyright .
121
3
3,750
def add_figure ( self , key , url , * * kwargs ) : figure = self . _check_metadata_for_file ( key = key , url = url , * * kwargs ) for dict_key in ( 'caption' , 'label' , 'material' , 'filename' , 'url' , 'original_url' , ) : if kwargs . get ( dict_key ) is not None : figure [ dict_key ] = kwargs [ dict_key ] if key_already_there ( figure , self . record . get ( 'figures' , ( ) ) ) : raise ValueError ( 'There\'s already a figure with the key %s.' % figure [ 'key' ] ) self . _append_to ( 'figures' , figure ) self . add_document
Add a figure .
180
4
3,751
def fit_offset_and_rotation ( coords0 , coords1 ) : coords0 = numpy . asarray ( coords0 ) coords1 = numpy . asarray ( coords1 ) cp = coords0 . mean ( axis = 0 ) cq = coords1 . mean ( axis = 0 ) p0 = coords0 - cp q0 = coords1 - cq crossvar = numpy . dot ( numpy . transpose ( p0 ) , q0 ) u , _ , vt = linalg . svd ( crossvar ) d = linalg . det ( u ) * linalg . det ( vt ) if d < 0 : u [ : , - 1 ] = - u [ : , - 1 ] rot = numpy . transpose ( numpy . dot ( u , vt ) ) # Operation is # B - B0 = R(A - A0) # So off is B0 -R * A0 # The inverse operation is # A - A0 = R* (B- B0) # So inverse off* is A - R* B0 # where R* = transpose(R) # R * off* = -off off = - numpy . dot ( rot , cp ) + cq return off , rot
Fit a rotation and a translation between two sets of points .
282
13
3,752
def pil_image3d ( input , size = ( 800 , 600 ) , pcb_rotate = ( 0 , 0 , 0 ) , timeout = 20 , showgui = False ) : f = tempfile . NamedTemporaryFile ( suffix = '.png' , prefix = 'eagexp_' ) output = f . name export_image3d ( input , output = output , size = size , pcb_rotate = pcb_rotate , timeout = timeout , showgui = showgui ) im = Image . open ( output ) return im
Same as export_image3d , but there is no output file ; a PIL object is returned instead
119
19
3,753
def _make_color_fn ( color ) : def _color ( text = "" ) : return ( _color_sep + color + _color_sep2 + text + _color_sep + "default" + _color_sep2 ) return _color
Create a function that set the foreground color .
59
9
3,754
def just_log ( * texts , sep = "" ) : if config . silent : return text = _color_sep + "default" + _color_sep2 + sep . join ( texts ) array = text . split ( _color_sep ) for part in array : parts = part . split ( _color_sep2 , 1 ) if len ( parts ) != 2 or not parts [ 1 ] : continue if not config . color : print ( parts [ 1 ] , end = '' ) else : colors . foreground ( parts [ 0 ] ) print ( parts [ 1 ] , end = '' , flush = colors . is_win32 ) if config . color : colors . foreground ( "default" ) print ( )
Log a text without adding the current time .
155
9
3,755
def log ( * texts , sep = "" ) : text = sep . join ( texts ) count = text . count ( "\n" ) just_log ( "\n" * count , * get_time ( ) , text . replace ( "\n" , "" ) , sep = sep )
Log a text .
61
4
3,756
def find_files ( globs ) : last_cwd = os . getcwd ( ) os . chdir ( config . cwd ) gex , gin = separate_globs ( globs ) # Find excluded files exclude = [ ] for glob in gex : parse_glob ( glob , exclude ) files = [ ] include = [ ] order = 0 # Find included files and removed excluded files for glob in gin : order += 1 array = parse_glob ( glob , include ) base = find_base ( glob ) for file in array : if file not in exclude : files . append ( ( order , base , file ) ) os . chdir ( last_cwd ) return files
Find files to include .
148
5
3,757
def src ( globs , * * options ) : # Create an array of globs if only one string is given if isinstance ( globs , str ) : globs = [ globs ] # Find files files = find_files ( globs ) # Create a stream stream = Stream ( ) # Options options [ "cwd" ] = config . cwd if "base" in options : options [ "base" ] = os . path . abspath ( options [ "base" ] ) # Create a File object for each file to include for infile in files : file = File ( infile [ 2 ] , * * options ) file . relpath = file . path file . order = infile [ 0 ] file . base = options . get ( "base" , infile [ 1 ] ) stream . append_file ( file ) # No more files to add stream . end_of_stream ( ) # Pipe a file reader and return the stream if options . get ( "read" , True ) : return stream . pipe ( FileReader ( ) ) return stream
Read some files and return a stream .
225
8
3,758
def log_to_history ( logger , name ) : def log_to_history_decorator ( method ) : def l2h_method ( self , ri ) : history_header = fits . Header ( ) fh = FITSHistoryHandler ( history_header ) fh . setLevel ( logging . INFO ) logger . addHandler ( fh ) try : result = method ( self , ri ) field = getattr ( result , name , None ) if field : with field . open ( ) as hdulist : hdr = hdulist [ 0 ] . header hdr . extend ( history_header . cards ) return result finally : logger . removeHandler ( fh ) return l2h_method return log_to_history_decorator
Decorate function adding a logger handler stored in FITS .
168
12
3,759
def create_db_info ( ) : result = { } result [ 'instrument' ] = '' result [ 'uuid' ] = '' result [ 'tags' ] = { } result [ 'type' ] = '' result [ 'mode' ] = '' result [ 'observation_date' ] = "" result [ 'origin' ] = { } return result
Create metadata structure
79
3
3,760
def task ( obj = None , deps = None ) : # The decorator is not used as a function if callable ( obj ) : __task ( obj . __name__ , obj ) return obj # The decorator is used as a function def __decorated ( func ) : __task ( obj if obj else obj . __name__ , deps , func ) return func return __decorated
Decorator for creating a task .
86
8
3,761
def _read_one_byte ( self , fd ) : c = os . read ( fd , 1 ) if not c : raise OSError return c
Read a single byte or raise OSError on failure .
36
13
3,762
def arg_file_is_new ( parser , arg , mode = 'w' ) : if os . path . exists ( arg ) : parser . error ( "\nThe file \"%s\"\nalready exists and " "cannot be overwritten!" % arg ) else : # return an open file handle handler = open ( arg , mode = mode ) return handler
Auxiliary function to give an error if the file already exists .
79
14
3,763
def intersection_spectrail_arcline ( spectrail , arcline ) : # approximate location of the solution expected_x = ( arcline . xlower_line + arcline . xupper_line ) / 2.0 # composition of polynomials to find intersection as # one of the roots of a new polynomial rootfunct = arcline . poly_funct ( spectrail . poly_funct ) rootfunct . coef [ 1 ] -= 1 # compute roots to find solution tmp_xroots = rootfunct . roots ( ) # take the nearest root to the expected location xroot = tmp_xroots [ np . abs ( tmp_xroots - expected_x ) . argmin ( ) ] if np . isreal ( xroot ) : xroot = xroot . real else : raise ValueError ( "xroot=" + str ( xroot ) + " is a complex number" ) yroot = spectrail . poly_funct ( xroot ) return xroot , yroot
Compute intersection of spectrum trail with arc line .
220
10
3,764
def offset ( self , offset_value ) : new_instance = deepcopy ( self ) new_instance . poly_funct . coef [ 0 ] += offset_value return new_instance
Return a copy of self shifted a constant offset .
41
10
3,765
def compute_operation ( file1 , file2 , operation , output , display , args_z1z2 , args_bbox , args_keystitle , args_geometry ) : # read first FITS file with fits . open ( file1 ) as hdulist : image_header1 = hdulist [ 0 ] . header image1 = hdulist [ 0 ] . data . astype ( np . float ) naxis1 = image_header1 [ 'naxis1' ] naxis2 = image_header1 [ 'naxis2' ] # if required, display file1 if display == 'all' : ximshow_file ( file1 . name , args_z1z2 = args_z1z2 , args_bbox = args_bbox , args_keystitle = args_keystitle , args_geometry = args_geometry , debugplot = 12 ) # read second FITS file with fits . open ( file2 ) as hdulist : image_header2 = hdulist [ 0 ] . header image2 = hdulist [ 0 ] . data . astype ( np . float ) naxis1_ = image_header2 [ 'naxis1' ] naxis2_ = image_header2 [ 'naxis2' ] # if required, display file2 if display == 'all' : ximshow_file ( file2 . name , args_z1z2 = args_z1z2 , args_bbox = args_bbox , args_keystitle = args_keystitle , args_geometry = args_geometry , debugplot = 12 ) # check dimensions if naxis1 != naxis1_ : raise ValueError ( "NAXIS1 values are different." ) if naxis2 != naxis2_ : raise ValueError ( "NAXIS2 values are different." ) # compute operation if operation == "+" : solution = image1 + image2 elif operation == "-" : solution = image1 - image2 elif operation == "*" : solution = image1 * image2 elif operation == "/" : solution = image1 / image2 else : raise ValueError ( "Unexpected operation=" + str ( operation ) ) # save output file hdu = fits . PrimaryHDU ( solution . astype ( np . float ) , image_header1 ) hdu . writeto ( output , overwrite = True ) # if required, display result if display in [ 'all' , 'result' ] : ximshow_file ( output . name , args_z1z2 = args_z1z2 , args_bbox = args_bbox , args_keystitle = args_keystitle , args_geometry = args_geometry , debugplot = 12 )
Compute output = file1 operation file2 .
610
10
3,766
def robust_std ( x , debug = False ) : x = numpy . asarray ( x ) # compute percentiles and robust estimator q25 = numpy . percentile ( x , 25 ) q75 = numpy . percentile ( x , 75 ) sigmag = 0.7413 * ( q75 - q25 ) if debug : print ( 'debug|sigmag -> q25......................:' , q25 ) print ( 'debug|sigmag -> q75......................:' , q75 ) print ( 'debug|sigmag -> Robust standard deviation:' , sigmag ) return sigmag
Compute a robust estimator of the standard deviation
132
10
3,767
def summary ( x , rm_nan = False , debug = False ) : # protections if type ( x ) is np . ndarray : xx = np . copy ( x ) else : if type ( x ) is list : xx = np . array ( x ) else : raise ValueError ( 'x=' + str ( x ) + ' must be a numpy.ndarray' ) if xx . ndim is not 1 : raise ValueError ( 'xx.dim=' + str ( xx . ndim ) + ' must be 1' ) # filter out NaN's if rm_nan : xx = xx [ np . logical_not ( np . isnan ( xx ) ) ] # compute basic statistics npoints = len ( xx ) ok = npoints > 0 result = { 'npoints' : npoints , 'minimum' : np . min ( xx ) if ok else 0 , 'percentile25' : np . percentile ( xx , 25 ) if ok else 0 , 'median' : np . percentile ( xx , 50 ) if ok else 0 , 'mean' : np . mean ( xx ) if ok else 0 , 'percentile75' : np . percentile ( xx , 75 ) if ok else 0 , 'maximum' : np . max ( xx ) if ok else 0 , 'std' : np . std ( xx ) if ok else 0 , 'robust_std' : robust_std ( xx ) if ok else 0 , 'percentile15' : np . percentile ( xx , 15.86553 ) if ok else 0 , 'percentile84' : np . percentile ( xx , 84.13447 ) if ok else 0 } if debug : print ( '>>> ========================================' ) print ( '>>> STATISTICAL SUMMARY:' ) print ( '>>> ----------------------------------------' ) print ( '>>> Number of points.........:' , result [ 'npoints' ] ) print ( '>>> Minimum..................:' , result [ 'minimum' ] ) print ( '>>> 1st Quartile.............:' , result [ 'percentile25' ] ) print ( '>>> Median...................:' , result [ 'median' ] ) print ( '>>> Mean.....................:' , result [ 'mean' ] ) print ( '>>> 3rd Quartile.............:' , result [ 'percentile75' ] ) print ( '>>> Maximum..................:' , result [ 'maximum' ] ) print ( '>>> ----------------------------------------' ) print ( '>>> Standard deviation.......:' , result [ 'std' ] ) print ( '>>> Robust standard deviation:' , result [ 'robust_std' ] ) print ( '>>> 0.1586553 percentile.....:' , result [ 'percentile15' ] ) print ( '>>> 0.8413447 percentile.....:' , result [ 'percentile84' ] ) 
print ( '>>> ========================================' ) return result
Compute basic statistical parameters .
601
6
3,768
def fit_trace_polynomial ( trace , deg , axis = 0 ) : dispaxis = axis_to_dispaxis ( axis ) # FIT to a polynomial pfit = numpy . polyfit ( trace [ : , 0 ] , trace [ : , 1 ] , deg ) start = trace [ 0 , 0 ] stop = trace [ - 1 , 0 ] , return PolyTrace ( start , stop , axis , pfit )
Fit a trace information table to a polynomial .
96
11
3,769
def price_humanized ( value , inst , currency = None ) : return ( natural_number_with_currency ( value , ugettext ( 'CZK' ) if currency is None else currency ) if value is not None else ugettext ( '(None)' ) )
Return a humanized price
59
5
3,770
def get_imgid ( self , img ) : imgid = img . filename ( ) # More heuristics here... # get FILENAME keyword, CHECKSUM, for example... hdr = self . get_header ( img ) if 'checksum' in hdr : return hdr [ 'checksum' ] if 'filename' in hdr : return hdr [ 'filename' ] if not imgid : imgid = repr ( img ) return imgid
Obtain a unique identifier of the image .
101
9
3,771
def log_starting ( self ) : self . start_time = time . perf_counter ( ) logger . log ( "Starting '" , logger . cyan ( self . name ) , "'..." )
Log that the task has started .
42
7
3,772
def log_finished ( self ) : delta = time . perf_counter ( ) - self . start_time logger . log ( "Finished '" , logger . cyan ( self . name ) , "' after " , logger . magenta ( time_to_text ( delta ) ) )
Log that this task is done .
61
7
3,773
def call_task_fn ( self ) : if not self . fn : return self . log_finished ( ) future = asyncio . Future ( ) future . add_done_callback ( lambda x : self . log_finished ( ) ) if inspect . iscoroutinefunction ( self . fn ) : f = asyncio . ensure_future ( self . fn ( ) ) f . add_done_callback ( lambda x : self . bind_end ( x . result ( ) , future ) ) else : self . bind_end ( self . fn ( ) , future ) return future
Call the function attached to the task .
123
8
3,774
def bind_end ( self , stream , future ) : if not isinstance ( stream , Stream ) : future . set_result ( None ) else : stream . pipe ( TaskEndTransformer ( future ) )
Bind a TaskEndTransformer to a stream .
44
10
3,775
async def start_deps ( self , deps ) : # Get only new dependencies deps = list ( filter ( lambda dep : dep not in self . called , deps ) ) self . called += deps # Start only existing dependencies runners = list ( filter ( lambda x : x and x . future , map ( lambda dep : pylp . start ( dep ) , deps ) ) ) if len ( runners ) != 0 : await asyncio . wait ( map ( lambda runner : runner . future , runners ) ) # Call the attached function future = self . call_task_fn ( ) if future : await future
Start running dependencies .
132
4
3,776
def frommembers ( cls , members = ( ) ) : return cls . fromint ( sum ( map ( cls . _map . __getitem__ , set ( members ) ) ) )
Create a set from an iterable of members .
42
10
3,777
def frombools ( cls , bools = ( ) ) : return cls . fromint ( sum ( compress ( cls . _atoms , bools ) ) )
Create a set from an iterable of boolean evaluable items .
38
13
3,778
def frombits ( cls , bits = '0' ) : if len ( bits ) > cls . _len : raise ValueError ( 'too many bits %r' % ( bits , ) ) return cls . fromint ( bits [ : : - 1 ] , 2 )
Create a set from binary string .
60
7
3,779
def atoms ( self , reverse = False ) : if reverse : return filter ( self . __and__ , reversed ( self . _atoms ) ) return filter ( self . __and__ , self . _atoms )
Yield the singleton for every set member .
46
10
3,780
def inatoms ( self , reverse = False ) : if reverse : return filterfalse ( self . __and__ , reversed ( self . _atoms ) ) return filterfalse ( self . __and__ , self . _atoms )
Yield the singleton for every non - member .
50
11
3,781
def powerset ( self , start = None , excludestart = False ) : if start is None : start = self . infimum other = self . atoms ( ) else : if self | start != self : raise ValueError ( '%r is no subset of %r' % ( start , self ) ) other = self . fromint ( self & ~ start ) . atoms ( ) return map ( self . frombitset , combos . shortlex ( start , list ( other ) ) )
Yield combinations from start to self in short lexicographic order .
104
14
3,782
def change ( obj , * * changed_fields ) : obj_field_names = { field . name for field in obj . _meta . fields } | { field . attname for field in obj . _meta . fields } | { 'pk' } for field_name , value in changed_fields . items ( ) : if field_name not in obj_field_names : raise ValueError ( "'{}' is an invalid field name" . format ( field_name ) ) setattr ( obj , field_name , value ) return obj
Changes a given changed_fields on object and returns changed object .
117
13
3,783
def change_and_save ( obj , update_only_changed_fields = False , save_kwargs = None , * * changed_fields ) : from chamber . models import SmartModel change ( obj , * * changed_fields ) if update_only_changed_fields and not isinstance ( obj , SmartModel ) : raise TypeError ( 'update_only_changed_fields can be used only with SmartModel' ) save_kwargs = save_kwargs if save_kwargs is not None else { } if update_only_changed_fields : save_kwargs [ 'update_only_changed_fields' ] = True obj . save ( * * save_kwargs ) return obj
Changes a given changed_fields on object saves it and returns changed object .
149
15
3,784
def bulk_change_and_save ( iterable , update_only_changed_fields = False , save_kwargs = None , * * changed_fields ) : return [ change_and_save ( obj , update_only_changed_fields = update_only_changed_fields , save_kwargs = save_kwargs , * * changed_fields ) for obj in iterable ]
Changes a given changed_fields on each object in a given iterable saves objects and returns the changed objects .
84
22
3,785
def gauss_box_model ( x , amplitude = 1.0 , mean = 0.0 , stddev = 1.0 , hpix = 0.5 ) : z = ( x - mean ) / stddev z2 = z + hpix / stddev z1 = z - hpix / stddev return amplitude * ( norm . cdf ( z2 ) - norm . cdf ( z1 ) )
Integrate a Gaussian profile .
93
7
3,786
def gauss_box_model_deriv ( x , amplitude = 1.0 , mean = 0.0 , stddev = 1.0 , hpix = 0.5 ) : z = ( x - mean ) / stddev z2 = z + hpix / stddev z1 = z - hpix / stddev da = norm . cdf ( z2 ) - norm . cdf ( z1 ) fp2 = norm_pdf_t ( z2 ) fp1 = norm_pdf_t ( z1 ) dl = - amplitude / stddev * ( fp2 - fp1 ) ds = - amplitude / stddev * ( fp2 * z2 - fp1 * z1 ) dd = amplitude / stddev * ( fp2 + fp1 ) return da , dl , ds , dd
Derivative of the integral of a Gaussian profile .
190
12
3,787
def find_peaks_spectrum ( sx , nwinwidth , threshold = 0 , debugplot = 0 ) : if type ( sx ) is not np . ndarray : raise ValueError ( "sx=" + str ( sx ) + " must be a numpy.ndarray" ) elif sx . ndim is not 1 : raise ValueError ( "sx.ndim=" + str ( sx . ndim ) + " must be 1" ) sx_shape = sx . shape nmed = nwinwidth // 2 if debugplot >= 10 : print ( 'find_peaks_spectrum> sx shape......:' , sx_shape ) print ( 'find_peaks_spectrum> nwinwidth.....:' , nwinwidth ) print ( 'find_peaks_spectrum> nmed..........:' , nmed ) print ( 'find_peaks_spectrum> data_threshold:' , threshold ) print ( 'find_peaks_spectrum> the first and last' , nmed , 'pixels will be ignored' ) xpeaks = [ ] # list to store the peaks if sx_shape [ 0 ] < nwinwidth : print ( 'find_peaks_spectrum> sx shape......:' , sx_shape ) print ( 'find_peaks_spectrum> nwinwidth.....:' , nwinwidth ) raise ValueError ( 'sx.shape < nwinwidth' ) i = nmed while i < sx_shape [ 0 ] - nmed : if sx [ i ] > threshold : peak_ok = True j = 0 loop = True while loop : if sx [ i - nmed + j ] > sx [ i - nmed + j + 1 ] : peak_ok = False j += 1 loop = ( j < nmed ) and peak_ok if peak_ok : j = nmed + 1 loop = True while loop : if sx [ i - nmed + j - 1 ] < sx [ i - nmed + j ] : peak_ok = False j += 1 loop = ( j < nwinwidth ) and peak_ok if peak_ok : xpeaks . append ( i ) i += nwinwidth - 1 else : i += 1 else : i += 1 ixpeaks = np . array ( xpeaks ) if debugplot >= 10 : print ( 'find_peaks_spectrum> number of peaks found:' , len ( ixpeaks ) ) print ( ixpeaks ) return ixpeaks
Find peaks in array .
555
5
3,788
def make_readable_path ( path ) : home = os . path . expanduser ( "~" ) if path . startswith ( home ) : path = "~" + path [ len ( home ) : ] return path
Make a path more readable
49
5
3,789
def shortlex ( start , other , excludestart = False ) : if not excludestart : yield start queue = collections . deque ( [ ( start , other ) ] ) while queue : current , other = queue . popleft ( ) while other : first , other = other [ 0 ] , other [ 1 : ] result = current | first yield result if other : queue . append ( ( result , other ) )
Yield all unions of start with other in shortlex order .
91
13
3,790
def reverse_shortlex ( end , other , excludeend = False ) : if not excludeend : yield end queue = collections . deque ( [ ( end , other ) ] ) while queue : current , other = queue . popleft ( ) while other : first , other = other [ 0 ] , other [ 1 : ] result = current & first yield result if other : queue . append ( ( result , other ) )
Yield all intersections of end with other in reverse shortlex order .
89
14
3,791
def generate_filename ( self , instance , filename ) : from unidecode import unidecode return super ( ) . generate_filename ( instance , unidecode ( force_text ( filename ) ) )
removes UTF chars from filename
43
6
3,792
def next ( self ) : if self . blocking >= 0 : # returns queue name and item, we just need item res = self . redis . blpop ( [ self . name ] , timeout = self . blocking ) if res : res = res [ 1 ] else : res = self . redis . lpop ( self . name ) value = self . deserialize ( res ) logger . debug ( 'Popped from "%s": %s' , self . name , repr ( value ) ) return value
Retrieve the next item in the queue .
107
9
3,793
def send ( self , * args ) : # this and the serializer could use some streamlining if None in args : raise TypeError ( 'None is not a valid queue item.' ) serialized_values = [ self . serialize ( value ) for value in args ] logger . debug ( 'Sending to "%s": %s' , self . name , serialized_values ) return self . redis . rpush ( self . name , * serialized_values )
Send a value to this LIFO Queue .
100
11
3,794
def clear ( self ) : logger . debug ( 'Clearing queue: "%s"' , self . name ) return self . redis . delete ( self . name )
Clear any existing values from this queue .
35
8
3,795
def u2i ( uint32 ) : mask = ( 2 ** 32 ) - 1 if uint32 & ( 1 << 31 ) : v = uint32 | ~ mask else : v = uint32 & mask return v
Converts a 32 bit unsigned number to signed .
45
10
3,796
def _u2i ( uint32 ) : v = u2i ( uint32 ) if v < 0 : if exceptions : raise ApigpioError ( error_text ( v ) ) return v
Converts a 32 bit unsigned number to signed . If the number is negative it indicates an error . On error a pigpio exception will be raised if exceptions is True .
43
35
3,797
def append ( self , cb ) : self . callbacks . append ( cb . callb ) self . monitor = self . monitor | cb . callb . bit yield from self . pi . _pigpio_aio_command ( _PI_CMD_NB , self . handle , self . monitor )
Adds a callback .
70
4
3,798
def remove ( self , cb ) : if cb in self . callbacks : self . callbacks . remove ( cb ) new_monitor = 0 for c in self . callbacks : new_monitor |= c . bit if new_monitor != self . monitor : self . monitor = new_monitor yield from self . pi . _pigpio_aio_command ( _PI_CMD_NB , self . handle , self . monitor )
Removes a callback .
98
5
3,799
def _pigpio_aio_command ( self , cmd , p1 , p2 , ) : with ( yield from self . _lock ) : data = struct . pack ( 'IIII' , cmd , p1 , p2 , 0 ) self . _loop . sock_sendall ( self . s , data ) response = yield from self . _loop . sock_recv ( self . s , 16 ) _ , res = struct . unpack ( '12sI' , response ) return res
Runs a pigpio socket command .
110
9