idx int64 0 63k | question stringlengths 61 4.03k | target stringlengths 6 1.23k |
|---|---|---|
21,300 | def distribute ( self , f , n ) : if self . pool is None : return [ f ( i ) for i in range ( n ) ] else : return self . pool . map ( f , range ( n ) ) | Distribute the computations amongst the multiprocessing pools |
21,301 | def terminate_pool ( self ) : if self . pool is not None : self . pool . terminate ( ) self . pool . join ( ) del ( self . pool ) self . pool = None | Terminate and close the multiprocessing pool if necessary . |
21,302 | def eval_objfn ( self ) : dfd = self . obfn_dfd ( ) prj = sp . proj_l1 ( self . obfn_gvar ( ) , self . gamma , axis = self . cri . axisN + ( self . cri . axisC , self . cri . axisM ) ) cns = np . linalg . norm ( prj - self . obfn_gvar ( ) ) return ( dfd , cns ) | Compute components of regularisation function as well as total objective function . |
21,303 | def ystep ( self ) : amidx = self . index_addmsk ( ) Yi = self . cbpdn . AX [ amidx ] + self . cbpdn . U [ amidx ] self . inner_ystep ( ) Yi [ np . where ( self . W . astype ( np . bool ) ) ] = 0.0 self . cbpdn . Y [ amidx ] = Yi | This method is inserted into the inner cbpdn object replacing its own ystep method thereby providing a hook for applying the additional steps necessary for the AMS method . |
21,304 | def obfn_gvar ( self ) : gv = self . inner_obfn_gvar ( ) . copy ( ) gv [ ... , - self . cri . Cd : ] = 0 return gv | This method is inserted into the inner cbpdn object replacing its own obfn_gvar method thereby providing a hook for applying the additional steps necessary for the AMS method . |
21,305 | def solve ( self ) : Xi = self . cbpdn . solve ( ) self . timer = self . cbpdn . timer self . itstat = self . cbpdn . itstat return Xi | Call the solve method of the inner cbpdn object and return the result . |
21,306 | def reconstruct ( self , b , X = None ) : if X is None : X = self . getcoef ( ) Xf = sl . rfftn ( X , None , self . cbpdn . cri . axisN ) slc = ( slice ( None ) , ) * self . dimN + ( slice ( self . chncs [ b ] , self . chncs [ b + 1 ] ) , ) Sf = np . sum ( self . cbpdn . Df [ slc ] * Xf , axis = self . cbpdn . cri . axisM ) return sl . irfftn ( Sf , self . cbpdn . cri . Nv , self . cbpdn . cri . axisN ) | Reconstruct representation of signal b in signal set . |
21,307 | def _fix_dynamic_class_lookup ( cls , pstfx ) : extnm = '_' + cls . __name__ + '_' + pstfx mdl = sys . modules [ cls . __module__ ] setattr ( mdl , extnm , cls ) if hasattr ( cls , '__qualname__' ) : cls . __qualname__ = extnm else : cls . __name__ = extnm | Fix name lookup problem that prevents pickling of dynamically defined classes . |
21,308 | def solve_status_str ( hdrlbl , fmtmap = None , fwdth0 = 4 , fwdthdlt = 6 , fprec = 2 ) : if fmtmap is None : fmtmap = { } fwdthn = fprec + fwdthdlt fldfmt = [ fmtmap [ lbl ] if lbl in fmtmap else ( ( '%%%dd' % ( fwdth0 ) ) if idx == 0 else ( ( '%%%d.%de' % ( fwdthn , fprec ) ) ) ) for idx , lbl in enumerate ( hdrlbl ) ] fmtstr = ( ' ' ) . join ( fldfmt ) cre = re . compile ( r'%-?(\d+)' ) fldwid = [ ] for fmt in fldfmt : mtch = cre . match ( fmt ) if mtch is None : raise ValueError ( "Format string '%s' does not contain field " "width" % fmt ) else : fldwid . append ( int ( mtch . group ( 1 ) ) ) hdrlst = [ ( '%-*s' % ( w , t ) ) for t , w in zip ( hdrlbl , fldwid ) ] hdrstr = ( ' ' ) . join ( hdrlst ) return hdrstr , fmtstr , len ( hdrstr ) | Construct header and format details for status display of an iterative solver . |
21,309 | def set_attr ( self , name , val , dval = None , dtype = None , reset = False ) : if dval is not None and val is None : val = dval if dtype is not None and val is not None : if isinstance ( dtype , type ) : val = dtype ( val ) else : val = dtype . type ( val ) if reset or not hasattr ( self , name ) or ( hasattr ( self , name ) and getattr ( self , name ) is None ) : setattr ( self , name , val ) | Set an object attribute by its name . The attribute value can be specified as a primary value val and as default value dval that will be used if the primary value is None . This arrangement allows an attribute to be set from an entry in an options object passed as val while specifying a default value to use passed as dval in the event that the options entry is None . Unless reset is True the attribute is only set if it doesn t exist or if it exists with value None . This arrangement allows for attributes to be set in both base and derived class initialisers with the derived class value taking preference . |
21,310 | def _get_rank_limits ( comm , arrlen ) : rank = comm . Get_rank ( ) size = comm . Get_size ( ) end = 0 ranklen = int ( arrlen / size ) if rank < arrlen % size : ranklen += 1 end = comm . scan ( sendobj = ranklen , op = MPI . SUM ) begin = end - ranklen return ( begin , end ) | Determine the chunk of the grid that has to be computed per process . The grid has been flattened and has arrlen length . The chunk assigned to each process depends on its rank in the MPI communicator . |
21,311 | def relax_AX ( self ) : super ( ConvCnstrMODMaskDcpl_Consensus , self ) . relax_AX ( ) self . AX1nr = sl . irfftn ( sl . inner ( self . Zf , self . swapaxes ( self . Xf ) , axis = self . cri . axisM ) , self . cri . Nv , self . cri . axisN ) if self . rlx == 1.0 : self . AX1 = self . AX1nr else : alpha = self . rlx self . AX1 = alpha * self . AX1nr + ( 1 - alpha ) * ( self . Y1 + self . S ) | The parent class method that this method overrides only implements the relaxation step for the variables of the baseline consensus algorithm . This method calls the overridden method and then implements the relaxation step for the additional variables required for the mask decoupling modification to the baseline algorithm . |
21,312 | def xstep ( self ) : self . YU1 [ : ] = self . Y1 - self . U1 self . ZSf = np . conj ( self . Zf ) * ( self . Sf + sl . rfftn ( self . YU1 , None , self . cri . axisN ) ) rho = self . rho self . rho = 1.0 super ( ConvCnstrMODMaskDcpl_Consensus , self ) . xstep ( ) self . rho = rho | The xstep of the baseline consensus class from which this class is derived is re - used to implement the xstep of the modified algorithm by replacing self . ZSf which is constant in the baseline algorithm with a quantity derived from the additional variables self . Y1 and self . U1 . It is also necessary to set the penalty parameter to unity for the duration of the x step . |
21,313 | def compute_residuals ( self ) : r0 = self . rsdl_r ( self . AXnr , self . Y ) r1 = self . AX1nr - self . Y1 - self . S r = np . sqrt ( np . sum ( r0 ** 2 ) + np . sum ( r1 ** 2 ) ) ATU = self . swapaxes ( self . U ) + sl . irfftn ( np . conj ( self . Zf ) * sl . rfftn ( self . U1 , self . cri . Nv , self . cri . axisN ) , self . cri . Nv , self . cri . axisN ) s = self . rho * np . linalg . norm ( ATU ) nAX = np . sqrt ( np . linalg . norm ( self . AXnr ) ** 2 + np . linalg . norm ( self . AX1nr ) ** 2 ) nY = np . sqrt ( np . linalg . norm ( self . Y ) ** 2 + np . linalg . norm ( self . Y1 ) ** 2 ) rn = max ( nAX , nY , np . linalg . norm ( self . S ) ) sn = self . rho * np . sqrt ( np . linalg . norm ( self . U ) ** 2 + np . linalg . norm ( self . U1 ) ** 2 ) if self . opt [ 'AutoRho' , 'StdResiduals' ] : epri = np . sqrt ( self . Nc ) * self . opt [ 'AbsStopTol' ] + rn * self . opt [ 'RelStopTol' ] edua = np . sqrt ( self . Nx ) * self . opt [ 'AbsStopTol' ] + sn * self . opt [ 'RelStopTol' ] else : if rn == 0.0 : rn = 1.0 if sn == 0.0 : sn = 1.0 r /= rn s /= sn epri = np . sqrt ( self . Nc ) * self . opt [ 'AbsStopTol' ] / rn + self . opt [ 'RelStopTol' ] edua = np . sqrt ( self . Nx ) * self . opt [ 'AbsStopTol' ] / sn + self . opt [ 'RelStopTol' ] return r , s , epri , edua | Compute residuals and stopping thresholds . The parent class method is overridden to ensure that the residual calculations include the additional variables introduced in the modification to the baseline algorithm . |
21,314 | def obfn_fvar ( self ) : if self . opt [ 'fEvalX' ] : return self . X else : return self . cnst_c ( ) - self . cnst_B ( self . Y ) | Variable to be evaluated in computing regularisation term depending on fEvalX option value . |
21,315 | def normalise ( v ) : vn = np . sqrt ( np . sum ( v ** 2 , 0 ) ) vn [ vn == 0 ] = 1.0 return np . asarray ( v / vn , dtype = v . dtype ) | Normalise columns of matrix . |
21,316 | def rhochange ( self ) : self . lu , self . piv = sl . lu_factor ( self . Z , self . rho ) self . lu = np . asarray ( self . lu , dtype = self . dtype ) | Re - factorise matrix when rho changes |
21,317 | def cupy_wrapper ( func ) : @ functools . wraps ( func ) def wrapped ( * args , ** kwargs ) : args = list ( args ) for n , a in enumerate ( args ) : if isinstance ( a , np . ndarray ) : args [ n ] = cp . asarray ( a ) for k , v in kwargs . items ( ) : if isinstance ( v , np . ndarray ) : kwargs [ k ] = cp . asarray ( v ) rtn = func ( * args , ** kwargs ) if isinstance ( rtn , ( list , tuple ) ) : for n , a in enumerate ( rtn ) : if isinstance ( a , cp . core . core . ndarray ) : rtn [ n ] = cp . asnumpy ( a ) else : if isinstance ( rtn , cp . core . core . ndarray ) : rtn = cp . asnumpy ( rtn ) return rtn return wrapped | A wrapper function that converts numpy ndarray arguments to cupy arrays and convert any cupy arrays returned by the wrapped function into numpy ndarrays . |
21,318 | def block_sep1 ( self , Y ) : Y1 = Y [ ... , self . cri . M : ] if self . cri . Cd > 1 : shp = list ( Y1 . shape ) shp [ self . cri . axisM ] = self . cri . dimN shp [ self . cri . axisC ] = self . cri . Cd Y1 = Y1 . reshape ( shp ) Y1 = np . swapaxes ( Y1 [ ... , np . newaxis ] , self . cri . axisM , - 1 ) return Y1 | Separate variable into component corresponding to Y1 in Y . |
21,319 | def block_cat ( self , Y0 , Y1 ) : Y1sa = np . swapaxes ( Y1 , self . cri . axisM , - 1 ) [ ... , 0 ] if self . cri . Cd > 1 : shp = list ( Y1sa . shape ) shp [ self . cri . axisM ] *= shp [ self . cri . axisC ] shp [ self . cri . axisC ] = 1 Y1sa = Y1sa . reshape ( shp ) return np . concatenate ( ( Y0 , Y1sa ) , axis = self . cri . axisM ) | Concatenate components corresponding to Y0 and Y1 blocks into Y . |
21,320 | def obfn_g0var ( self ) : return self . var_y0 ( ) if self . opt [ 'gEvalY' ] else self . block_sep0 ( self . AXnr ) | Variable to be evaluated in computing the TV regularisation term depending on the gEvalY option value . |
21,321 | def rhochange ( self ) : self . lu , self . piv = sl . cho_factor ( self . D , self . rho ) self . lu = np . asarray ( self . lu , dtype = self . dtype ) | Re - factorise matrix when rho changes . |
21,322 | def rhochange ( self ) : self . Gamma = 1.0 / ( 1.0 + ( self . lmbda / self . rho ) * ( self . Alpha ** 2 ) ) | Action to be taken when rho parameter is changed . |
21,323 | def gpu_info ( ) : GPUInfo = namedtuple ( 'GPUInfo' , [ 'name' , 'driver' , 'totalmem' , 'freemem' ] ) gpus = GPUtil . getGPUs ( ) info = [ ] for g in gpus : info . append ( GPUInfo ( g . name , g . driver , g . memoryTotal , g . memoryFree ) ) return info | Return a list of namedtuples representing attributes of each GPU device . |
21,324 | def gpu_load ( wproc = 0.5 , wmem = 0.5 ) : GPULoad = namedtuple ( 'GPULoad' , [ 'processor' , 'memory' , 'weighted' ] ) gpus = GPUtil . getGPUs ( ) load = [ ] for g in gpus : wload = ( wproc * g . load + wmem * g . memoryUtil ) / ( wproc + wmem ) load . append ( GPULoad ( g . load , g . memoryUtil , wload ) ) return load | Return a list of namedtuples representing the current load for each GPU device . The processor and memory loads are fractions between 0 and 1 . The weighted load represents a weighted average of processor and memory loads using the parameters wproc and wmem respectively . |
21,325 | def device_by_load ( wproc = 0.5 , wmem = 0.5 ) : gl = gpu_load ( wproc = wproc , wmem = wmem ) return [ idx for idx , load in sorted ( enumerate ( [ g . weighted for g in gl ] ) , key = ( lambda x : x [ 1 ] ) ) ] | Get a list of GPU device ids ordered by increasing weighted average of processor and memory load . |
21,326 | def select_device_by_load ( wproc = 0.5 , wmem = 0.5 ) : ids = device_by_load ( wproc = wproc , wmem = wmem ) cp . cuda . Device ( ids [ 0 ] ) . use ( ) return ids [ 0 ] | Set the current device for cupy as the device with the lowest weighted average of processor and memory load . |
21,327 | def load_module ( name ) : spec = importlib . util . find_spec ( name ) mod = importlib . util . module_from_spec ( spec ) mod . __spec__ = spec mod . __loader__ = spec . loader spec . loader . exec_module ( mod ) return mod | Load the named module without registering it in sys . modules . |
21,328 | def patch_module ( name , pname , pfile = None , attrib = None ) : if attrib is None : attrib = { } spec = importlib . util . find_spec ( name ) spec . name = pname if pfile is not None : spec . origin = pfile spec . loader . name = pname mod = importlib . util . module_from_spec ( spec ) mod . __spec__ = spec mod . __loader__ = spec . loader sys . modules [ pname ] = mod spec . loader . exec_module ( mod ) for k , v in attrib . items ( ) : setattr ( mod , k , v ) return mod | Create a patched copy of the named module and register it in sys . modules . |
21,329 | def sporco_cupy_patch_module ( name , attrib = None ) : pname = re . sub ( '^sporco.' , 'sporco.cupy.' , name ) if attrib is None : attrib = { } attrib . update ( { 'np' : cp } ) mod = patch_module ( name , pname , pfile = 'patched' , attrib = attrib ) mod . __spec__ . has_location = False return mod | Create a copy of the named sporco module patch it to replace numpy with cupy and register it in sys . modules . |
21,330 | def _list2array ( lst ) : if lst and isinstance ( lst [ 0 ] , cp . ndarray ) : return cp . hstack ( lst ) else : return cp . asarray ( lst ) | Convert a list to a numpy array . |
21,331 | def sort_by_list_order ( sortlist , reflist , reverse = False , fltr = False , slemap = None ) : def keyfunc ( entry ) : if slemap is not None : rle = slemap ( entry ) if rle in reflist : return reflist . index ( rle ) else : return sortlist . index ( entry ) + len ( reflist ) if fltr : if slemap : sortlist = filter ( lambda x : slemap ( x ) in reflist , sortlist ) else : sortlist = filter ( lambda x : x in reflist , sortlist ) return sorted ( sortlist , key = keyfunc , reverse = reverse ) | Sort a list according to the order of entries in a reference list . |
21,332 | def get_module_classes ( module ) : clslst = get_module_members ( module , type = inspect . isclass ) return list ( filter ( lambda cls : not issubclass ( cls , Exception ) , clslst ) ) | Get a list of module member classes . |
21,333 | def write_module_docs ( pkgname , modpath , tmpltpath , outpath ) : dw = DocWriter ( outpath , tmpltpath ) modlst = get_module_names ( modpath , pkgname ) print ( 'Making api docs:' , end = '' ) for modname in modlst : if 'cupy' in modname or 'cuda' in modname : continue try : mod = importlib . import_module ( modname ) except ModuleNotFoundError : print ( 'Error importing module %s' % modname ) continue if mod . __file__ == 'patched' : continue if hasattr ( mod , '__path__' ) : srcpath = mod . __path__ [ 0 ] else : srcpath = mod . __file__ dstpath = os . path . join ( outpath , modname + '.rst' ) if is_newer_than ( srcpath , dstpath ) : print ( ' %s' % modname , end = '' ) dw . write ( mod ) print ( '' ) | Write the autosummary style docs for the specified package . |
21,334 | def write ( self , module ) : modname = module . __name__ ns = { } ns [ 'members' ] = dir ( module ) ns [ 'functions' ] = list ( map ( lambda x : x . __name__ , get_module_functions ( module ) ) ) ns [ 'classes' ] = list ( map ( lambda x : x . __name__ , get_module_classes ( module ) ) ) ns [ 'exceptions' ] = list ( map ( lambda x : x . __name__ , get_module_exceptions ( module ) ) ) ns [ 'fullname' ] = modname ns [ 'module' ] = modname ns [ 'objname' ] = modname ns [ 'name' ] = modname . split ( '.' ) [ - 1 ] ns [ 'objtype' ] = 'module' ns [ 'underline' ] = len ( modname ) * '=' rndr = self . template . render ( ** ns ) rstfile = os . path . join ( self . outpath , modname + '.rst' ) with open ( rstfile , 'w' ) as f : f . write ( rndr ) | Write the RST source document for generating the docs for a specified module . |
21,335 | def isxmap ( xmethod , opt ) : if xmethod == 'admm' : isx = { 'XPrRsdl' : 'PrimalRsdl' , 'XDlRsdl' : 'DualRsdl' , 'XRho' : 'Rho' } else : isx = { 'X_F_Btrack' : 'F_Btrack' , 'X_Q_Btrack' : 'Q_Btrack' , 'X_ItBt' : 'IterBTrack' , 'X_L' : 'L' , 'X_Rsdl' : 'Rsdl' } if not opt [ 'AccurateDFid' ] : isx . update ( evlmap ( True ) ) return isx | Return isxmap argument for . IterStatsConfig initialiser . |
21,336 | def isfld ( xmethod , dmethod , opt ) : fld = [ 'Iter' , 'ObjFun' , 'DFid' , 'RegL1' , 'Cnstr' ] if xmethod == 'admm' : fld . extend ( [ 'XPrRsdl' , 'XDlRsdl' , 'XRho' ] ) else : if opt [ 'CBPDN' , 'BackTrack' , 'Enabled' ] : fld . extend ( [ 'X_F_Btrack' , 'X_Q_Btrack' , 'X_ItBt' , 'X_L' , 'X_Rsdl' ] ) else : fld . extend ( [ 'X_L' , 'X_Rsdl' ] ) if dmethod != 'fista' : fld . extend ( [ 'DPrRsdl' , 'DDlRsdl' , 'DRho' ] ) else : if opt [ 'CCMOD' , 'BackTrack' , 'Enabled' ] : fld . extend ( [ 'D_F_Btrack' , 'D_Q_Btrack' , 'D_ItBt' , 'D_L' , 'D_Rsdl' ] ) else : fld . extend ( [ 'D_L' , 'D_Rsdl' ] ) fld . append ( 'Time' ) return fld | Return isfld argument for . IterStatsConfig initialiser . |
21,337 | def iterstats ( self , j , t , isx , isd , evl ) : vlst = [ ] for fnm in self . IterationStats . _fields : if fnm in self . isxmap : vlst . append ( getattr ( isx , self . isxmap [ fnm ] ) ) elif fnm in self . isdmap : vlst . append ( getattr ( isd , self . isdmap [ fnm ] ) ) elif fnm in self . evlmap : vlst . append ( evl [ fnm ] ) elif fnm == 'Iter' : vlst . append ( j ) elif fnm == 'Time' : vlst . append ( t ) else : vlst . append ( None ) return self . IterationStats . _make ( vlst ) | Construct IterationStats namedtuple from X step and D step IterationStats namedtuples . |
21,338 | def printiterstats ( self , itst ) : itdsp = tuple ( [ getattr ( itst , self . hdrmap [ col ] ) for col in self . hdrtxt ] ) print ( self . fmtstr % itdsp ) | Print iteration statistics . |
21,339 | def norm_nuclear ( X ) : r return np . sum ( np . linalg . svd ( sl . promote16 ( X ) , compute_uv = False ) ) | r Compute the nuclear norm |
21,340 | def deprecate ( func ) : @ wraps ( func ) def wrapper ( * args , ** kwargs ) : warn ( "Deprecated, this will be removed in the future" , DeprecationWarning ) return func ( * args , ** kwargs ) wrapper . __doc__ = "Deprecated.\n" + ( wrapper . __doc__ or "" ) return wrapper | A deprecation warning emitter as a decorator . |
21,341 | def get_module_names ( package_path , pattern = "lazy_*.py*" ) : package_contents = glob ( os . path . join ( package_path [ 0 ] , pattern ) ) relative_path_names = ( os . path . split ( name ) [ 1 ] for name in package_contents ) no_ext_names = ( os . path . splitext ( name ) [ 0 ] for name in relative_path_names ) return sorted ( set ( no_ext_names ) ) | All names in the package directory that matches the given glob without their extension . Repeated names should appear only once . |
21,342 | def get_modules ( package_name , module_names ) : def get_module ( name ) : return __import__ ( "." . join ( [ package_name , name ] ) , fromlist = [ package_name ] ) return [ get_module ( name ) for name in module_names ] | List of module objects from the package keeping the name order . |
21,343 | def docstring_with_summary ( docstring , pairs , key_header , summary_type ) : return "\n" . join ( [ docstring , "Summary of {}:" . format ( summary_type ) , "" ] + summary_table ( pairs , key_header ) + [ "" ] ) | Return a string joining the docstring with the pairs summary table . |
21,344 | def memoize ( func ) : class Memoizer ( dict ) : def __missing__ ( self , args ) : val = func ( * args ) self [ args ] = val return val memory = Memoizer ( ) @ wraps ( func ) def wrapper ( * args ) : return memory [ args ] return wrapper | Decorator for unerasable memoization based on function arguments for functions without keyword arguments . |
21,345 | def save_to_16bit_wave_file ( fname , sig , rate ) : with closing ( wave . open ( fname , "wb" ) ) as wave_file : wave_file . setnchannels ( 1 ) wave_file . setsampwidth ( 2 ) wave_file . setframerate ( rate ) for chunk in chunks ( ( clip ( sig ) * 2 ** 15 ) . map ( int ) , dfmt = "h" , padval = 0 ) : wave_file . writeframes ( chunk ) | Save a given signal sig to file fname as a 16 - bit one - channel wave with the given rate sample rate . |
21,346 | def new_note_track ( env , synth ) : list_env = list ( env ) return chain . from_iterable ( synth ( freq ) * list_env for freq in freq_gen ( ) ) | Audio track with the frequencies . |
21,347 | def tostream ( func , module_name = None ) : @ wraps ( func ) def new_func ( * args , ** kwargs ) : return Stream ( func ( * args , ** kwargs ) ) if module_name is not None : new_func . __module__ = module_name return new_func | Decorator to convert the function output into a Stream . Useful for generator functions . |
21,348 | def thub ( data , n ) : return StreamTeeHub ( data , n ) if isinstance ( data , Iterable ) else data | Tee or T hub auto - copier to help working with Stream instances as well as with numbers . |
21,349 | def blocks ( self , * args , ** kwargs ) : return Stream ( blocks ( iter ( self ) , * args , ** kwargs ) ) | Interface to apply audiolazy . blocks directly in a stream returning another stream . Use keyword args . |
21,350 | def take ( self , n = None , constructor = list ) : if n is None : return next ( self . _data ) if isinf ( n ) and n > 0 : return constructor ( self . _data ) if isinstance ( n , float ) : n = rint ( n ) if n > 0 else 0 return constructor ( next ( self . _data ) for _ in xrange ( n ) ) | Returns a container with the n first elements from the Stream or less if there aren t enough . Use this without args if you need only one element outside a list . |
21,351 | def skip ( self , n ) : def skipper ( data ) : for _ in xrange ( int ( round ( n ) ) ) : next ( data ) for el in data : yield el self . _data = skipper ( self . _data ) return self | Throws away the first n values from the Stream . |
21,352 | def limit ( self , n ) : data = self . _data self . _data = ( next ( data ) for _ in xrange ( int ( round ( n ) ) ) ) return self | Enforces the Stream to finish after n items . |
21,353 | def filter ( self , func ) : self . _data = xfilter ( func , self . _data ) return self | A lazy way to skip elements in the stream that gives False for the given function . |
21,354 | def accumulate ( iterable ) : " Return series of accumulated sums. " iterator = iter ( iterable ) sum_data = next ( iterator ) yield sum_data for el in iterator : sum_data += el yield sum_data | Return series of accumulated sums . |
21,355 | def tee ( data , n = 2 ) : if isinstance ( data , ( Stream , Iterator ) ) : return tuple ( Stream ( cp ) for cp in it . tee ( data , n ) ) else : return tuple ( data for unused in xrange ( n ) ) | Tee or T copy to help working with Stream instances as well as with numbers . |
21,356 | def factorial ( n ) : if isinstance ( n , float ) : if n . is_integer ( ) : n = int ( n ) if not isinstance ( n , INT_TYPES ) : raise TypeError ( "Non-integer input (perhaps you need Euler Gamma " "function or Gauss Pi function)" ) if n < 0 : raise ValueError ( "Input shouldn't be negative" ) return reduce ( operator . mul , it . takewhile ( lambda m : m <= n , it . count ( 2 ) ) , 1 ) | Factorial function that works with really big numbers . |
21,357 | def mgl_seq ( x ) : odd_numbers = thub ( count ( start = 1 , step = 2 ) , 2 ) return Stream ( 1 , - 1 ) * x ** odd_numbers / odd_numbers | Sequence whose sum is the Madhava - Gregory - Leibniz series . |
21,358 | def atan_mgl ( x , n = 10 ) : acc = 1 / ( 1 - z ** - 1 ) return acc ( mgl_seq ( x ) ) . skip ( n - 1 ) . take ( ) | Finds the arctan using the Madhava - Gregory - Leibniz series . |
21,359 | def lagrange ( pairs ) : prod = lambda args : reduce ( operator . mul , args ) xv , yv = xzip ( * pairs ) return lambda k : sum ( yv [ j ] * prod ( ( k - rk ) / ( rj - rk ) for rk in xv if rj != rk ) for j , rj in enumerate ( xv ) ) | Waring - Lagrange interpolator function . |
21,360 | def resample ( sig , old = 1 , new = 1 , order = 3 , zero = 0. ) : sig = Stream ( sig ) threshold = .5 * ( order + 1 ) step = old / new data = deque ( [ zero ] * ( order + 1 ) , maxlen = order + 1 ) data . extend ( sig . take ( rint ( threshold ) ) ) idx = int ( threshold ) isig = iter ( sig ) if isinstance ( step , Iterable ) : step = iter ( step ) while True : yield lagrange ( enumerate ( data ) ) ( idx ) idx += next ( step ) while idx > threshold : data . append ( next ( isig ) ) idx -= 1 else : while True : yield lagrange ( enumerate ( data ) ) ( idx ) idx += step while idx > threshold : data . append ( next ( isig ) ) idx -= 1 | Generic resampler based on Waring - Lagrange interpolators . |
21,361 | def is_polynomial ( self ) : return all ( isinstance ( k , INT_TYPES ) and k >= 0 for k in self . _data ) | Tells whether it is a linear combination of natural powers of x . |
21,362 | def order ( self ) : if not self . is_polynomial ( ) : raise AttributeError ( "Power needs to be positive integers" ) return max ( key for key in self . _data ) if self . _data else 0 | Finds the polynomial order . |
21,363 | def integrate ( self ) : if - 1 in self . _data : raise ValueError ( "Unable to integrate term that powers to -1" ) return Poly ( OrderedDict ( ( k + 1 , v / ( k + 1 ) ) for k , v in iteritems ( self . _data ) ) , zero = self . zero ) | Integrate without adding an integration constant . |
21,364 | def roots ( self ) : import numpy as np return np . roots ( list ( self . values ( ) ) [ : : - 1 ] ) . tolist ( ) | Returns a list with all roots . Needs Numpy . |
21,365 | def _exec_eval ( data , expr ) : ns = { } exec ( data , ns ) return eval ( expr , ns ) | Internal function to isolate an exec . Executes data and returns the expr evaluation afterwards . |
21,366 | def highpass ( cutoff ) : R = thub ( exp ( cutoff - pi ) , 2 ) return ( 1 - R ) / ( 1 + R * z ** - 1 ) | This strategy uses an exponential approximation for cut - off frequency calculation found by matching the one - pole Laplace lowpass filter and mirroring the resulting filter to get a highpass . |
21,367 | def lowpass ( cutoff ) : R = thub ( exp ( cutoff - pi ) , 2 ) G = ( R + 1 ) / 2 return G * ( 1 + z ** - 1 ) / ( 1 + R * z ** - 1 ) | This strategy uses an exponential approximation for cut - off frequency calculation found by matching the single pole and single zero Laplace highpass filter and mirroring the resulting filter to get a lowpass . |
21,368 | def highpass ( cutoff ) : R = thub ( exp ( - cutoff ) , 2 ) G = ( R + 1 ) / 2 return G * ( 1 - z ** - 1 ) / ( 1 - R * z ** - 1 ) | This strategy uses an exponential approximation for cut - off frequency calculation found by matching the single pole and single zero Laplace highpass filter . |
21,369 | def is_linear ( self ) : return all ( isinstance ( filt , LinearFilter ) or ( hasattr ( filt , "is_linear" ) and filt . is_linear ( ) ) for filt in self . callables ) | Tests whether all filters in the list are linear . CascadeFilter and ParallelFilter instances are also linear if all filters they group are linear . |
21,370 | def erb ( freq , Hz = None ) : if Hz is None : if freq < 7 : raise ValueError ( "Frequency out of range." ) Hz = 1 fHz = freq / Hz result = 6.23e-6 * fHz ** 2 + 93.39e-3 * fHz + 28.52 return result * Hz | B . C . J . Moore and B . R . Glasberg Suggested formulae for calculating auditory filter bandwidths and excitation patterns . J . Acoust . Soc . Am . 74 1983 pp . 750 - 753 . |
21,371 | def gammatone ( freq , bandwidth ) : bw = thub ( bandwidth , 1 ) bw2 = thub ( bw * 2 , 4 ) freq = thub ( freq , 4 ) resons = [ resonator . z_exp , resonator . poles_exp ] * 2 return CascadeFilter ( reson ( freq , bw2 ) for reson in resons ) | A . Klapuri Multipitch Analysis of Polyphonic Music and Speech Signals Using an Auditory Model . IEEE Transactions on Audio Speech and Language Processing vol . 16 no . 2 2008 pp . 255 - 266 . |
21,372 | def _generate_window_strategies ( ) : for wnd_dict in window . _content_generation_table : names = wnd_dict [ "names" ] sname = wnd_dict [ "sname" ] = names [ 0 ] wnd_dict . setdefault ( "params_def" , "" ) for sdict in [ window , wsymm ] : docs_dict = window . _doc_kwargs ( symm = sdict is wsymm , ** wnd_dict ) decorators = [ format_docstring ( ** docs_dict ) , sdict . strategy ( * names ) ] ns = dict ( pi = pi , sin = sin , cos = cos , xrange = xrange , __name__ = __name__ ) exec ( sdict . _code_template . format ( ** wnd_dict ) , ns , ns ) reduce ( lambda func , dec : dec ( func ) , decorators , ns [ sname ] ) if not wnd_dict . get ( "distinct" , True ) : wsymm [ sname ] = window [ sname ] break wsymm [ sname ] . periodic = window [ sname ] . periodic = window [ sname ] wsymm [ sname ] . symm = window [ sname ] . symm = wsymm [ sname ] | Create all window and wsymm strategies |
21,373 | def acorr ( blk , max_lag = None ) : if max_lag is None : max_lag = len ( blk ) - 1 return [ sum ( blk [ n ] * blk [ n + tau ] for n in xrange ( len ( blk ) - tau ) ) for tau in xrange ( max_lag + 1 ) ] | Calculate the autocorrelation of a given 1 - D block sequence . |
21,374 | def lag_matrix ( blk , max_lag = None ) : if max_lag is None : max_lag = len ( blk ) - 1 elif max_lag >= len ( blk ) : raise ValueError ( "Block length should be higher than order" ) return [ [ sum ( blk [ n - i ] * blk [ n - j ] for n in xrange ( max_lag , len ( blk ) ) ) for i in xrange ( max_lag + 1 ) ] for j in xrange ( max_lag + 1 ) ] | Finds the lag matrix for a given 1 - D block sequence . |
21,375 | def dft ( blk , freqs , normalize = True ) : dft_data = ( sum ( xn * cexp ( - 1j * n * f ) for n , xn in enumerate ( blk ) ) for f in freqs ) if normalize : lblk = len ( blk ) return [ v / lblk for v in dft_data ] return list ( dft_data ) | Complex non - optimized Discrete Fourier Transform |
21,376 | def zcross ( seq , hysteresis = 0 , first_sign = 0 ) : neg_hyst = - hysteresis seq_iter = iter ( seq ) if first_sign == 0 : last_sign = 0 for el in seq_iter : yield 0 if ( el > hysteresis ) or ( el < neg_hyst ) : last_sign = - 1 if el < 0 else 1 break else : last_sign = - 1 if first_sign < 0 else 1 for el in seq_iter : if el * last_sign < neg_hyst : last_sign = - 1 if el < 0 else 1 yield 1 else : yield 0 | Zero - crossing stream . |
21,377 | def clip ( sig , low = - 1. , high = 1. ) : if low is None : if high is None : return Stream ( sig ) return Stream ( el if el < high else high for el in sig ) if high is None : return Stream ( el if el > low else low for el in sig ) if high < low : raise ValueError ( "Higher clipping limit is smaller than lower one" ) return Stream ( high if el > high else ( low if el < low else el ) for el in sig ) | Clips the signal up to both a lower and a higher limit . |
21,378 | def unwrap ( sig , max_delta = pi , step = 2 * pi ) : idata = iter ( sig ) d0 = next ( idata ) yield d0 delta = d0 - d0 for d1 in idata : d_diff = d1 - d0 if abs ( d_diff ) > max_delta : delta += - d_diff + min ( ( d_diff ) % step , ( d_diff ) % - step , key = lambda x : abs ( x ) ) yield d1 + delta d0 = d1 | Parametrized signal unwrapping . |
21,379 | def amdf ( lag , size ) : filt = ( 1 - z ** - lag ) . linearize ( ) @ tostream def amdf_filter ( sig , zero = 0. ) : return maverage ( size ) ( abs ( filt ( sig , zero = zero ) ) , zero = zero ) return amdf_filter | Average Magnitude Difference Function non - linear filter for a given size and a fixed lag . |
21,380 | def overlap_add ( blk_sig , size = None , hop = None , wnd = None , normalize = True ) : import numpy as np if size is None : blk_sig = Stream ( blk_sig ) size = len ( blk_sig . peek ( ) ) if hop is None : hop = size if wnd is None : wnd = np . ones ( size ) elif callable ( wnd ) and not isinstance ( wnd , Stream ) : wnd = wnd ( size ) if isinstance ( wnd , Sequence ) : wnd = np . array ( wnd ) elif isinstance ( wnd , Iterable ) : wnd = np . hstack ( wnd ) else : raise TypeError ( "Window should be an iterable or a callable" ) if normalize : steps = Stream ( wnd ) . blocks ( hop ) . map ( np . array ) gain = np . sum ( np . abs ( np . vstack ( steps ) ) , 0 ) . max ( ) if gain : wnd = wnd / gain old = np . zeros ( size ) for blk in ( wnd * blk for blk in blk_sig ) : blk [ : - hop ] += old [ hop : ] for el in blk [ : hop ] : yield el old = blk for el in old [ hop : ] : yield el | Overlap - add algorithm using Numpy arrays . |
def overlap_add(blk_sig, size=None, hop=None, wnd=None, normalize=True):
    """
    Overlap-add algorithm using lists instead of Numpy arrays. The behavior
    is the same to the ``overlap_add.numpy`` strategy, besides the data types.

    See the numpy strategy for the parameters documentation; here ``wnd``
    becomes a plain list and the arithmetic is done with ``operator``/``map``.
    """
    if size is None:
        blk_sig = Stream(blk_sig)  # Stream allows peeking the first block
        size = len(blk_sig.peek())
    if hop is None:
        hop = size
    if wnd is not None:
        if callable(wnd) and not isinstance(wnd, Stream):
            wnd = wnd(size)  # window factory
        if isinstance(wnd, Iterable):
            wnd = list(wnd)
        else:
            raise TypeError("Window should be an iterable or a callable")
    if normalize:
        if wnd:
            # Gain is the max of the summed |window| hop-sized steps
            steps = Stream(wnd).map(abs).blocks(hop).map(tuple)
            gain = max(xmap(sum, xzip(*steps)))
            if gain:  # avoid division by zero
                wnd[:] = (w / gain for w in wnd)
        else:
            # Rectangular window normalized by the block overlap count
            wnd = [1 / ceil(size / hop)] * size
    if wnd:
        mul = operator.mul
        if len(wnd) != size:
            raise ValueError("Incompatible window size")
        # Extra trailing zero; presumably a guard for block-length mismatch
        # detection below -- TODO confirm
        wnd = wnd + [0.]
        blk_sig = (xmap(mul, wnd, blk) for blk in blk_sig)
    add = operator.add
    mem = [0.] * size
    s_h = size - hop
    for blk in xmap(iter, blk_sig):
        # NOTE: order matters -- the first slice assignment consumes the
        # first s_h items of the blk iterator, the second takes the rest
        mem[:s_h] = xmap(add, mem[hop:], blk)
        mem[s_h:] = blk
        if len(mem) != size:
            raise ValueError("Wrong block size or declared")
        for el in mem[:hop]:
            yield el
    for el in mem[hop:]:  # flush the remaining tail after the last block
        yield el
def stft(func=None, **kwparams):
    """
    Short Time Fourier Transform for complex data.

    Builds on ``stft.base`` using ``numpy.fft.fft``/``ifft`` as the
    transform pair; ``func`` and keyword parameters are forwarded to it.
    """
    from numpy.fft import fft, ifft
    wrapper = stft.base(transform=fft, inverse_transform=ifft)
    return wrapper(func, **kwparams)
def stft(func=None, **kwparams):
    """
    Short Time Fourier Transform for real data keeping the full FFT block.

    Builds on ``stft.base`` with ``numpy.fft.fft`` as the transform and an
    inverse that keeps only the real part of ``numpy.fft.ifft``.
    """
    from numpy.fft import fft, ifft

    def ifft_real(*args):
        # Discard the (numerically negligible) imaginary residue
        return ifft(*args).real

    return stft.base(transform=fft, inverse_transform=ifft_real)(func,
                                                                 **kwparams)
def close(self):
    """
    Destructor for this audio interface. Waits the threads to finish their
    streams, if desired.
    """
    with self.halting:  # avoid simultaneous close() calls
        if not self.finished:  # only the first call does the teardown
            self.finished = True
            # Close all playing threads, one at a time
            while True:
                with self.lock:  # don't race with threads removing themselves
                    try:
                        thread = self._threads[0]
                    except IndexError:  # no more playing threads
                        break
                if not self.wait:  # don't wait: ask the thread to stop now
                    thread.stop()
                thread.join()
            # Stop and drain every recording stream
            while self._recordings:
                recst = self._recordings[-1]
                recst.stop()
                recst.take(inf)  # consume all remaining data so it closes
            # Everything should be closed by now
            assert not self._pa._streams
            self._pa.terminate()
def record(self, chunk_size=None, dfmt="f", channels=1,
           rate=DEFAULT_SAMPLE_RATE, **kwargs):
    """
    Records audio from device into a Stream.

    Parameters
    ----------
    chunk_size :
        Samples per chunk; defaults to the global ``chunks.size``.
    dfmt :
        Sample struct format code (e.g. ``"f"`` for 32-bit float).
    channels :
        Number of input channels (``nchannels`` keyword overrides this).
    rate :
        Sample rate in Hz.
    **kwargs :
        Extra parameters forwarded to ``pyaudio.PyAudio.open``.

    Returns
    -------
    A ``RecStream`` instance, also kept in ``self._recordings``.
    """
    if chunk_size is None:
        chunk_size = chunks.size
    if hasattr(self, "api"):  # honor the API's default input device
        kwargs.setdefault("input_device_index",
                          self.api["defaultInputDevice"])
    channels = kwargs.pop("nchannels", channels)  # legacy keyword alias
    pa_stream = self._pa.open(format=_STRUCT2PYAUDIO[dfmt],
                              channels=channels,
                              rate=rate,
                              frames_per_buffer=chunk_size,
                              input=True,
                              **kwargs)
    rec_stream = RecStream(self, pa_stream, chunk_size, dfmt)
    self._recordings.append(rec_stream)  # tracked so close() can drain it
    return rec_stream
def run(self):
    """
    Plays the audio. This method plays the audio, and shouldn't be called
    explicitly; let the constructor do so.
    """
    st = self.stream._stream  # raw PyAudio stream, for the fast write path
    for chunk in chunks(self.audio,
                        size=self.chunk_size * self.nchannels,
                        dfmt=self.dfmt):
        # Faster equivalent of self.stream.write(chunk, self.chunk_size)
        self.write_stream(st, chunk, self.chunk_size, False)
        if not self.go.is_set():  # pause (or halt) was requested
            self.stream.stop_stream()
            if self.halting:
                break  # stop() was called: abandon the remaining audio
            self.go.wait()  # block until play is resumed
            self.stream.start_stream()
    # Finished playing: destructor-like cleanup of this thread
    with self.lock:
        if self in self.device_manager._threads:  # if not already closed
            self.stream.close()
            self.device_manager.thread_finished(self)
def stop(self):
    """
    Stops the playing thread and close.
    """
    with self.lock:
        self.halting = True  # makes run() break out of its loop...
        self.go.clear()  # ...at the next pause check
def freq2midi(freq):
    """
    Given a frequency in Hz, returns its MIDI pitch number.

    Uses the A4 reference (``FREQ_A4`` Hz at MIDI pitch ``MIDI_A4``); a
    complex result (log2 of a non-positive input) maps to ``nan``.
    """
    pitch = MIDI_A4 + 12 * (log2(freq) - log2(FREQ_A4))
    if isinstance(pitch, complex):  # non-positive frequency: no real pitch
        return nan
    return pitch
def octaves(freq, fmin=20., fmax=2e4):
    """
    Given a frequency and a frequency range, returns all frequencies in that
    range that are an integer number of octaves related to the given
    frequency.

    Parameters
    ----------
    freq :
        The reference frequency (Hz); must be positive.
    fmin, fmax :
        Range limits (Hz); must be positive.

    Returns
    -------
    An ascending list of frequencies, possibly empty when the range is
    narrower than one octave and no octave of ``freq`` falls in it.

    Raises
    ------
    ValueError
        When any of the three frequencies is not positive.
    """
    if freq <= 0 or fmin <= 0 or fmax <= 0:
        raise ValueError("Frequencies have to be positive")
    # Shift freq into (or near) the range by whole octaves
    while freq < fmin:
        freq *= 2
    while freq > fmax:
        freq /= 2
    if freq < fmin:  # range too narrow: freq fell below after halving
        return []
    # Walk down by octaves while strictly above fmin...
    below = []
    f = freq
    while f > fmin:
        below.append(f)
        f /= 2
    result = below[::-1]  # ascending order
    # ...then up by octaves while strictly below fmax
    f = freq * 2
    while f < fmax:
        result.append(f)
        f *= 2
    return result
def image_path_processor_factory(path):
    """
    Processor for concatenating the path to relative path images.

    Parameters
    ----------
    path :
        Prefix to prepend to relative image file names (including any
        trailing separator, e.g. ``"images/"``).

    Returns
    -------
    A line processor: RST ``.. image::`` directives with a relative file
    name get ``path`` prepended; every other line is returned unchanged.
    """
    markup = ".. image::"

    def processor(line):
        if not line.startswith(markup):
            return line
        fname = line[len(markup):].strip()
        if fname.startswith("/") or "://" in fname:
            return line  # absolute path or URL: leave untouched
        return "{} {}{}".format(markup, path, fname)

    return processor
def delay(sig):
    """
    Simple feedforward delay effect: the dry signal mixed with two
    attenuated echoes (at 280 ms and 280 + 220 ms).
    """
    mixer = Streamix()
    sig = thub(sig, 3)  # the same stream is consumed three times
    mixer.add(0, sig)               # dry signal
    mixer.add(280 * ms, .1 * sig)   # first echo
    mixer.add(220 * ms, .1 * sig)   # second echo, 220 ms after the first
    return mixer
def note2snd(pitch, quarters):
    """
    Creates an audio Stream object for a single note.

    Parameters
    ----------
    pitch :
        Note name string (``None`` means a rest).
    quarters :
        Duration, in quarter-note units.
    """
    dur = quarters * quarter_dur
    if pitch is None:  # a rest: just silence
        return zeros(dur)
    return synth(str2freq(pitch) * Hz, dur)
def find_full_name(prefix, suffix="rst"):
    """
    Script path to actual path relative file name converter.

    Parameters
    ----------
    prefix :
        File name without extension.
    suffix :
        File extension, without the dot (defaults to ``"rst"``).

    Returns
    -------
    The file name joined with this script's directory.
    """
    fname = os.path.extsep.join([prefix, suffix])
    return os.path.join(os.path.split(__file__)[0], fname)
def save_to_rst(prefix, data):
    """
    Saves a RST file with the given prefix into the script file location,
    prepending the GPL header before the given data.
    """
    fname = find_full_name(prefix)
    with open(fname, "w") as rst_file:
        rst_file.write(full_gpl_for_rst)  # license header first
        rst_file.write(data)
def ks_synth(freq):
    """
    Synthesize the given frequency into a Stream by using a model based on
    Karplus-Strong.

    The wavetable memory mixes three sinusoid partials (1st, 3rd and 9th),
    white noise and an alternating -1/+1 square-like stream, scaled to keep
    the amplitude bounded.
    """
    partials = sum(lz.sinusoid(x * freq) for x in [1, 3, 9])
    ks_mem = (partials + lz.white_noise() + lz.Stream(-1, 1)) / 5
    return lz.karplus_strong(freq, memory=ks_mem)
def m21_to_stream(score, synth=ks_synth, beat=90, fdur=2., pad_dur=.5,
                  rate=lz.DEFAULT_SAMPLE_RATE):
    """
    Converts Music21 data to a Stream object.

    Parameters
    ----------
    score :
        A Music21 score/stream whose flattened notes are rendered.
    synth :
        Callable mapping a frequency (in rad/sample) to an audio Stream.
    beat :
        Tempo, in beats (quarter notes) per minute.
    fdur :
        NOTE(review): unused in this body -- presumably a fermata duration
        factor; the hard-coded ``delta *= 2`` below looks related. Confirm.
    pad_dur :
        Extra trailing silence, in seconds.
    rate :
        Output sample rate, in samples per second.

    Returns
    -------
    A ``Streamix`` mixing every note at its scheduled time.
    """
    s, Hz = lz.sHz(rate)
    step = 60. / beat * s  # duration of one quarter note, in samples
    # Flatten to (freq, start, dur, has_fermata) tuples, one per pitch
    # (chords contribute one tuple per pitch)
    score = reduce(operator.concat,
                   [[(pitch.frequency * Hz,
                      note.offset * step,
                      note.quarterLength * step,
                      Fermata in note.expressions)
                     for pitch in note.pitches]
                    for note in score.flat.notes])
    song = lz.Streamix()
    last_start = 0
    for freq, start, dur, has_fermata in score:
        delta = start - last_start  # delay relative to the previous event
        if has_fermata:
            delta *= 2  # fermata: double the time before this note
        song.add(delta, synth(freq).limit(dur))
        last_start = start
    # Keep the mix open until the last note (loop's final ``dur``) ends,
    # plus the padding silence
    song.add(dur + pad_dur * s, lz.Stream([]))
    return song
def pair_strings_sum_formatter(a, b):
    """
    Formats the sum of a and b.

    Note
    ----
    Both inputs are numeric strings; when ``b`` starts with a minus sign,
    the result is written as a subtraction instead.
    """
    if b.startswith("-"):
        return "{0} - {1}".format(a, b[1:])
    return "{0} + {1}".format(a, b)
def float_str(value, symbol_str="", symbol_value=1, after=False,
              max_denominator=1000000):
    """
    Pretty rational string from float numbers.

    Parameters
    ----------
    value :
        The float number to be converted.
    symbol_str :
        Symbol to print with the number (e.g. ``"pi"``); empty by default.
    symbol_value :
        The numeric value of the symbol; ``value / symbol_value`` is the
        rational actually rendered.
    after :
        When ``True``, the symbol is printed after the fraction instead of
        attached to the numerator.
    max_denominator :
        Denominator limit for the rational approximation.

    Returns
    -------
    A string such as ``"1/2"``, ``"-3/2"``, ``"pi/2"`` or ``"1/2pi"``.
    """
    if value == 0:
        return "0"
    frac = Fraction(value / symbol_value).limit_denominator(max_denominator)
    num, den = frac.numerator, frac.denominator
    parts = []
    if num < 0:
        parts.append("-")
        num = -num
    # The numerator "1" is implicit when glued to a symbol (e.g. "pi/2")
    if num != 1 or symbol_str == "" or after:
        parts.append(str(num))
    if not after:  # symbol attached to the numerator
        parts.append(symbol_str)
    if den != 1:
        parts.append("/" + str(den))
    if after:  # symbol appended at the end
        parts.append(symbol_str)
    return "".join(parts)
def small_doc(obj, indent="", max_width=80):
    """
    Finds a useful small doc representation of an object.

    Parameters
    ----------
    obj :
        Any object; its docstring (or its ``str()`` when there is none) is
        summarized.
    indent :
        String prepended to every output line; its length is subtracted
        from ``max_width``.
    max_width :
        Maximum width of each output line, including the indent.

    Returns
    -------
    A list of word-wrapped, indented lines.
    """
    if not getattr(obj, "__doc__", False):  # no __doc__ attribute at all
        data = [el.strip() for el in str(obj).splitlines()]
        if len(data) == 1:
            if data[0].startswith("<audiolazy.lazy_"):
                # Strip the memory address from the default repr
                data = data[0].split("0x", -1)[0] + "0x...>"
            else:
                data = "".join(["``", data[0], "``"])  # literal markup
        else:
            data = " ".join(data)
    elif (not obj.__doc__) or (obj.__doc__.strip() == ""):  # empty docstring
        data = "\ * * * * ...no docstring... * * * * \ "
    else:
        # Keep only the first paragraph (lines up to the first blank one)
        data = (el.strip() for el in obj.__doc__.strip().splitlines())
        data = " ".join(it.takewhile(lambda el: el != "", data))
    # Word-wrap into lines of at most max_width characters (minus indent)
    max_width -= len(indent)
    result = []
    for word in data.split():
        if len(word) <= max_width:
            if result:
                if len(result[-1]) + len(word) + 1 <= max_width:
                    # Fits on the current line: merge with it
                    word = " ".join([result.pop(), word])
                result.append(word)
            else:
                result = [word]
        else:  # single word longer than a line: hard-split it
            result.extend("".join(w)
                          for w in blocks(word, max_width, padval=""))
    return [indent + el for el in result]
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.