idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
23,900
def union_update(self, other, ignore_conflicts=False):
    """Update this definition in place with the union of ``other``.

    Unless ``ignore_conflicts`` is true, ``ensure_compatible`` is invoked
    first and may raise on conflicting definitions.
    """
    if not ignore_conflicts:
        ensure_compatible(self, other)
    # in-place set union for each backing collection
    for attr in ('_objects', '_properties', '_pairs'):
        getattr(self, attr).update(getattr(other, attr))
Update the definition with the union of the other .
64
10
23,901
def union(self, other, ignore_conflicts=False):
    """Return a new definition that is the union of both definitions."""
    merged = self.copy()
    merged.union_update(other, ignore_conflicts)
    return merged
Return a new definition from the union of the definitions .
37
11
23,902
def intersection(self, other, ignore_conflicts=False):
    """Return a new definition that is the intersection of both definitions."""
    narrowed = self.copy()
    narrowed.intersection_update(other, ignore_conflicts)
    return narrowed
Return a new definition from the intersection of the definitions .
37
11
23,903
def maximal(iterable, comparison=operator.lt, _groupkey=operator.itemgetter(0)):
    """Yield the unique maximal elements from *iterable* using *comparison*.

    With fewer than two distinct items the set itself is returned directly
    (nothing can dominate anything).
    """
    items = set(iterable)
    if len(items) < 2:
        return items

    def _generate():
        # permutations(items, 2) emits all ordered pairs grouped by their
        # first element, so groupby on item 0 needs no pre-sorting
        for item, pairs in groupby(permutations(items, 2), key=_groupkey):
            # keep `item` only if no other element compares greater
            if not any(comparison(a, b) for a, b in pairs):
                yield item

    return _generate()
Yield the unique maximal elements from iterable using comparison .
87
12
23,904
def replace(self, item, new_item):
    """Swap *item* for *new_item* in place, preserving list order.

    Raises ValueError when *new_item* is already present, or when *item*
    is not in the list (via ``list.index``).
    """
    if new_item in self._seen:
        raise ValueError('%r already in list' % new_item)
    where = self._items.index(item)
    self._seen.remove(item)
    self._seen.add(new_item)
    self._items[where] = new_item
Replace an item preserving order .
82
7
23,905
def move(self, item, new_index):
    """Relocate *item* to position *new_index* (no-op if already there)."""
    old_index = self._items.index(item)
    if old_index == new_index:
        return
    self._items.insert(new_index, self._items.pop(old_index))
Move an item to the given position .
57
8
23,906
def issuperset(self, items):
    """Return whether this collection contains every element of *items*."""
    return all(item in self._seen for item in items)
Return whether this collection contains all items .
33
8
23,907
def rsub(self, items):
    """Return the order-preserving unique items that are NOT in this collection."""
    seen = set()
    unique = []
    for candidate in items:
        # skip members of this collection and duplicates within `items`
        if candidate in self._seen or candidate in seen:
            continue
        seen.add(candidate)
        unique.append(candidate)
    return self._fromargs(seen, unique)
Return order preserving unique items not in this collection .
61
10
23,908
def merge_schema(first, second):
    """Return the result of merging the two given schemas.

    Both arguments must be plain dicts whose root 'type' is 'object';
    anything else raises ValueError / NotImplementedError respectively.
    """
    if type(first) is not dict or type(second) is not dict:
        raise ValueError("Argument is not a schema")
    if first.get('type') != 'object' or second.get('type') != 'object':
        raise NotImplementedError("Unsupported root type")
    return merge_objects(first, second)
Returns the result of merging the two given schemas .
89
11
23,909
def generate_and_merge_schemas(samples):
    """Generate a schema for each sample and fold them into one merged schema.

    NOTE(review): when *samples* is a re-iterable container (not an iterator)
    the first sample is merged twice — presumably harmless since merging is
    idempotent; confirm against merge_schema.
    """
    merged = generate_schema_for_sample(next(iter(samples)))
    for sample in samples:
        merged = merge_schema(merged, generate_schema_for_sample(sample))
    return merged
Iterates through the given samples generating schemas and merging them returning the resulting merged schema .
62
18
23,910
def sine_psd(data, delta, number_of_tapers=None, number_of_iterations=2,
             degree_of_smoothing=1.0, statistics=False, verbose=False):
    """Wrapper for the ``sine_psd`` subroutine in the library by G. A. Prieto.

    Returns ``[spectrum, frequency_bins]``, extended with
    ``[errors, tapers_per_freq_point]`` when ``statistics`` is True.

    NOTE(review): ``C.c_char('y')`` takes a *str*, which only works on
    Python 2 — on Python 3 this would need ``b'y'``; confirm target version.
    """
    # Verbose mode on or off.
    if verbose is True:
        verbose = C.byref(C.c_char('y'))
    else:
        verbose = None
    # Set the number of tapers so it can be read by the library.
    if number_of_tapers is None:
        number_of_tapers = 0
    # initialize _MtspecType to save some space
    mt = _MtspecType("float32")
    # Transform the data to work with the library.
    data = np.require(data, dtype=mt.float, requirements=[mt.order])
    # Some variables necessary to call the library.
    npts = len(data)
    number_of_frequency_bins = int(npts / 2) + 1
    # Create output arrays.
    frequency_bins = mt.empty(number_of_frequency_bins)
    spectrum = mt.empty(number_of_frequency_bins)
    # Create optional arrays or set to None.
    if statistics is True:
        # here an exception, mt sets the type float32, here we need int32
        # that is do all the type and POINTER definition once by hand
        tapers_per_freq_point = np.empty(number_of_frequency_bins,
                                         dtype='int32', order=mt.order)
        tapers_per_freq_point_p = tapers_per_freq_point.ctypes.data_as(
            C.POINTER(C.c_int))
        errors = mt.empty((number_of_frequency_bins, 2))
    else:
        tapers_per_freq_point_p = errors = None
    # Call the library. Fortran passes pointers!
    mtspeclib.sine_psd_(
        C.byref(C.c_int(npts)),
        C.byref(C.c_float(delta)),
        mt.p(data),
        C.byref(C.c_int(number_of_tapers)),
        C.byref(C.c_int(number_of_iterations)),
        C.byref(C.c_float(degree_of_smoothing)),
        C.byref(C.c_int(number_of_frequency_bins)),
        mt.p(frequency_bins),
        mt.p(spectrum),
        tapers_per_freq_point_p,
        mt.p(errors),
        verbose)
    # Calculate return values.
    return_values = [spectrum, frequency_bins]
    if statistics is True:
        return_values.extend([errors, tapers_per_freq_point])
    return return_values
Wrapper method for the sine_psd subroutine in the library by German A . Prieto .
645
23
23,911
def dpss(npts, fw, number_of_tapers, auto_spline=True, npts_max=None):
    """Calculate DPSS, also known as Slepian sequences or Slepian tapers.

    Returns the tuple ``(v, lamb, theta)`` — taper matrix of shape
    ``(npts, number_of_tapers)`` plus two per-taper vectors — computed by
    the Fortran library, using the faster spline approximation for very
    long series.
    """
    mt = _MtspecType("float64")
    # output buffers filled in by the Fortran routines
    v = mt.empty((npts, number_of_tapers))
    lamb = mt.empty(number_of_tapers)
    theta = mt.empty(number_of_tapers)
    # Set auto_spline to True.
    if npts_max and npts_max < npts:
        auto_spline = True
    # Always set npts_max.
    else:
        npts_max = 200000
    # Call either the spline routine or the normal routine.
    if auto_spline is True and npts > npts_max:
        mtspeclib.dpss_spline_(
            C.byref(C.c_int(npts_max)),
            C.byref(C.c_int(npts)),
            C.byref(C.c_double(fw)),
            C.byref(C.c_int(number_of_tapers)),
            mt.p(v), mt.p(lamb), mt.p(theta))
    else:
        mtspeclib.dpss_(
            C.byref(C.c_int(npts)),
            C.byref(C.c_double(fw)),
            C.byref(C.c_int(number_of_tapers)),
            mt.p(v), mt.p(lamb), mt.p(theta))
    return (v, lamb, theta)
Calculates DPSS also known as Slepian sequences or Slepian tapers .
380
19
23,912
def wigner_ville_spectrum(data, delta, time_bandwidth=3.5,
                          number_of_tapers=None, smoothing_filter=None,
                          filter_width=100, frequency_divider=1,
                          verbose=False):
    """Calculate the Wigner-Ville distribution/spectrum of a signal using
    multitaper spectral estimates.

    ``smoothing_filter`` may be None, 'boxcar' or 'gauss'; anything else
    raises.  NOTE(review): ``C.c_char('y')`` takes a *str*, which only
    works on Python 2 — confirm target version.
    """
    data = np.require(data, 'float32')
    mt = _MtspecType("float32")
    npts = len(data)
    # Use the optimal number of tapers in case no number is specified.
    if number_of_tapers is None:
        number_of_tapers = int(2 * time_bandwidth) - 1
    # Determine filter.
    if not smoothing_filter:
        smoothing_filter = 0
    elif smoothing_filter == 'boxcar':
        smoothing_filter = 1
    elif smoothing_filter == 'gauss':
        smoothing_filter = 2
    else:
        msg = 'Invalid value for smoothing filter.'
        raise Exception(msg)
    # Verbose mode on or off.
    if verbose:
        verbose = C.byref(C.c_char('y'))
    else:
        verbose = None
    # Allocate the output array
    # f90 code internally pads zeros to 2 * npts. That is we only return
    # every second frequency point, thus decrease the size of the array
    output = mt.empty((npts // 2 // int(frequency_divider) + 1, npts))
    mtspeclib.wv_spec_to_array_(
        C.byref(C.c_int(npts)),
        C.byref(C.c_float(delta)),
        mt.p(data), mt.p(output),
        C.byref(C.c_float(time_bandwidth)),
        C.byref(C.c_int(number_of_tapers)),
        C.byref(C.c_int(smoothing_filter)),
        C.byref(C.c_float(filter_width)),
        C.byref(C.c_int(frequency_divider)),
        verbose)
    return output
Function to calculate the Wigner - Ville Distribution or Wigner - Ville Spectrum of a signal using multitaper spectral estimates .
468
28
23,913
def mt_deconvolve(data_a, data_b, delta, nfft=None, time_bandwidth=None,
                  number_of_tapers=None, weights="adaptive", demean=True,
                  fmax=0.0):
    """Deconvolve two equally long time series using multitapers.

    Returns a dict with keys 'frequencies', 'deconvolved',
    'spectral_ratio', 'spectrum_a' and 'spectrum_b'.

    NOTE(review): leaving both ``time_bandwidth`` and ``number_of_tapers``
    as None crashes on ``int(2 * time_bandwidth)`` — presumably callers
    always supply ``time_bandwidth``; confirm.
    """
    npts = len(data_a)
    if len(data_b) != npts:
        raise ValueError("Input arrays must have the same length!")
    if nfft is None:
        nfft = npts
    elif nfft < npts:
        raise ValueError("nfft must be larger then the number of samples in "
                         "the array.")
    # Deconvolution utilizes the 32bit version.
    mt = _MtspecType("float32")
    # Use the optimal number of tapers in case no number is specified.
    if number_of_tapers is None:
        number_of_tapers = int(2 * time_bandwidth) - 1
    # Transform the data to work with the library.
    data_a = np.require(data_a, mt.float, requirements=[mt.order])
    data_b = np.require(data_b, mt.float, requirements=[mt.order])
    nf = nfft // 2 + 1
    # Internally uses integers
    if demean:
        demean = 1
    else:
        demean = 0
    # iad = 0 are adaptive, iad = 1 are constant weight - this is
    # counter intuitive.
    if weights == "constant":
        adaptive = 1
    elif weights == "adaptive":
        adaptive = 0
    else:
        raise ValueError('Weights must be either "adaptive" or "constant".')
    # output buffers filled by the Fortran routine
    tfun = mt.empty(nfft)
    freq = mt.empty(nf)
    spec_ratio = mt.empty(nf)
    speci = mt.empty(nf)
    specj = mt.empty(nf)
    mtspeclib.mt_deconv_(
        C.byref(C.c_int(int(npts))),
        C.byref(C.c_int(int(nfft))),
        C.byref(C.c_float(float(delta))),
        mt.p(data_a),
        mt.p(data_b),
        C.byref(C.c_float(float(time_bandwidth))),
        C.byref(C.c_int(int(number_of_tapers))),
        C.byref(C.c_int(int(nf))),
        C.byref(C.c_int(adaptive)),
        mt.p(freq),
        mt.p(tfun),
        mt.p(spec_ratio),
        mt.p(speci),
        mt.p(specj),
        C.byref(C.c_int(demean)),
        C.byref(C.c_float(fmax)))
    return {
        "frequencies": freq,
        "deconvolved": tfun,
        "spectral_ratio": spec_ratio,
        "spectrum_a": speci,
        "spectrum_b": specj}
Deconvolve two time series using multitapers .
713
11
23,914
def empty(self, shape, complex=False):
    """Return an uninitialized array of *shape* with this type's dtype/order.

    The dtype is ``self.complex`` when *complex* is true, ``self.float``
    otherwise.
    """
    dtype = self.complex if complex else self.float
    return np.empty(shape, dtype=dtype, order=self.order)
A wrapper around np . empty which automatically sets the correct type and returns an empty array .
55
18
23,915
def signal_bursts ( ) : np . random . seed ( 815 ) length = 5 * 512 # Baseline low frequency plus noise. data = np . sin ( np . linspace ( 0 , 80 * np . pi , length ) ) noise = np . random . ranf ( length ) noise /= noise . max ( ) noise /= 15 data += noise # Double last two fifths of the signal. data [ - 2 * 512 : ] *= 2.0 chirp1 = 2.5 * np . sin ( np . linspace ( 0 , 400 * np . pi , 512 ) ) chirp1 *= np . linspace ( 1 , 0 , 512 ) data [ 512 : 2 * 512 ] += chirp1 # Add second transient signal. chirp2 = 5.0 * np . sin ( np . linspace ( 0 , 200 * np . pi , 512 ) ) chirp2 *= np . linspace ( 1 , 0 , 512 ) data [ 3 * 512 : 4 * 512 ] += chirp2 return data
Generates a signal with two bursts inside . Useful for testing time frequency distributions .
234
16
23,916
def linear_chirp(npts=2000):
    """Return a simple linear chirp sampled at *npts* points over 20 time units."""
    t = np.linspace(0, 20, npts)
    # instantaneous frequency grows linearly: 24.0 / 2.0 == 12.0
    return np.sin(0.2 * np.pi * (0.1 + 12.0 * t) * t)
Generates a simple linear chirp .
68
9
23,917
def exponential_chirp(npts=2000):
    """Return an exponential (geometric) chirp sampled at *npts* points."""
    t = np.linspace(0, 20, npts)
    return np.sin(2 * np.pi * 0.2 * (1.3 ** t - 1) / np.log(1.3))
Generates an exponential chirp .
71
8
23,918
def get_libgfortran_dir():
    """Return a one-element list with libgfortran's directory, or [] if unknown.

    Asks ``gfortran`` itself via ``-print-file-name``.  Useful on OSX where
    the C compiler oftentimes has no knowledge of the library directories
    of the Fortran compiler; harmless on Linux.
    """
    for ending in (".3.dylib", ".dylib", ".3.so", ".so"):
        try:
            p = Popen(['gfortran', "-print-file-name=libgfortran" + ending],
                      stdout=PIPE, stderr=PIPE)
            p.stderr.close()
            line = p.stdout.readline().decode().strip()
            p.stdout.close()
            if os.path.exists(line):
                return [os.path.dirname(line)]
        # Bug fix: was a bare `except:`, which also swallowed SystemExit
        # and KeyboardInterrupt; gfortran being absent raises OSError, but
        # keep Exception to preserve the original best-effort behavior.
        except Exception:
            continue
    return []
Helper function returning the library directory of libgfortran . Useful on OSX where the C compiler oftentimes has no knowledge of the library directories of the Fortran compiler . I don t think it can do any harm on Linux .
146
47
23,919
def create(cls, obj):
    """Create a new prototype object whose ``__proto__`` is *obj*.

    ``__new__`` is used directly so no ``__init__`` runs.
    """
    instance = cls.__new__(cls)
    instance.__proto__ = obj
    return instance
Create a new prototype object with the argument as the source prototype .
31
13
23,920
def bind(self, func):
    """Register *func* as a bound method under its own name."""
    if self.__methods__ is None:
        self.__methods__ = {}
    self.__methods__[func.__name__] = BoundFunction(func)
Take a function and create a bound method
46
8
23,921
def has_own_property(self, attr):
    """Return True if *attr* resolves directly via object.__getattribute__,
    bypassing any prototype-style lookup."""
    try:
        object.__getattribute__(self, attr)
    except AttributeError:
        return False
    return True
Returns if the property
38
4
23,922
def add_router(self, path, router):
    """Mount *router* on *path* by appending it to the middleware chain.

    When ``strict_router_check`` is enabled, non-Router objects are rejected.
    """
    if self.strict_router_check and not isinstance(router, Router):
        raise TypeError("Expected object of type Router, found %r"
                        % type(router))
    log.info("{} Adding router {} on path {}", id(self), router, path)
    self.middleware.add(path=path, func=router, method_mask=HTTPMethod.ALL)
Adds a router to the list of routers
105
8
23,923
def create_server(self, loop=None, as_coroutine=False, protocol_factory=None,
                  **server_config):
    """Construct a listening asyncio server for this app.

    Returns the ``loop.create_server`` coroutine when *as_coroutine* is
    true; otherwise runs it to completion and returns the server object.
    Defaults to the growler HTTP protocol factory.
    """
    if loop is None:
        import asyncio
        loop = asyncio.get_event_loop()
    if protocol_factory is None:
        from growler.aio import GrowlerHTTPProtocol
        protocol_factory = GrowlerHTTPProtocol.get_factory
    coro = loop.create_server(protocol_factory(self, loop=loop),
                              **server_config)
    if as_coroutine:
        return coro
    return loop.run_until_complete(coro)
Helper function which constructs a listening server using the default growler . http . protocol . Protocol which responds to this app .
140
24
23,924
def create_server_and_run_forever(self, loop=None, **server_config):
    """Construct an HTTP server for this app and run the loop until interrupted."""
    if loop is None:
        import asyncio
        loop = asyncio.get_event_loop()
    self.create_server(loop=loop, **server_config)
    try:
        loop.run_forever()
    except KeyboardInterrupt:
        pass  # graceful exit on Ctrl-C
Helper function which constructs an HTTP server and listens the loop forever .
77
13
23,925
def find_template_filename(self, template_name):
    """Search for a file matching *template_name*.

    Tries the bare name first, then each entry of
    ``default_file_extensions`` (if the attribute exists).  Returns the
    first existing Path, or None.
    """
    def candidates():
        base = self.path / template_name
        yield base
        # extensions are consulted lazily, only if the bare name misses
        try:
            extensions = self.default_file_extensions
        except AttributeError:
            return
        base_str = str(base)
        for ext in extensions:
            yield Path(base_str + ext)

    for candidate in candidates():
        if candidate.is_file():
            return candidate
Searches for a file matching the given template name .
92
12
23,926
def set_request_line(self, method, url, version):
    """Record the parsed request line on the responder, both as a tuple
    and as a dict."""
    self.parsed_request = (method, url, version)
    self.request = dict(method=method, url=url, version=version)
Sets the request line on the responder .
52
10
23,927
def init_body_buffer(self, method, headers):
    """Set up ``body_buffer``/``content_length`` from method and headers.

    POST/PUT require a CONTENT-LENGTH header; other methods must not send
    one.

    Raises:
        HTTPErrorBadRequest: on a missing or forbidden CONTENT-LENGTH header.
    """
    content_length = headers.get("CONTENT-LENGTH", None)
    if method in (HTTPMethod.POST, HTTPMethod.PUT):
        if content_length is None:
            raise HTTPErrorBadRequest(
                "HTTP Method requires a CONTENT-LENGTH header")
        self.content_length = int(content_length)
        self.body_buffer = bytearray(0)
    elif content_length is not None:
        # Bug fix: the method name was never interpolated into the message,
        # so clients saw a literal '%s'.
        raise HTTPErrorBadRequest(
            "HTTP method %s may NOT have a CONTENT-LENGTH header" % method)
Sets up the body_buffer and content_length attributes based on method and headers .
137
18
23,928
def build_req_and_res(self):
    """Invoke the request/response factories this responder was given and
    return the (req, res) pair."""
    request = self.build_req(self, self.headers)
    response = self.build_res(self._handler)
    return request, response
Simple method which calls the request and response factories the responder was given and returns the pair .
43
19
23,929
def validate_and_store_body_data(self, data):
    """Append *data* to the body buffer, enforcing the CONTENT-LENGTH bound.

    Raises:
        HTTPErrorBadRequest: when the accumulated body exceeds
        ``self.content_length``.
    """
    # Bug fix: the original slice assignment `body_buffer[-1:] = data`
    # overwrote the buffer's last byte instead of appending, and the
    # length check below was commented out so EVERY call raised.
    self.body_buffer.extend(data)
    if len(self.body_buffer) > self.content_length:
        problem = "Content length exceeds expected value (%d > %d)" % (
            len(self.body_buffer), self.content_length)
        raise HTTPErrorBadRequest(phrase=problem)
Attempts simple body data validation by comparining incoming data to the content length header . If passes store the data into self . _buffer .
93
27
23,930
def begin_application(self, req, res):
    """Enter the application's middleware chain on the asyncio event loop.

    Scheduling the handler as a task detaches the call stack so server
    errors do not link back to this function.
    """
    handler = self.http_application.handle_client_request(req, res)
    self.loop.create_task(handler)
Entry point for the application middleware chain for an asyncio event loop .
69
15
23,931
def calculate_etag(file_path):
    """Return an etag built from the file's mtime (ns) and size, both hex."""
    info = file_path.stat()
    return "%x-%x" % (info.st_mtime_ns, info.st_size)
Calculate an etag value
49
7
23,932
def _set_default_headers ( self ) : self . headers . setdefault ( 'Date' , self . get_current_time ) self . headers . setdefault ( 'Server' , self . SERVER_INFO ) self . headers . setdefault ( 'Content-Length' , "%d" % len ( self . message ) ) if self . app . enabled ( 'x-powered-by' ) : self . headers . setdefault ( 'X-Powered-By' , 'Growler' )
Create some default headers that should be sent along with every HTTP response
110
13
23,933
def send_headers(self):
    """Emit header events and write status line plus headers to the stream."""
    self.events.sync_emit('headers')
    self._set_default_headers()
    header_block = "%s%s%s" % (self.status_line, self.EOL, self.headers)
    self.stream.write(header_block.encode())
    self.events.sync_emit('after_headers')
Sends the headers to the client
81
7
23,934
def end(self):
    """Terminate the response.

    Useful for quickly ending the connection with no data sent.
    """
    self.send_headers()   # flush status line + headers
    self.write()          # flush any buffered body
    self.write_eof()      # close the write side of the stream
    self.has_ended = True
Ends the response . Useful for quickly ending connection with no data sent
33
14
23,935
def redirect(self, url, status=None):
    """Redirect to *url*; *status* defaults to 302 (Found)."""
    self.status_code = status if status is not None else 302
    self.headers = Headers([('location', url)])
    self.message = ''
    self.end()
Redirect to the specified url optional status code defaults to 302 .
52
13
23,936
def set(self, header, value=None):
    """Set a header; when *value* is None, *header* is treated as a
    mapping of many header/value pairs."""
    if value is not None:
        self.headers[header] = value
        return
    for name, val in header.items():
        self.headers[name] = val
Set header to the value
46
5
23,937
def links(self, links):
    """Set the Link header from a mapping of url -> rel."""
    entries = ('<{}>; rel="{}"'.format(url, rel)
               for url, rel in links.items())
    self.headers['Link'] = ','.join(entries)
Sets the Link
56
4
23,938
def send_file(self, filename, status=200):
    """Read *filename* into the response body and send it with *status*."""
    if isinstance(filename, Path) and sys.version_info >= (3, 5):
        self.message = filename.read_bytes()
    else:
        with io.FileIO(str(filename)) as file_obj:
            self.message = file_obj.read()
    self.status_code = status
    self.send_headers()
    self.write()
    self.write_eof()
Reads in the file filename and sends bytes to client
98
11
23,939
def update(self, *args, **kwargs):
    """Equivalent to dict.update: apply each positional mapping in order,
    then the keyword arguments."""
    for mapping in args + (kwargs,):
        for key, val in mapping.items():
            self[key] = val
Equivalent to the python dict update method .
51
9
23,940
def add_header(self, key, value, **params):
    """Store a header plus optional parameters.

    Parameter values are escaped and quoted, sorted by name, and appended
    to the value as ``; name="val" ...``.  Keys are stored case-folded.
    """
    key = self.escape(key)
    ci_key = key.casefold()
    pieces = []
    for name, val in sorted(params.items()):
        pieces.append('%s="%s"' % (self.escape(name),
                                   self.de_quote(self.escape(val))))
    param_str = ' '.join(pieces)
    if param_str:
        value = "%s; %s" % (value, param_str)
    self._header_data[ci_key] = (key, value)
Add a header to the collection including potential parameters .
179
10
23,941
def index(req, res):
    """Serve the root page, counting visits in the session."""
    previous = req.session.get('counter', -1)
    req.session['counter'] = int(previous) + 1
    print(" -- Session '{id}' returned {counter} times".format(**req.session))
    msg = "Hello!! You've been here [[%s]] times" % (req.session['counter'])
    res.send_text(msg)
    req.session.save()
Return root page of website .
105
6
23,942
async def body(self):
    """Block until the request body is fully read and return it as bytes.

    The caller is responsible for decoding.
    """
    if isinstance(self._body, bytes):
        return self._body
    # still an awaitable: resolve it and cache the bytes
    self._body = await self._body
    return self._body
A helper function which blocks until the body has been read completely . Returns the bytes of the body which the user should decode .
36
25
23,943
def event_emitter(cls_=None, *, events=('*',)):
    """Class decorator adding ``on`` and ``emit`` methods for *events*.

    Usable bare (``@event_emitter``) or with arguments
    (``@event_emitter(events=('open', 'close'))``).  A ``'*'`` entry means
    arbitrary event names may be registered later.

    NOTE: the callback registry lives in this closure, so it is shared by
    every instance of the decorated class (original behavior, preserved).
    """
    # Bug fix: dict.fromkeys(events, []) shared ONE list object across
    # every event, so a callback registered for one event fired for all
    # of them.  Build an independent list per event instead.
    event_dict = {name: [] for name in events}

    # '*' (mapped to an empty list) marks "any event name allowed".
    allow_any_eventname = event_dict.pop('*', False) == []

    def _event_emitter(cls):
        def on(self, name, callback):
            """Add a callback to the event named 'name'.

            Returns the object for chained 'on' calls.
            """
            if not (callable(callback) or isawaitable(callback)):
                raise ValueError("Callback not callable: %r" % callback)
            try:
                event_dict[name].append(callback)
            except KeyError:
                if allow_any_eventname:
                    event_dict[name] = [callback]
                else:
                    msg = "Event Emitter has no event {!r}".format(name)
                    raise KeyError(msg)
            return self

        async def emit(self, name):
            """Coroutine which executes each of the callbacks added to the
            event identified by 'name'.
            """
            for cb in event_dict[name]:
                if isawaitable(cb):
                    await cb
                else:
                    cb()

        cls.on = on
        cls.emit = emit
        return cls

    if cls_ is None:
        return _event_emitter
    return _event_emitter(cls_)
A class - decorator which will add the specified events and the methods on and emit to the class .
361
21
23,944
def on(self, name, _callback=None):
    """Register *_callback* for the event *name*; returns the callback.

    Called without a callback it returns a decorator registering the
    decorated function.
    """
    if _callback is None:
        # decorator usage: @obj.on('name')
        return lambda cb: self.on(name, cb)
    valid = callable(_callback) or isawaitable(_callback)
    if not valid:
        raise ValueError("Callback not callable: {0!r}".format(_callback))
    self._event_list[name].append(_callback)
    return _callback
Add a callback to the event named name . Returns callback object for decorationable calls .
105
17
23,945
async def emit(self, name):
    """Run every callback registered for event *name*, awaiting awaitables."""
    for callback in self._event_list[name]:
        if isawaitable(callback):
            await callback
        else:
            callback()
Add a callback to the event named name . Returns this object for chained on calls .
43
17
23,946
def routerify(obj):
    """Build a Router from *obj*'s route-like attributes, attach it to the
    object, and return it."""
    router = Router()
    for route_info in get_routing_attributes(obj):
        router.add_route(*route_info)
    obj.__growler_router = router
    return router
Scan through attributes of object parameter looking for any which match a route signature . A router will be created and added to the object with parameter .
47
28
23,947
def _add_route ( self , method , path , middleware = None ) : if middleware is not None : self . add ( method , path , middleware ) return self else : # return a lambda that will return the 'func' argument return lambda func : ( self . add ( method , path , func ) , func ) [ 1 ]
The implementation of adding a route
73
6
23,948
def use(self, middleware, path=None):
    """Run *middleware* on requests matching *path* (all requests if None).

    Returns ``self`` for chaining.
    """
    self.log.info(" Using middleware {}", middleware)
    if path is None:
        path = MiddlewareChain.ROOT_PATTERN
    self.add(HTTPMethod.ALL, path, middleware)
    return self
Call the provided middleware upon requests matching the path . If path is not provided or None all requests will match .
65
23
23,949
def sinatra_path_to_regex(cls, path):
    """Convert a sinatra-style path ('/user/:name') into a compiled regex
    with named groups; an already-compiled pattern is returned unchanged.
    """
    # Return the path if already a (compiled) regex
    if type(path) is cls.regex_type:
        return path
    # Replace each ':param' segment with a named capture group.
    # Bug fix: the template is now a raw string -- '\w' in a normal string
    # is an invalid escape sequence (DeprecationWarning, SyntaxError in
    # future Python versions).
    regex = [r"(?P<{}>\w+)".format(segment[1:])
             if cls.sinatra_param_regex.match(segment)
             else segment
             for segment in path.split('/')]
    return re.compile('/'.join(regex))
Converts a sinatra - style path to a regex with named parameters .
123
15
23,950
def _parse_and_store_headers(self):
    """Coroutine that retrieves and parses header data until the body starts.

    Feeds complete header lines into the ``_store_header`` coroutine;
    yields for more bytes (appended to ``self._buffer``) whenever
    ``_next_header_line`` produces None.  The finished header dict is
    stored on ``self.headers``.
    """
    header_storage = self._store_header()
    # prime the sub-coroutine so it can accept .send() calls
    header_storage.send(None)
    for header_line in self._next_header_line():
        if header_line is None:
            # need more input: suspend and append whatever is sent in
            self._buffer += yield
            continue
        else:
            header_storage.send(header_line)
    # sending None signals end-of-headers and yields the final dict
    self.headers = header_storage.send(None)
Coroutine used retrieve header data and parse each header until the body is found .
87
16
23,951
def _store_header(self):
    """Coroutine holding the logic and state behind storing headers.

    Send it header lines (bytes) in the usual fashion; lines starting
    with whitespace are folded into the previous header's value as a
    list.  Sending ``None`` indicates there are no more lines, after
    which the dict of headers is yielded.
    """
    key, value = None, None
    headers = []
    header_line = yield
    while header_line is not None:
        if not header_line.startswith((b' ', b'\t')):
            # a fresh header: flush the previously accumulated pair first
            if key:
                headers.append((key, value))
            key, value = self.split_header_key_value(header_line)
            key = key.upper()
        else:
            # continuation line: fold into the previous header's value
            next_val = header_line.strip().decode()
            if isinstance(value, list):
                value.append(next_val)
            else:
                value = [value, next_val]
        header_line = yield
    if key is not None:
        headers.append((key, value))
    yield dict(headers)
Logic & state behind storing headers . This is a coroutine that should be sent header lines in the usual fashion . Sending it None will indicate there are no more lines and the dictionary of headers will be returned .
165
43
23,952
def _store_request_line(self, req_line):
    """Split the request line into method, url and HTTP version.

    Ensures the version and method are valid for this server and parses
    the request URI with urllib.parse.  Returns
    ``(method, parsed_url, version)``.

    Raises:
        HTTPErrorBadRequest: undecodable or malformed request line.
        HTTPErrorVersionNotSupported: version other than HTTP/1.0 or 1.1.
        HTTPErrorNotImplemented: unknown HTTP method.
    """
    if not isinstance(req_line, str):
        try:
            req_line = self.raw_request_line = req_line.decode()
        except UnicodeDecodeError:
            raise HTTPErrorBadRequest
    try:
        self.method_str, self.original_url, self.version = req_line.split()
    except ValueError:
        raise HTTPErrorBadRequest()
    if self.version not in ('HTTP/1.1', 'HTTP/1.0'):
        raise HTTPErrorVersionNotSupported(self.version)
    # allow lowercase methodname?
    # self.method_str = self.method_str.upper()
    # save 'method' and get the correct function to finish processing
    try:
        self.method = HTTPMethod[self.method_str]
    except KeyError:
        # Method not found
        err = "Unknown HTTP Method '{}'".format(self.method_str)
        raise HTTPErrorNotImplemented(err)
    # pick the per-method header post-processing hook (default: accept)
    self._process_headers = {
        HTTPMethod.GET: self.process_get_headers,
        HTTPMethod.POST: self.process_post_headers
    }.get(self.method, lambda data: True)
    # derive numeric version info, e.g. ('1', '1') and 1.1
    _, num_str = self.version.split('/', 1)
    self.HTTP_VERSION = tuple(num_str.split('.'))
    self.version_number = float(num_str)
    self.parsed_url = urlparse(self.original_url)
    self.path = unquote(self.parsed_url.path)
    self.query = parse_qs(self.parsed_url.query)
    return self.method, self.parsed_url, self.version
Splits the request line given into three components . Ensures that the version and method are valid for this server and uses the urllib . parse function to parse the request URI .
379
37
23,953
def determine_newline(data):
    """Return the newline convention used in bytestring *data*.

    Finds the first b'\\n'; returns b'\\r\\n' when it is preceded by
    b'\\r', b'\\n' otherwise, and None when no newline is present.
    """
    line_end_pos = data.find(b'\n')
    if line_end_pos == -1:
        return None
    if line_end_pos == 0:
        return b'\n'
    # Indexing bytes yields an int.  Bug fix: compare by value with '=='
    # -- the original 'is' only worked because CPython interns small ints.
    prev_char = data[line_end_pos - 1]
    return b'\r\n' if prev_char == b'\r'[0] else b'\n'
Looks for a newline character in bytestring parameter data . Currently only looks for strings \ r \ n \ n . If \ n is found at the first position of the string this raises an exception .
96
42
23,954
def path_split(self, path):
    """Split *path* into (match, remainder) around this middleware's pattern.

    Returns (None, None) when the pattern does not match, or when the
    split does not fall on a '/' boundary.  A lone trailing '/' remainder
    is collapsed to '' when IGNORE_TRAILING_SLASH is set.
    """
    match = self.path.match(path)
    if match is None:
        return None, None
    remainder = path[match.end():]
    # the boundary between match and remainder must sit on a '/'
    if remainder and not (match.group().endswith('/')
                          or remainder.startswith('/')):
        return None, None
    if self.IGNORE_TRAILING_SLASH and remainder == '/':
        remainder = ''
    return match, remainder
Splits a path into the part matching this middleware and the part remaining . If path does not exist it returns a pair of None values . If the regex matches the entire pair the second item in returned tuple is None .
130
45
23,955
def find_matching_middleware(self, method, path):
    """Yield (middleware, path_match, rest_url) for each chain entry
    matching *method* and *path* that is not skipped."""
    for middleware in self.mw_list:
        if not middleware.matches_method(method):
            continue
        # split the url into the part matched by this middleware and the
        # part AFTER the match, to be potentially matched by a subchain
        path_match, rest_url = middleware.path_split(path)
        if not self.should_skip_middleware(middleware, path_match, rest_url):
            yield middleware, path_match, rest_url
Iterator handling the matching of middleware against a method + path pair . Yields the middleware and the
131
22
23,956
def add(self, method_mask, path, func):
    """Append *func* to the middleware chain for the given method mask/path.

    Functions taking three parameters are flagged as error handlers;
    MiddlewareChain instances are flagged as subchains.
    """
    node = MiddlewareNode(
        func=func,
        mask=method_mask,
        path=path,
        is_errorhandler=len(signature(func).parameters) == 3,
        is_subchain=isinstance(func, MiddlewareChain),
    )
    self.mw_list.append(node)
Add a function to the middleware chain . This function is returned when iterating over the chain with matching method and path .
96
25
23,957
def count_all(self):
    """Return the total number of middleware in this chain, recursing into
    subchains."""
    total = 0
    for node in self:
        total += node.func.count_all() if node.is_subchain else 1
    return total
Returns the total number of middleware in this chain and subchains .
34
14
23,958
def if_relationship(parser, token):
    """Determine if a certain type of relationship exists between two users.

    The status parameter must be a slug matching either the from_slug,
    to_slug or symmetrical_slug of a RelationshipStatus.
    """
    bits = list(token.split_contents())
    if len(bits) != 4:
        raise TemplateSyntaxError("%r takes 3 arguments:\n%s" %
                                  (bits[0], if_relationship.__doc__))
    # parse the template up to the matching {% else %} / end tag
    end_tag = 'end' + bits[0]
    nodelist_true = parser.parse(('else', end_tag))
    token = parser.next_token()
    if token.contents == 'else':
        nodelist_false = parser.parse((end_tag,))
        parser.delete_first_token()
    else:
        nodelist_false = template.NodeList()
    return IfRelationshipNode(nodelist_true, nodelist_false, *bits[1:])
Determine if a certain type of relationship exists between two users . The status parameter must be a slug matching either the from_slug to_slug or symmetrical_slug of a RelationshipStatus .
175
43
23,959
def add_relationship_url(user, status):
    """Build the URL for adding a relationship of *status* to *user*.

    *user* is a User object; *status* is a RelationshipStatus (its
    from_slug is used) or a slug string.
    """
    slug = status.from_slug if isinstance(status, RelationshipStatus) else status
    return reverse('relationship_add', args=[user.username, slug])
Generate a url for adding a relationship on a given user . user is a User object and status is either a relationship_status object or a string denoting a RelationshipStatus
51
35
23,960
def _rename_glyphs_from_ufo(self):
    """Rename glyphs using ufo.lib.public.postscriptNames in UFO."""
    # mapping of current name -> production name
    rename_map = self._build_production_names()
    otf = self.otf
    otf.setGlyphOrder([rename_map.get(n, n) for n in otf.getGlyphOrder()])
    # we need to compile format 2 'post' table so that the 'extraNames'
    # attribute is updated with the list of the names outside the
    # standard Macintosh glyph order; otherwise, if one dumps the font
    # to TTX directly before compiling first, the post table will not
    # contain the extraNames.
    if 'post' in otf and otf['post'].formatType == 2.0:
        otf['post'].compile(self.otf)
    if 'CFF ' in otf:
        # CFF fonts also carry glyph names in the charstrings and charset
        cff = otf['CFF '].cff.topDictIndex[0]
        char_strings = cff.CharStrings.charStrings
        cff.CharStrings.charStrings = {
            rename_map.get(n, n): v for n, v in char_strings.items()}
        cff.charset = [rename_map.get(n, n) for n in cff.charset]
Rename glyphs using ufo . lib . public . postscriptNames in UFO .
282
18
23,961
def _unique_name ( name , seen ) : if name in seen : n = seen [ name ] while ( name + ".%d" % n ) in seen : n += 1 seen [ name ] = n + 1 name += ".%d" % n seen [ name ] = 1 return name
Append incremental . N suffix if glyph is a duplicate .
63
12
23,962
def _build_production_name(self, glyph):
    """Build a production name for a single glyph."""
    # use PostScript names from UFO lib if available
    if self._postscriptNames:
        production_name = self._postscriptNames.get(glyph.name)
        return production_name if production_name else glyph.name
    # use name derived from unicode value
    unicode_val = glyph.unicode
    if glyph.unicode is not None:
        return '%s%04X' % (
            'u' if unicode_val > 0xffff else 'uni', unicode_val)
    # use production name + last (non-script) suffix if possible
    parts = glyph.name.rsplit('.', 1)
    if len(parts) == 2 and parts[0] in self.glyphSet:
        return '%s.%s' % (
            self._build_production_name(self.glyphSet[parts[0]]), parts[1])
    # use ligature name, making sure to look up components with suffixes
    parts = glyph.name.split('.', 1)
    if len(parts) == 2:
        liga_parts = ['%s.%s' % (n, parts[1]) for n in parts[0].split('_')]
    else:
        liga_parts = glyph.name.split('_')
    if len(liga_parts) > 1 and all(n in self.glyphSet for n in liga_parts):
        unicode_vals = [self.glyphSet[n].unicode for n in liga_parts]
        # BMP-only components can be joined into a single uniXXXXYYYY name
        if all(v and v <= 0xffff for v in unicode_vals):
            return 'uni' + ''.join('%04X' % v for v in unicode_vals)
        return '_'.join(
            self._build_production_name(self.glyphSet[n])
            for n in liga_parts)
    return glyph.name
Build a production name for a single glyph .
421
9
23,963
def makeFeaClassName(name, existingClassNames=None):
    """Sanitize *name* into a glyph class name which is legal to use in
    feature text, appending ``_N`` to avoid clashing with any name in
    *existingClassNames* (when given)."""
    sanitized = re.sub(r"[^A-Za-z0-9._]", r"", name)
    if existingClassNames is None:
        return sanitized
    candidate = sanitized
    counter = 1
    while candidate in existingClassNames:
        candidate = "%s_%d" % (sanitized, counter)
        counter += 1
    return candidate
Make a glyph class name which is legal to use in feature text .
85
14
23,964
def addLookupReference(feature, lookup, script=None, languages=None,
                       exclude_dflt=False):
    """Shortcut for addLookupReferences, but for a single lookup."""
    return addLookupReferences(
        feature,
        (lookup,),
        script=script,
        languages=languages,
        exclude_dflt=exclude_dflt,
    )
Shortcut for addLookupReferences but for a single lookup .
58
13
23,965
def openTypeHeadCreatedFallback(info):
    """Fall back to the SOURCE_DATE_EPOCH environment variable if set,
    otherwise to the current time (supports reproducible builds)."""
    if "SOURCE_DATE_EPOCH" in os.environ:
        t = datetime.utcfromtimestamp(int(os.environ["SOURCE_DATE_EPOCH"]))
        return t.strftime(_date_format)
    else:
        return dateStringForNow()
Fall back to the environment variable SOURCE_DATE_EPOCH if set, otherwise to the current time.
79
20
23,966
def preflightInfo(info):
    """Report which info attributes are unset on *info*.

    Returns a dict with two keys, ``missingRequired`` and
    ``missingRecommended``; each value is the set of attribute names
    that are absent or None.
    """
    def _missing(attributes):
        # absent attribute and explicit None are treated the same
        return {
            attr for attr in attributes
            if getattr(info, attr, None) is None
        }

    return dict(
        missingRequired=_missing(requiredAttributes),
        missingRecommended=_missing(recommendedAttributes),
    )
Returns a dict containing two items. The value for each item will be a set of missing info attribute names.
109
21
23,967
def add(self, user, status=None, symmetrical=False):
    """Add a relationship from ``self.instance`` to *user*.

    *status* defaults to the "following" status. When *symmetrical* is
    true, also create the reverse relationship and return both as a
    tuple; otherwise return the single Relationship.
    """
    if not status:
        status = RelationshipStatus.objects.following()
    relationship, created = Relationship.objects.get_or_create(
        from_user=self.instance,
        to_user=user,
        status=status,
        site=Site.objects.get_current())
    if symmetrical:
        # recurse once with symmetrical=False to create the reverse edge
        return (relationship,
                user.relationships.add(self.instance, status, False))
    else:
        return relationship
Add a relationship from one user to another with the given status which defaults to following .
102
17
23,968
def remove(self, user, status=None, symmetrical=False):
    """Remove a relationship from ``self.instance`` to *user*, with the
    same defaults and symmetrical behavior as ``add``."""
    if not status:
        status = RelationshipStatus.objects.following()
    res = Relationship.objects.filter(
        from_user=self.instance,
        to_user=user,
        status=status,
        site__pk=settings.SITE_ID).delete()
    if symmetrical:
        # recurse once with symmetrical=False to delete the reverse edge
        return (res, user.relationships.remove(self.instance, status, False))
    else:
        return res
Remove a relationship from one user to another with the same caveats and behavior as adding a relationship .
100
19
23,969
def get_relationships(self, status, symmetrical=False):
    """Return a QuerySet of users with whom the given user has
    established a relationship of *status*; when *symmetrical*, the
    reverse relationship must exist as well."""
    query = self._get_from_query(status)
    if symmetrical:
        # merge the reverse-direction filter into the same query dict
        query.update(self._get_to_query(status))
    return User.objects.filter(**query)
Returns a QuerySet of user objects with which the given user has established a relationship .
60
17
23,970
def only_to(self, status):
    """Return users who created a relationship of *status* to the given
    user, which the given user has not reciprocated."""
    from_relationships = self.get_relationships(status)
    to_relationships = self.get_related_to(status)
    return to_relationships.exclude(
        pk__in=from_relationships.values_list('pk'))
Returns a QuerySet of user objects who have created a relationship to the given user but which the given user has not reciprocated
67
25
23,971
def makeOfficialGlyphOrder(font, glyphOrder=None):
    """Make the final glyph order for *font*.

    ``.notdef`` always comes first, then the glyphs listed in
    *glyphOrder* (defaulting to ``font.glyphOrder`` when available),
    then all remaining glyphs sorted alphabetically.
    """
    if glyphOrder is None:
        glyphOrder = getattr(font, "glyphOrder", ())
    remaining = set(font.keys())
    order = []
    if ".notdef" in remaining:
        remaining.discard(".notdef")
        order.append(".notdef")
    for glyphName in glyphOrder:
        # ignore ordering entries that aren't actually in the font
        if glyphName in remaining:
            remaining.discard(glyphName)
            order.append(glyphName)
    order.extend(sorted(remaining))
    return order
Make the final glyph order for font .
115
8
23,972
def from_layer(cls, font, layerName=None, copy=False, skipExportGlyphs=None):
    """Return a mapping of glyph names to glyph objects from *font*.

    Glyphs come from the named layer (the default layer when *layerName*
    is None). When *copy* is true, glyphs and lib are deep-copied.
    Glyphs listed in *skipExportGlyphs* are decomposed wherever used as
    components and then dropped from the returned glyph set.
    """
    if layerName is not None:
        layer = font.layers[layerName]
    else:
        layer = font.layers.defaultLayer
    if copy:
        self = _copyLayer(layer, obj_type=cls)
        self.lib = deepcopy(layer.lib)
    else:
        self = cls((g.name, g) for g in layer)
        self.lib = layer.lib
    # If any glyphs in the skipExportGlyphs list are used as components,
    # decompose them in the containing glyphs...
    if skipExportGlyphs:
        for glyph in self.values():
            if any(c.baseGlyph in skipExportGlyphs for c in glyph.components):
                deepCopyContours(
                    self, glyph, glyph, Transform(), skipExportGlyphs)
                if hasattr(glyph, "removeComponent"):  # defcon
                    for c in [
                        component for component in glyph.components
                        if component.baseGlyph in skipExportGlyphs
                    ]:
                        glyph.removeComponent(c)
                else:  # ufoLib2
                    glyph.components[:] = [
                        c for c in glyph.components
                        if c.baseGlyph not in skipExportGlyphs
                    ]
        # ... and then remove them from the glyph set, if even present.
        for glyph_name in skipExportGlyphs:
            if glyph_name in self:
                del self[glyph_name]
    self.name = layer.name if layerName is not None else None
    return self
Return a mapping of glyph names to glyph objects from font .
347
12
23,973
def parseLayoutFeatures(font):
    """Parse OpenType layout features in the UFO and return a
    feaLib.ast.FeatureFile instance."""
    featxt = tounicode(font.features.text or "", "utf-8")
    if not featxt:
        return ast.FeatureFile()
    buf = UnicodeIO(featxt)
    # the path is used by the lexer to resolve 'include' statements
    # and print filename in error messages. For the UFO spec, this
    # should be the path of the UFO, not the inner features.fea:
    # https://github.com/unified-font-object/ufo-spec/issues/55
    ufoPath = font.path
    if ufoPath is not None:
        buf.name = ufoPath
    glyphNames = set(font.keys())
    try:
        parser = Parser(buf, glyphNames)
        doc = parser.parse()
    except IncludedFeaNotFound as e:
        # detect includes mistakenly written relative to features.fea and
        # give the author a hint, then re-raise
        if ufoPath and os.path.exists(os.path.join(ufoPath, e.args[0])):
            logger.warning(
                "Please change the file name in the include(...); "
                "statement to be relative to the UFO itself, "
                "instead of relative to the 'features.fea' file "
                "contained in it.")
        raise
    return doc
Parse OpenType layout features in the UFO and return a feaLib . ast . FeatureFile instance .
260
22
23,974
def setupFeatures(self):
    """Make the features source string to be compiled."""
    if self.featureWriters:
        featureFile = parseLayoutFeatures(self.ufo)
        for writer in self.featureWriters:
            writer.write(self.ufo, featureFile, compiler=self)
        # stringify AST to get correct line numbers in error messages
        self.features = featureFile.asFea()
    else:
        # no featureWriters, simply read existing features' text
        self.features = tounicode(self.ufo.features.text or "", "utf-8")
Make the features source .
115
5
23,975
def buildTables(self):
    """Compile OpenType feature tables from the features source.

    Raises a FeatureLibError if the feature compilation was unsuccessful.
    """
    if not self.features:
        return
    # the path is used by the lexer to follow 'include' statements;
    # if we generated some automatic features, includes have already been
    # resolved, and we work from a string which does't exist on disk
    path = self.ufo.path if not self.featureWriters else None
    try:
        addOpenTypeFeaturesFromString(
            self.ttFont, self.features, filename=path)
    except FeatureLibError:
        if path is None:
            # if compilation fails, create temporary file for inspection
            data = tobytes(self.features, encoding="utf-8")
            with NamedTemporaryFile(delete=False) as tmp:
                tmp.write(data)
            logger.error(
                "Compilation failed! Inspect temporary file: %r", tmp.name)
        raise
Compile OpenType feature tables from the source . Raises a FeaLibError if the feature compilation was unsuccessful .
176
24
23,976
def maxCtxFont(font):
    """Calculate the usMaxContext value for an entire font by scanning
    every GSUB and GPOS lookup subtable."""
    maxCtx = 0
    for tag in ('GSUB', 'GPOS'):
        if tag not in font:
            continue
        table = font[tag].table
        if table.LookupList is None:
            continue
        for lookup in table.LookupList.Lookup:
            for st in lookup.SubTable:
                maxCtx = maxCtxSubtable(maxCtx, tag, lookup.LookupType, st)
    return maxCtx
Calculate the usMaxContext value for an entire font .
102
13
23,977
def maxCtxContextualSubtable(maxCtx, st, ruleType, chain=''):
    """Calculate usMaxContext based on a contextual feature subtable.

    Formats 1 and 2 hold rules inside per-glyph / per-class rule sets;
    format 3 stores a single rule directly on the subtable. *chain* is
    either '' or a prefix such as 'Chain' selecting the attribute names.
    """
    if st.Format == 1:
        for ruleset in getattr(st, '%s%sRuleSet' % (chain, ruleType)):
            if ruleset is None:
                continue
            for rule in getattr(ruleset, '%s%sRule' % (chain, ruleType)):
                if rule is None:
                    continue
                maxCtx = maxCtxContextualRule(maxCtx, rule, chain)
    elif st.Format == 2:
        for ruleset in getattr(st, '%s%sClassSet' % (chain, ruleType)):
            if ruleset is None:
                continue
            for rule in getattr(ruleset, '%s%sClassRule' % (chain, ruleType)):
                if rule is None:
                    continue
                maxCtx = maxCtxContextualRule(maxCtx, rule, chain)
    elif st.Format == 3:
        maxCtx = maxCtxContextualRule(maxCtx, st, chain)
    return maxCtx
Calculate usMaxContext based on a contextual feature subtable .
236
14
23,978
def maxCtxContextualRule(maxCtx, st, chain):
    """Return usMaxContext updated for a single contextual rule *st*."""
    if chain == 'Reverse':
        # reverse chaining: single input glyph plus lookahead sequence
        return max(maxCtx, st.GlyphCount + st.LookAheadGlyphCount)
    if chain:
        # chaining context: input sequence plus lookahead sequence
        return max(maxCtx, st.InputGlyphCount + st.LookAheadGlyphCount)
    # plain context: just the input glyph sequence
    return max(maxCtx, st.GlyphCount)
Calculate usMaxContext based on a contextual feature rule .
92
13
23,979
def compileOTF(
    ufo,
    preProcessorClass=OTFPreProcessor,
    outlineCompilerClass=OutlineOTFCompiler,
    featureCompilerClass=None,
    featureWriters=None,
    glyphOrder=None,
    useProductionNames=None,
    optimizeCFF=CFFOptimization.SUBROUTINIZE,
    roundTolerance=None,
    removeOverlaps=False,
    overlapsBackend=None,
    inplace=False,
    layerName=None,
    skipExportGlyphs=None,
    _tables=None,
):
    """Create a FontTools CFF font from a UFO.

    Pre-processes glyphs, builds the OpenType tables, compiles features
    (default layer only), then post-processes (production names, CFF
    subroutinization depending on *optimizeCFF*).
    """
    logger.info("Pre-processing glyphs")
    if skipExportGlyphs is None:
        skipExportGlyphs = ufo.lib.get("public.skipExportGlyphs", [])
    preProcessor = preProcessorClass(
        ufo,
        inplace=inplace,
        removeOverlaps=removeOverlaps,
        overlapsBackend=overlapsBackend,
        layerName=layerName,
        skipExportGlyphs=skipExportGlyphs,
    )
    glyphSet = preProcessor.process()
    logger.info("Building OpenType tables")
    optimizeCFF = CFFOptimization(optimizeCFF)
    outlineCompiler = outlineCompilerClass(
        ufo,
        glyphSet=glyphSet,
        glyphOrder=glyphOrder,
        roundTolerance=roundTolerance,
        optimizeCFF=optimizeCFF >= CFFOptimization.SPECIALIZE,
        tables=_tables,
    )
    otf = outlineCompiler.compile()
    # Only the default layer is likely to have all glyphs used in feature code.
    if layerName is None:
        compileFeatures(
            ufo,
            otf,
            glyphSet=glyphSet,
            featureWriters=featureWriters,
            featureCompilerClass=featureCompilerClass,
        )
    postProcessor = PostProcessor(otf, ufo, glyphSet=glyphSet)
    otf = postProcessor.process(
        useProductionNames,
        optimizeCFF=optimizeCFF >= CFFOptimization.SUBROUTINIZE,
    )
    return otf
Create FontTools CFF font from a UFO .
442
10
23,980
def compileTTF(
    ufo,
    preProcessorClass=TTFPreProcessor,
    outlineCompilerClass=OutlineTTFCompiler,
    featureCompilerClass=None,
    featureWriters=None,
    glyphOrder=None,
    useProductionNames=None,
    convertCubics=True,
    cubicConversionError=None,
    reverseDirection=True,
    rememberCurveType=True,
    removeOverlaps=False,
    overlapsBackend=None,
    inplace=False,
    layerName=None,
    skipExportGlyphs=None,
):
    """Create a FontTools TrueType font from a UFO."""
    logger.info("Pre-processing glyphs")
    if skipExportGlyphs is None:
        skipExportGlyphs = ufo.lib.get("public.skipExportGlyphs", [])
    preProcessor = preProcessorClass(
        ufo,
        inplace=inplace,
        removeOverlaps=removeOverlaps,
        overlapsBackend=overlapsBackend,
        convertCubics=convertCubics,
        conversionError=cubicConversionError,
        reverseDirection=reverseDirection,
        rememberCurveType=rememberCurveType,
        layerName=layerName,
        skipExportGlyphs=skipExportGlyphs,
    )
    glyphSet = preProcessor.process()
    logger.info("Building OpenType tables")
    outlineCompiler = outlineCompilerClass(
        ufo, glyphSet=glyphSet, glyphOrder=glyphOrder)
    otf = outlineCompiler.compile()
    # Only the default layer is likely to have all glyphs used in feature code.
    if layerName is None:
        compileFeatures(
            ufo,
            otf,
            glyphSet=glyphSet,
            featureWriters=featureWriters,
            featureCompilerClass=featureCompilerClass,
        )
    postProcessor = PostProcessor(otf, ufo, glyphSet=glyphSet)
    otf = postProcessor.process(useProductionNames)
    return otf
Create FontTools TrueType font from a UFO .
407
10
23,981
def compileInterpolatableTTFs(
    ufos,
    preProcessorClass=TTFInterpolatablePreProcessor,
    outlineCompilerClass=OutlineTTFCompiler,
    featureCompilerClass=None,
    featureWriters=None,
    glyphOrder=None,
    useProductionNames=None,
    cubicConversionError=None,
    reverseDirection=True,
    inplace=False,
    layerNames=None,
    skipExportGlyphs=None,
):
    """Create FontTools TrueType fonts from a list of UFOs with
    interpolatable outlines. Cubic curves are converted compatibly to
    quadratic curves using the Cu2Qu conversion algorithm.

    Yields one TTFont per input UFO; a non-None entry in *layerNames*
    marks the corresponding UFO as a sparse master.
    """
    from ufo2ft.util import _LazyFontName
    if layerNames is None:
        layerNames = [None] * len(ufos)
    assert len(ufos) == len(layerNames)
    if skipExportGlyphs is None:
        skipExportGlyphs = set()
        for ufo in ufos:
            skipExportGlyphs.update(
                ufo.lib.get("public.skipExportGlyphs", []))
    logger.info("Pre-processing glyphs")
    preProcessor = preProcessorClass(
        ufos,
        inplace=inplace,
        conversionError=cubicConversionError,
        reverseDirection=reverseDirection,
        layerNames=layerNames,
        skipExportGlyphs=skipExportGlyphs,
    )
    glyphSets = preProcessor.process()
    for ufo, glyphSet, layerName in zip(ufos, glyphSets, layerNames):
        fontName = _LazyFontName(ufo)
        if layerName is not None:
            logger.info(
                "Building OpenType tables for %s-%s", fontName, layerName)
        else:
            logger.info("Building OpenType tables for %s", fontName)
        outlineCompiler = outlineCompilerClass(
            ufo,
            glyphSet=glyphSet,
            glyphOrder=glyphOrder,
            tables=SPARSE_TTF_MASTER_TABLES if layerName else None,
        )
        ttf = outlineCompiler.compile()
        # Only the default layer is likely to have all glyphs used in feature
        # code.
        if layerName is None:
            compileFeatures(
                ufo,
                ttf,
                glyphSet=glyphSet,
                featureWriters=featureWriters,
                featureCompilerClass=featureCompilerClass,
            )
        postProcessor = PostProcessor(ttf, ufo, glyphSet=glyphSet)
        ttf = postProcessor.process(useProductionNames)
        if layerName is not None:
            # for sparse masters (i.e. containing only a subset of the glyphs), we
            # need to include the post table in order to store glyph names, so that
            # fontTools.varLib can interpolate glyphs with same name across masters.
            # However we want to prevent the underlinePosition/underlineThickness
            # fields in such sparse masters to be included when computing the deltas
            # for the MVAR table. Thus, we set them to this unlikely, limit value
            # (-36768) which is a signal varLib should ignore them when building MVAR.
            ttf["post"].underlinePosition = -0x8000
            ttf["post"].underlineThickness = -0x8000
        yield ttf
Create FontTools TrueType fonts from a list of UFOs with interpolatable outlines . Cubic curves are converted compatibly to quadratic curves using the Cu2Qu conversion algorithm .
676
36
23,982
def compileInterpolatableTTFsFromDS(
    designSpaceDoc,
    preProcessorClass=TTFInterpolatablePreProcessor,
    outlineCompilerClass=OutlineTTFCompiler,
    featureCompilerClass=None,
    featureWriters=None,
    glyphOrder=None,
    useProductionNames=None,
    cubicConversionError=None,
    reverseDirection=True,
    inplace=False,
):
    """Create FontTools TrueType fonts from the DesignSpaceDocument UFO
    sources, with interpolatable outlines. Cubic curves are converted
    compatibly to quadratic curves using the Cu2Qu conversion algorithm.

    Returns the (possibly copied) designspace document with each
    source's ``font`` attribute replaced by the compiled TTF.
    """
    ufos, layerNames = [], []
    for source in designSpaceDoc.sources:
        if source.font is None:
            raise AttributeError(
                "designspace source '%s' is missing required 'font' attribute"
                % getattr(source, "name", "<Unknown>"))
        ufos.append(source.font)
        # 'layerName' is None for the default layer
        layerNames.append(source.layerName)
    skipExportGlyphs = designSpaceDoc.lib.get("public.skipExportGlyphs", [])
    ttfs = compileInterpolatableTTFs(
        ufos,
        preProcessorClass=preProcessorClass,
        outlineCompilerClass=outlineCompilerClass,
        featureCompilerClass=featureCompilerClass,
        featureWriters=featureWriters,
        glyphOrder=glyphOrder,
        useProductionNames=useProductionNames,
        cubicConversionError=cubicConversionError,
        reverseDirection=reverseDirection,
        inplace=inplace,
        layerNames=layerNames,
        skipExportGlyphs=skipExportGlyphs,
    )
    if inplace:
        result = designSpaceDoc
    else:
        # TODO try a more efficient copy method that doesn't involve (de)serializing
        result = designSpaceDoc.__class__.fromstring(
            designSpaceDoc.tostring())
    for source, ttf in zip(result.sources, ttfs):
        source.font = ttf
    return result
Create FontTools TrueType fonts from the DesignSpaceDocument UFO sources with interpolatable outlines . Cubic curves are converted compatibly to quadratic curves using the Cu2Qu conversion algorithm .
398
38
23,983
def compileInterpolatableOTFsFromDS(
    designSpaceDoc,
    preProcessorClass=OTFPreProcessor,
    outlineCompilerClass=OutlineOTFCompiler,
    featureCompilerClass=None,
    featureWriters=None,
    glyphOrder=None,
    useProductionNames=None,
    roundTolerance=None,
    inplace=False,
):
    """Create FontTools CFF fonts from the DesignSpaceDocument UFO
    sources, with interpolatable outlines.

    CFF optimization is disabled (CFFOptimization.NONE) so outlines stay
    interpolation-compatible across masters.
    """
    for source in designSpaceDoc.sources:
        if source.font is None:
            raise AttributeError(
                "designspace source '%s' is missing required 'font' attribute"
                % getattr(source, "name", "<Unknown>"))
    skipExportGlyphs = designSpaceDoc.lib.get("public.skipExportGlyphs", [])
    otfs = []
    for source in designSpaceDoc.sources:
        otfs.append(
            compileOTF(
                ufo=source.font,
                layerName=source.layerName,
                preProcessorClass=preProcessorClass,
                outlineCompilerClass=outlineCompilerClass,
                featureCompilerClass=featureCompilerClass,
                featureWriters=featureWriters,
                glyphOrder=glyphOrder,
                useProductionNames=useProductionNames,
                optimizeCFF=CFFOptimization.NONE,
                roundTolerance=roundTolerance,
                removeOverlaps=False,
                overlapsBackend=None,
                inplace=inplace,
                skipExportGlyphs=skipExportGlyphs,
                _tables=SPARSE_OTF_MASTER_TABLES if source.layerName else None,
            ))
    if inplace:
        result = designSpaceDoc
    else:
        # TODO try a more efficient copy method that doesn't involve (de)serializing
        result = designSpaceDoc.__class__.fromstring(
            designSpaceDoc.tostring())
    for source, otf in zip(result.sources, otfs):
        source.font = otf
    return result
Create FontTools CFF fonts from the DesignSpaceDocument UFO sources with interpolatable outlines .
402
18
23,984
def compileFeatures(
    ufo,
    ttFont=None,
    glyphSet=None,
    featureWriters=None,
    featureCompilerClass=None,
):
    """Compile OpenType Layout features from *ufo* into FontTools OTL
    tables.

    If *ttFont* is None, a new TTFont object is created containing the
    new tables, else the provided *ttFont* is updated with them. When no
    *featureCompilerClass* is given, the MTI compiler is selected if the
    UFO data contains ``.mti`` feature files.
    """
    if featureCompilerClass is None:
        if any(
            fn.startswith(MTI_FEATURES_PREFIX) and fn.endswith(".mti")
            for fn in ufo.data.fileNames
        ):
            featureCompilerClass = MtiFeatureCompiler
        else:
            featureCompilerClass = FeatureCompiler
    featureCompiler = featureCompilerClass(
        ufo, ttFont, glyphSet=glyphSet, featureWriters=featureWriters)
    return featureCompiler.compile()
Compile OpenType Layout features from ufo into FontTools OTL tables . If ttFont is None a new TTFont object is created containing the new tables else the provided ttFont is updated with the new tables .
142
47
23,985
def _propagate_glyph_anchors(glyphSet, composite, processed):
    """Propagate anchors from base glyphs to *composite* and, recursively,
    to all composite glyphs used in between. *processed* tracks names
    already handled to avoid repeated work."""
    if composite.name in processed:
        return
    processed.add(composite.name)
    if not composite.components:
        return
    base_components = []
    mark_components = []
    anchor_names = set()
    to_add = {}
    for component in composite.components:
        try:
            glyph = glyphSet[component.baseGlyph]
        except KeyError:
            logger.warning(
                'Anchors not propagated for inexistent component {} '
                'in glyph {}'.format(component.baseGlyph, composite.name))
        else:
            _propagate_glyph_anchors(glyphSet, glyph, processed)
            # components whose glyph has any "_"-prefixed anchor are marks
            if any(a.name.startswith('_') for a in glyph.anchors):
                mark_components.append(component)
            else:
                base_components.append(component)
                anchor_names |= {a.name for a in glyph.anchors}
    if mark_components and not base_components and _is_ligature_mark(composite):
        # The composite is a mark that is composed of other marks (E.g.
        # "circumflexcomb_tildecomb"). Promote the mark that is positioned closest
        # to the origin to a base.
        try:
            component = _component_closest_to_origin(mark_components, glyphSet)
        except Exception as e:
            raise Exception(
                "Error while determining which component of composite "
                "'{}' is the lowest: {}".format(composite.name, str(e)))
        mark_components.remove(component)
        base_components.append(component)
        glyph = glyphSet[component.baseGlyph]
        anchor_names |= {a.name for a in glyph.anchors}
    for anchor_name in anchor_names:
        # don't add if composite glyph already contains this anchor OR any
        # associated ligature anchors (e.g. "top_1, top_2" for "top")
        if not any(a.name.startswith(anchor_name) for a in composite.anchors):
            _get_anchor_data(to_add, glyphSet, base_components, anchor_name)
    for component in mark_components:
        _adjust_anchors(to_add, glyphSet, component)
    # we sort propagated anchors to append in a deterministic order
    for name, (x, y) in sorted(to_add.items()):
        anchor_dict = {'name': name, 'x': x, 'y': y}
        try:
            composite.appendAnchor(anchor_dict)
        except TypeError:  # pragma: no cover
            # fontParts API
            composite.appendAnchor(name, (x, y))
Propagate anchors from base glyphs to a given composite glyph and to all composite glyphs used in between .
600
22
23,986
def _get_anchor_data(anchor_data, glyphSet, components, anchor_name):
    """Collect transformed positions of *anchor_name* from *components*
    into *anchor_data*; multiple matches are keyed "name_1", "name_2", ...
    (ligature convention)."""
    anchors = []
    for component in components:
        for anchor in glyphSet[component.baseGlyph].anchors:
            if anchor.name == anchor_name:
                anchors.append((anchor, component))
                break
    if len(anchors) > 1:
        for i, (anchor, component) in enumerate(anchors):
            t = Transform(*component.transformation)
            name = '%s_%d' % (anchor.name, i + 1)
            anchor_data[name] = t.transformPoint((anchor.x, anchor.y))
    elif anchors:
        anchor, component = anchors[0]
        t = Transform(*component.transformation)
        anchor_data[anchor.name] = t.transformPoint((anchor.x, anchor.y))
Get data for an anchor from a list of components .
186
11
23,987
def setContext(self, font, feaFile, compiler=None):
    """Populate a temporary ``self.context`` namespace, which is reset
    after each new call to the ``_write`` method.

    Subclasses can override this to provide contextual information which
    depends on other data, or to set any temporary attributes.
    """
    todo = set(self.features)
    if self.mode == "skip":
        # don't regenerate features that the file already defines
        existing = ast.findFeatureTags(feaFile)
        todo.difference_update(existing)
    self.context = SimpleNamespace(
        font=font, feaFile=feaFile, compiler=compiler, todo=todo)
    return self.context
Populate a temporary self . context namespace which is reset after each new call to _write method . Subclasses can override this to provide contextual information which depends on other data or set any temporary attributes .
90
40
23,988
def write(self, font, feaFile, compiler=None):
    """Write features and class definitions for this font to a feaLib
    FeatureFile object.

    Returns True if the feature file was modified, False if no new
    features were generated. The temporary context is always discarded.
    """
    self.setContext(font, feaFile, compiler=compiler)
    try:
        if self.shouldContinue():
            return self._write()
        else:
            return False
    finally:
        del self.context
Write features and class definitions for this font to a feaLib FeatureFile object . Returns True if feature file was modified False if no new features were generated .
58
32
23,989
def makeUnicodeToGlyphNameMapping(self):
    """Return the Unicode-to-glyph-name mapping for the current font."""
    # Try to get the "best" Unicode cmap subtable if this writer is running
    # in the context of a FeatureCompiler, else create a new mapping from
    # the UFO glyphs
    compiler = self.context.compiler
    cmap = None
    if compiler is not None:
        table = compiler.ttFont.get("cmap")
        if table is not None:
            cmap = table.getBestCmap()
    if cmap is None:
        from ufo2ft.util import makeUnicodeToGlyphNameMapping
        if compiler is not None:
            glyphSet = compiler.glyphSet
        else:
            glyphSet = self.context.font
        cmap = makeUnicodeToGlyphNameMapping(glyphSet)
    return cmap
Return the Unicode to glyph name mapping for the current font .
173
12
23,990
def compileGSUB(self):
    """Compile a temporary GSUB table from the current feature file."""
    from ufo2ft.util import compileGSUB
    compiler = self.context.compiler
    if compiler is not None:
        # The result is cached in the compiler instance, so if another
        # writer requests one it is not compiled again.
        if hasattr(compiler, "_gsub"):
            return compiler._gsub
        glyphOrder = compiler.ttFont.getGlyphOrder()
    else:
        # the 'real' glyph order doesn't matter because the table is not
        # compiled to binary, only the glyph names are used
        glyphOrder = sorted(self.context.font.keys())
    gsub = compileGSUB(self.context.feaFile, glyphOrder)
    if compiler and not hasattr(compiler, "_gsub"):
        compiler._gsub = gsub
    return gsub
Compile a temporary GSUB table from the current feature file .
176
13
23,991
def compile ( self ) : self . otf = TTFont ( sfntVersion = self . sfntVersion ) # only compile vertical metrics tables if vhea metrics a defined vertical_metrics = [ "openTypeVheaVertTypoAscender" , "openTypeVheaVertTypoDescender" , "openTypeVheaVertTypoLineGap" , "openTypeVheaCaretSlopeRise" , "openTypeVheaCaretSlopeRun" , "openTypeVheaCaretOffset" , ] self . vertical = all ( getAttrWithFallback ( self . ufo . info , metric ) is not None for metric in vertical_metrics ) # write the glyph order self . otf . setGlyphOrder ( self . glyphOrder ) # populate basic tables self . setupTable_head ( ) self . setupTable_hmtx ( ) self . setupTable_hhea ( ) self . setupTable_name ( ) self . setupTable_maxp ( ) self . setupTable_cmap ( ) self . setupTable_OS2 ( ) self . setupTable_post ( ) if self . vertical : self . setupTable_vmtx ( ) self . setupTable_vhea ( ) self . setupOtherTables ( ) self . importTTX ( ) return self . otf
Compile the OpenType binary .
290
7
23,992
def makeFontBoundingBox(self):
    """Make a bounding box for the font by unioning all glyph bounding
    boxes (glyphs without a box are skipped)."""
    if not hasattr(self, "glyphBoundingBoxes"):
        self.glyphBoundingBoxes = self.makeGlyphsBoundingBoxes()
    fontBox = None
    for glyphName, glyphBox in self.glyphBoundingBoxes.items():
        if glyphBox is None:
            continue
        if fontBox is None:
            fontBox = glyphBox
        else:
            fontBox = unionRect(fontBox, glyphBox)
    if fontBox is None:  # unlikely
        fontBox = BoundingBox(0, 0, 0, 0)
    return fontBox
Make a bounding box for the font .
129
9
23,993
def makeMissingRequiredGlyphs(font, glyphSet):
    """Add ``.notdef`` to *glyphSet* if it is not present, building a stub
    glyph sized from the font's vertical metrics."""
    if ".notdef" in glyphSet:
        return
    unitsPerEm = otRound(getAttrWithFallback(font.info, "unitsPerEm"))
    ascender = otRound(getAttrWithFallback(font.info, "ascender"))
    descender = otRound(getAttrWithFallback(font.info, "descender"))
    # conventional .notdef width: half the UPM
    defaultWidth = otRound(unitsPerEm * 0.5)
    glyphSet[".notdef"] = StubGlyph(
        name=".notdef",
        width=defaultWidth,
        unitsPerEm=unitsPerEm,
        ascender=ascender,
        descender=descender)
Add . notdef to the glyph set if it is not present .
156
14
23,994
def setupTable_head(self):
    """Make the head table."""
    if "head" not in self.tables:
        return
    self.otf["head"] = head = newTable("head")
    font = self.ufo
    head.checkSumAdjustment = 0
    head.tableVersion = 1.0
    head.magicNumber = 0x5F0F3CF5
    # version numbers
    # limit minor version to 3 digits as recommended in OpenType spec:
    # https://www.microsoft.com/typography/otspec/recom.htm
    versionMajor = getAttrWithFallback(font.info, "versionMajor")
    versionMinor = getAttrWithFallback(font.info, "versionMinor")
    fullFontRevision = float("%d.%03d" % (versionMajor, versionMinor))
    head.fontRevision = round(fullFontRevision, 3)
    if head.fontRevision != fullFontRevision:
        logger.warning(
            "Minor version in %s has too many digits and won't fit into "
            "the head table's fontRevision field; rounded to %s.",
            fullFontRevision, head.fontRevision)
    # upm
    head.unitsPerEm = otRound(getAttrWithFallback(font.info, "unitsPerEm"))
    # times
    head.created = dateStringToTimeValue(
        getAttrWithFallback(font.info, "openTypeHeadCreated")) - mac_epoch_diff
    head.modified = dateStringToTimeValue(dateStringForNow()) - mac_epoch_diff
    # bounding box
    xMin, yMin, xMax, yMax = self.fontBoundingBox
    head.xMin = otRound(xMin)
    head.yMin = otRound(yMin)
    head.xMax = otRound(xMax)
    head.yMax = otRound(yMax)
    # style mapping
    styleMapStyleName = getAttrWithFallback(font.info, "styleMapStyleName")
    macStyle = []
    if styleMapStyleName == "bold":
        macStyle = [0]
    elif styleMapStyleName == "bold italic":
        macStyle = [0, 1]
    elif styleMapStyleName == "italic":
        macStyle = [1]
    head.macStyle = intListToNum(macStyle, 0, 16)
    # misc
    head.flags = intListToNum(
        getAttrWithFallback(font.info, "openTypeHeadFlags"), 0, 16)
    head.lowestRecPPEM = otRound(
        getAttrWithFallback(font.info, "openTypeHeadLowestRecPPEM"))
    head.fontDirectionHint = 2
    head.indexToLocFormat = 0
    head.glyphDataFormat = 0
Make the head table .
598
5
23,995
def setupTable_name(self):
    """Build the OpenType ``name`` table.

    First copies any explicit records from
    ``font.info.openTypeNameRecords``, then fills in the standard
    Windows (platform 3) records for name IDs 0-17 from the font info,
    skipping empty values and any ID already set by an explicit record.
    """
    if "name" not in self.tables:
        return
    font = self.ufo
    self.otf["name"] = name = newTable("name")
    name.names = []
    # Set name records from font.info.openTypeNameRecords
    for nameRecord in getAttrWithFallback(font.info, "openTypeNameRecords"):
        nameId = nameRecord["nameID"]
        platformId = nameRecord["platformID"]
        platEncId = nameRecord["encodingID"]
        langId = nameRecord["languageID"]
        # on Python 2, plistLib (used by ufoLib) returns unicode strings
        # only when plist data contain non-ascii characters, and returns
        # ascii-encoded bytes when it can. On the other hand, fontTools's
        # name table `setName` method wants unicode strings, so we must
        # decode them first
        nameVal = tounicode(nameRecord["string"], encoding='ascii')
        name.setName(nameVal, nameId, platformId, platEncId, langId)
    # Build name records
    familyName = getAttrWithFallback(font.info, "styleMapFamilyName")
    styleName = getAttrWithFallback(font.info, "styleMapStyleName").title()
    preferredFamilyName = getAttrWithFallback(
        font.info, "openTypeNamePreferredFamilyName")
    preferredSubfamilyName = getAttrWithFallback(
        font.info, "openTypeNamePreferredSubfamilyName")
    # full name (ID 4) is assembled from the typographic family/subfamily
    fullName = "%s %s" % (preferredFamilyName, preferredSubfamilyName)
    # standard name IDs (OpenType spec) mapped to their values
    nameVals = {
        0: getAttrWithFallback(font.info, "copyright"),
        1: familyName,
        2: styleName,
        3: getAttrWithFallback(font.info, "openTypeNameUniqueID"),
        4: fullName,
        5: getAttrWithFallback(font.info, "openTypeNameVersion"),
        6: getAttrWithFallback(font.info, "postscriptFontName"),
        7: getAttrWithFallback(font.info, "trademark"),
        8: getAttrWithFallback(font.info, "openTypeNameManufacturer"),
        9: getAttrWithFallback(font.info, "openTypeNameDesigner"),
        10: getAttrWithFallback(font.info, "openTypeNameDescription"),
        11: getAttrWithFallback(font.info, "openTypeNameManufacturerURL"),
        12: getAttrWithFallback(font.info, "openTypeNameDesignerURL"),
        13: getAttrWithFallback(font.info, "openTypeNameLicense"),
        14: getAttrWithFallback(font.info, "openTypeNameLicenseURL"),
        16: preferredFamilyName,
        17: preferredSubfamilyName,
    }
    # don't add typographic names if they are the same as the legacy ones
    if nameVals[1] == nameVals[16]:
        del nameVals[16]
    if nameVals[2] == nameVals[17]:
        del nameVals[17]
    # postscript font name
    if nameVals[6]:
        nameVals[6] = normalizeStringForPostscript(nameVals[6])
    for nameId in sorted(nameVals.keys()):
        nameVal = nameVals[nameId]
        if not nameVal:
            # empty/falsy values are omitted from the table entirely
            continue
        nameVal = tounicode(nameVal, encoding='ascii')
        platformId = 3
        # UTF-16 full repertoire encoding only when the string contains
        # characters outside the Basic Multilingual Plane
        platEncId = 10 if _isNonBMP(nameVal) else 1
        langId = 0x409
        # Set built name record if not set yet
        if name.getName(nameId, platformId, platEncId, langId):
            continue
        name.setName(nameVal, nameId, platformId, platEncId, langId)
Make the name table.
889
5
23,996
def setupTable_cmap(self):
    """Build the OpenType ``cmap`` table.

    Always writes two format-4 subtables (Mac 0/3 and Windows 3/1) for
    the BMP mapping; when any codepoint exceeds U+FFFF, also writes two
    format-12 subtables (0/4 and 3/10) covering the full mapping.
    """
    if "cmap" not in self.tables:
        return
    from fontTools.ttLib.tables._c_m_a_p import cmap_format_4

    full = self.unicodeToGlyphNameMapping
    beyondBMP = {cp: g for cp, g in full.items() if cp > 65535}
    if beyondBMP:
        bmp = {cp: g for cp, g in full.items() if cp <= 65535}
    else:
        bmp = dict(full)

    def _makeFormat4(platformID, platEncID):
        # Both format-4 subtables share the same BMP mapping dict.
        sub = cmap_format_4(4)
        sub.platformID = platformID
        sub.platEncID = platEncID
        sub.language = 0
        sub.cmap = bmp
        return sub

    self.otf["cmap"] = cmap = newTable("cmap")
    cmap.tableVersion = 0
    # mac (0/3) first, then windows (3/1)
    cmap.tables = [_makeFormat4(0, 3), _makeFormat4(3, 1)]

    # If we have glyphs outside Unicode BMP, we must set another
    # subtable that can hold longer codepoints for them.
    if beyondBMP:
        from fontTools.ttLib.tables._c_m_a_p import cmap_format_12

        beyondBMP.update(bmp)

        def _makeFormat12(platformID, platEncID):
            sub = cmap_format_12(12)
            sub.platformID = platformID
            sub.platEncID = platEncID
            sub.language = 0
            sub.cmap = beyondBMP
            return sub

        # update tables registry
        cmap.tables = cmap.tables + [_makeFormat12(0, 4), _makeFormat12(3, 10)]
Make the cmap table.
576
6
23,997
def setupTable_hmtx(self):
    """Build the OpenType ``hmtx`` table.

    Records (advanceWidth, leftSideBearing) per glyph; empty glyphs
    (no bounding box) get a left side bearing of 0.

    Raises:
        ValueError: if any glyph has a negative width.
    """
    if "hmtx" not in self.tables:
        return
    self.otf["hmtx"] = hmtx = newTable("hmtx")
    hmtx.metrics = {}
    for glyphName, glyph in self.allGlyphs.items():
        advance = otRound(glyph.width)
        if advance < 0:
            raise ValueError(
                "The width should not be negative: '%s'" % (glyphName))
        box = self.glyphBoundingBoxes[glyphName]
        lsb = box.xMin if box else 0
        hmtx[glyphName] = (advance, lsb)
Make the hmtx table.
147
7
23,998
def setupTable_vmtx(self):
    """Build the OpenType ``vmtx`` table.

    Records (advanceHeight, topSideBearing) per glyph, where the top
    side bearing is the glyph's vertical origin minus its yMax; empty
    glyphs (no bounding box) use a yMax of 0.

    Raises:
        ValueError: if any glyph has a negative height.
    """
    if "vmtx" not in self.tables:
        return
    self.otf["vmtx"] = vmtx = newTable("vmtx")
    vmtx.metrics = {}
    for glyphName, glyph in self.allGlyphs.items():
        advance = otRound(glyph.height)
        if advance < 0:
            raise ValueError(
                "The height should not be negative: '%s'" % (glyphName))
        origin = _getVerticalOrigin(self.otf, glyph)
        box = self.glyphBoundingBoxes[glyphName]
        top = box.yMax if box else 0
        vmtx[glyphName] = (advance, origin - top)
Make the vmtx table.
162
7
23,999
def setupTable_VORG(self):
    """Build the OpenType ``VORG`` (vertical origin) table.

    The most common vertical origin becomes the table default; explicit
    per-glyph records are only written when origins differ between
    glyphs.
    """
    if "VORG" not in self.tables:
        return
    self.otf["VORG"] = vorg = newTable("VORG")
    vorg.majorVersion = 1
    vorg.minorVersion = 0
    vorg.VOriginRecords = {}
    # Find the most frequent verticalOrigin
    originCounts = Counter(
        _getVerticalOrigin(self.otf, g) for g in self.allGlyphs.values())
    vorg.defaultVertOriginY = originCounts.most_common(1)[0][0]
    if len(originCounts) > 1:
        # Origins are not uniform: record every glyph explicitly.
        for glyphName, glyph in self.allGlyphs.items():
            vorg.VOriginRecords[glyphName] = _getVerticalOrigin(
                self.otf, glyph)
    vorg.numVertOriginYMetrics = len(vorg.VOriginRecords)
Make the VORG table.
213
7