idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
240,500
def utcoffset ( self , dt , is_dst = None ) : if dt is None : return None elif dt . tzinfo is not self : dt = self . localize ( dt , is_dst ) return dt . tzinfo . _utcoffset else : return self . _utcoffset
See datetime . tzinfo . utcoffset
78
12
240,501
def dst ( self , dt , is_dst = None ) : if dt is None : return None elif dt . tzinfo is not self : dt = self . localize ( dt , is_dst ) return dt . tzinfo . _dst else : return self . _dst
See datetime . tzinfo . dst
71
9
240,502
def tzname ( self , dt , is_dst = None ) : if dt is None : return self . zone elif dt . tzinfo is not self : dt = self . localize ( dt , is_dst ) return dt . tzinfo . _tzname else : return self . _tzname
See datetime . tzinfo . tzname
75
11
240,503
def check_range ( number , min_r , max_r , name = "" ) : try : number = float ( number ) if number < min_r or number > max_r : raise FFmpegNormalizeError ( "{} must be within [{},{}]" . format ( name , min_r , max_r ) ) return number pass except Exception as e : raise e
Check if a number is within a given range
83
9
240,504
def add_media_file ( self , input_file , output_file ) : if not os . path . exists ( input_file ) : raise FFmpegNormalizeError ( "file " + input_file + " does not exist" ) ext = os . path . splitext ( output_file ) [ 1 ] [ 1 : ] if ( self . audio_codec is None or 'pcm' in self . audio_codec ) and ext in PCM_INCOMPATIBLE_EXTS : raise FFmpegNormalizeError ( "Output extension {} does not support PCM audio. Please choose a suitable audio codec with the -c:a option." . format ( ext ) ) mf = MediaFile ( self , input_file , output_file ) self . media_files . append ( mf ) self . file_count += 1
Add a media file to normalize
183
7
240,505
def run_normalization ( self ) : for index , media_file in enumerate ( tqdm ( self . media_files , desc = "File" , disable = not self . progress , position = 0 ) ) : logger . info ( "Normalizing file {} ({} of {})" . format ( media_file , index + 1 , self . file_count ) ) media_file . run_normalization ( ) logger . info ( "Normalized file written to {}" . format ( media_file . output_file ) )
Run the normalization procedures
114
5
240,506
def get_ffmpeg_exe ( ) : if 'FFMPEG_PATH' in os . environ : ffmpeg_exe = os . environ [ 'FFMPEG_PATH' ] else : ffmpeg_exe = which ( 'ffmpeg' ) if not ffmpeg_exe : if which ( 'avconv' ) : raise FFmpegNormalizeError ( "avconv is not supported. " "Please install ffmpeg from http://ffmpeg.org instead." ) else : raise FFmpegNormalizeError ( "Could not find ffmpeg in your $PATH or $FFMPEG_PATH. " "Please install ffmpeg from http://ffmpeg.org" ) return ffmpeg_exe
Return path to ffmpeg executable
149
6
240,507
def ffmpeg_has_loudnorm ( ) : cmd_runner = CommandRunner ( [ get_ffmpeg_exe ( ) , '-filters' ] ) cmd_runner . run_command ( ) output = cmd_runner . get_output ( ) if 'loudnorm' in output : return True else : logger . warning ( "Your ffmpeg version does not support the 'loudnorm' filter. " "Please make sure you are running ffmpeg v3.1 or above." ) return False
Run feature detection on ffmpeg returns True if ffmpeg supports the loudnorm filter
109
16
240,508
def parse_streams ( self ) : logger . debug ( "Parsing streams of {}" . format ( self . input_file ) ) cmd = [ self . ffmpeg_normalize . ffmpeg_exe , '-i' , self . input_file , '-c' , 'copy' , '-t' , '0' , '-map' , '0' , '-f' , 'null' , NUL ] cmd_runner = CommandRunner ( cmd ) cmd_runner . run_command ( ) output = cmd_runner . get_output ( ) logger . debug ( "Stream parsing command output:" ) logger . debug ( output ) output_lines = [ line . strip ( ) for line in output . split ( '\n' ) ] for line in output_lines : if not line . startswith ( 'Stream' ) : continue stream_id_match = re . search ( r'#0:([\d]+)' , line ) if stream_id_match : stream_id = int ( stream_id_match . group ( 1 ) ) if stream_id in self . _stream_ids ( ) : continue else : continue if 'Audio' in line : logger . debug ( "Found audio stream at index {}" . format ( stream_id ) ) sample_rate_match = re . search ( r'(\d+) Hz' , line ) sample_rate = int ( sample_rate_match . group ( 1 ) ) if sample_rate_match else None bit_depth_match = re . search ( r's(\d+)p?,' , line ) bit_depth = int ( bit_depth_match . group ( 1 ) ) if bit_depth_match else None self . streams [ 'audio' ] [ stream_id ] = AudioStream ( self , stream_id , sample_rate , bit_depth ) elif 'Video' in line : logger . debug ( "Found video stream at index {}" . format ( stream_id ) ) self . streams [ 'video' ] [ stream_id ] = VideoStream ( self , stream_id ) elif 'Subtitle' in line : logger . debug ( "Found subtitle stream at index {}" . format ( stream_id ) ) self . streams [ 'subtitle' ] [ stream_id ] = SubtitleStream ( self , stream_id ) if not self . streams [ 'audio' ] : raise FFmpegNormalizeError ( "Input file {} does not contain any audio streams" . format ( self . input_file ) ) if os . path . splitext ( self . output_file ) [ 1 ] . lower ( ) in [ '.wav' , '.mp3' , '.aac' ] : logger . warning ( "Output file only supports one stream. " "Keeping only first audio stream." ) first_stream = list ( self . streams [ 'audio' ] . values ( ) ) [ 0 ] self . streams [ 'audio' ] = { first_stream . stream_id : first_stream } self . streams [ 'video' ] = { } self . streams [ 'subtitle' ] = { }
Try to parse all input streams from file
673
8
240,509
def _get_audio_filter_cmd ( self ) : all_filters = [ ] output_labels = [ ] for audio_stream in self . streams [ 'audio' ] . values ( ) : if self . ffmpeg_normalize . normalization_type == 'ebu' : stream_filter = audio_stream . get_second_pass_opts_ebu ( ) else : stream_filter = audio_stream . get_second_pass_opts_peakrms ( ) input_label = '[0:{}]' . format ( audio_stream . stream_id ) output_label = '[norm{}]' . format ( audio_stream . stream_id ) output_labels . append ( output_label ) all_filters . append ( input_label + stream_filter + output_label ) filter_complex_cmd = ';' . join ( all_filters ) return filter_complex_cmd , output_labels
Return filter_complex command and output labels needed
207
9
240,510
def parse_volumedetect_stats ( self ) : logger . info ( "Running first pass volumedetect filter for stream {}" . format ( self . stream_id ) ) filter_str = '[0:{}]volumedetect' . format ( self . stream_id ) cmd = [ self . media_file . ffmpeg_normalize . ffmpeg_exe , '-nostdin' , '-y' , '-i' , self . media_file . input_file , '-filter_complex' , filter_str , '-vn' , '-sn' , '-f' , 'null' , NUL ] cmd_runner = CommandRunner ( cmd ) for progress in cmd_runner . run_ffmpeg_command ( ) : yield progress output = cmd_runner . get_output ( ) logger . debug ( "Volumedetect command output:" ) logger . debug ( output ) mean_volume_matches = re . findall ( r"mean_volume: ([\-\d\.]+) dB" , output ) if mean_volume_matches : self . loudness_statistics [ 'mean' ] = float ( mean_volume_matches [ 0 ] ) else : raise FFmpegNormalizeError ( "Could not get mean volume for {}" . format ( self . media_file . input_file ) ) max_volume_matches = re . findall ( r"max_volume: ([\-\d\.]+) dB" , output ) if max_volume_matches : self . loudness_statistics [ 'max' ] = float ( max_volume_matches [ 0 ] ) else : raise FFmpegNormalizeError ( "Could not get max volume for {}" . format ( self . media_file . input_file ) )
Use ffmpeg with volumedetect filter to get the mean volume of the input file .
391
19
240,511
def parse_loudnorm_stats ( self ) : logger . info ( "Running first pass loudnorm filter for stream {}" . format ( self . stream_id ) ) opts = { 'i' : self . media_file . ffmpeg_normalize . target_level , 'lra' : self . media_file . ffmpeg_normalize . loudness_range_target , 'tp' : self . media_file . ffmpeg_normalize . true_peak , 'offset' : self . media_file . ffmpeg_normalize . offset , 'print_format' : 'json' } if self . media_file . ffmpeg_normalize . dual_mono : opts [ 'dual_mono' ] = 'true' filter_str = '[0:{}]' . format ( self . stream_id ) + 'loudnorm=' + dict_to_filter_opts ( opts ) cmd = [ self . media_file . ffmpeg_normalize . ffmpeg_exe , '-nostdin' , '-y' , '-i' , self . media_file . input_file , '-filter_complex' , filter_str , '-vn' , '-sn' , '-f' , 'null' , NUL ] cmd_runner = CommandRunner ( cmd ) for progress in cmd_runner . run_ffmpeg_command ( ) : yield progress output = cmd_runner . get_output ( ) logger . debug ( "Loudnorm first pass command output:" ) logger . debug ( output ) output_lines = [ line . strip ( ) for line in output . split ( '\n' ) ] loudnorm_start = False loudnorm_end = False for index , line in enumerate ( output_lines ) : if line . startswith ( '[Parsed_loudnorm' ) : loudnorm_start = index + 1 continue if loudnorm_start and line . startswith ( '}' ) : loudnorm_end = index + 1 break if not ( loudnorm_start and loudnorm_end ) : raise FFmpegNormalizeError ( "Could not parse loudnorm stats; no loudnorm-related output found" ) try : loudnorm_stats = json . loads ( '\n' . join ( output_lines [ loudnorm_start : loudnorm_end ] ) ) except Exception as e : raise FFmpegNormalizeError ( "Could not parse loudnorm stats; wrong JSON format in string: {}" . format ( e ) ) logger . debug ( "Loudnorm stats parsed: {}" . format ( json . dumps ( loudnorm_stats ) ) ) self . loudness_statistics [ 'ebu' ] = loudnorm_stats for key , val in self . loudness_statistics [ 'ebu' ] . items ( ) : if key == 'normalization_type' : continue # FIXME: drop Python 2 support and just use math.inf if float ( val ) == - float ( "inf" ) : self . loudness_statistics [ 'ebu' ] [ key ] = - 99 elif float ( val ) == float ( "inf" ) : self . loudness_statistics [ 'ebu' ] [ key ] = 0
Run a first pass loudnorm filter to get measured data .
707
12
240,512
def get_second_pass_opts_ebu ( self ) : if not self . loudness_statistics [ 'ebu' ] : raise FFmpegNormalizeError ( "First pass not run, you must call parse_loudnorm_stats first" ) input_i = float ( self . loudness_statistics [ 'ebu' ] [ "input_i" ] ) if input_i > 0 : logger . warn ( "Input file had measured input loudness greater than zero ({}), capping at 0" . format ( "input_i" ) ) self . loudness_statistics [ 'ebu' ] [ 'input_i' ] = 0 opts = { 'i' : self . media_file . ffmpeg_normalize . target_level , 'lra' : self . media_file . ffmpeg_normalize . loudness_range_target , 'tp' : self . media_file . ffmpeg_normalize . true_peak , 'offset' : self . media_file . ffmpeg_normalize . offset , 'measured_i' : float ( self . loudness_statistics [ 'ebu' ] [ 'input_i' ] ) , 'measured_lra' : float ( self . loudness_statistics [ 'ebu' ] [ 'input_lra' ] ) , 'measured_tp' : float ( self . loudness_statistics [ 'ebu' ] [ 'input_tp' ] ) , 'measured_thresh' : float ( self . loudness_statistics [ 'ebu' ] [ 'input_thresh' ] ) , 'linear' : 'true' , 'print_format' : 'json' } if self . media_file . ffmpeg_normalize . dual_mono : opts [ 'dual_mono' ] = 'true' return 'loudnorm=' + dict_to_filter_opts ( opts )
Return second pass loudnorm filter options string for ffmpeg
431
11
240,513
def setup_custom_logger ( name ) : global loggers if loggers . get ( name ) : return loggers . get ( name ) formatter = logging . Formatter ( fmt = '%(levelname)s: %(message)s' ) # handler = logging.StreamHandler() handler = TqdmLoggingHandler ( ) handler . setFormatter ( formatter ) # \033[1;30m - black # \033[1;31m - red # \033[1;32m - green # \033[1;33m - yellow # \033[1;34m - blue # \033[1;35m - magenta # \033[1;36m - cyan # \033[1;37m - white if system ( ) not in [ 'Windows' , 'cli' ] : logging . addLevelName ( logging . ERROR , "\033[1;31m%s\033[1;0m" % logging . getLevelName ( logging . ERROR ) ) logging . addLevelName ( logging . WARNING , "\033[1;33m%s\033[1;0m" % logging . getLevelName ( logging . WARNING ) ) logging . addLevelName ( logging . INFO , "\033[1;34m%s\033[1;0m" % logging . getLevelName ( logging . INFO ) ) logging . addLevelName ( logging . DEBUG , "\033[1;35m%s\033[1;0m" % logging . getLevelName ( logging . DEBUG ) ) logger = logging . getLogger ( name ) logger . setLevel ( logging . WARNING ) # if (logger.hasHandlers()): # logger.handlers.clear() if logger . handlers : logger . handlers = [ ] logger . addHandler ( handler ) loggers . update ( dict ( name = logger ) ) return logger
Create a logger with a certain name and level
406
9
240,514
def register_checkers ( linter ) : linter . register_checker ( ModelChecker ( linter ) ) linter . register_checker ( DjangoInstalledChecker ( linter ) ) linter . register_checker ( JsonResponseChecker ( linter ) ) linter . register_checker ( FormChecker ( linter ) )
Register checkers .
78
4
240,515
def register ( linter ) : linter . register_checker ( NewDbFieldWithDefaultChecker ( linter ) ) if not compat . LOAD_CONFIGURATION_SUPPORTED : load_configuration ( linter )
Required method to auto register this checker .
51
9
240,516
def ignore_import_warnings_for_related_fields ( orig_method , self , node ) : consumer = self . _to_consume [ 0 ] # pylint: disable=W0212 # we can disable this warning ('Access to a protected member _to_consume of a client class') # as it's not actually a client class, but rather, this method is being monkey patched # onto the class and so the access is valid new_things = { } iterat = consumer . to_consume . items if PY3 else consumer . to_consume . iteritems for name , stmts in iterat ( ) : if isinstance ( stmts [ 0 ] , ImportFrom ) : if any ( [ n [ 0 ] in ( 'ForeignKey' , 'OneToOneField' ) for n in stmts [ 0 ] . names ] ) : continue new_things [ name ] = stmts consumer . _atomic = ScopeConsumer ( new_things , consumer . consumed , consumer . scope_type ) # pylint: disable=W0212 self . _to_consume = [ consumer ] # pylint: disable=W0212 return orig_method ( self , node )
Replaces the leave_module method on the VariablesChecker class to prevent unused - import warnings which are caused by the ForeignKey and OneToOneField transformations . By replacing the nodes in the AST with their type rather than the django field imports of the form from django . db . models import OneToOneField raise an unused - import warning
263
72
240,517
def is_model_admin_subclass ( node ) : if node . name [ - 5 : ] != 'Admin' or isinstance ( node . parent , ClassDef ) : return False return node_is_subclass ( node , 'django.contrib.admin.options.ModelAdmin' )
Checks that node is derivative of ModelAdmin class .
65
11
240,518
def is_model_factory ( node ) : try : parent_classes = node . expr . inferred ( ) except : # noqa: E722, pylint: disable=bare-except return False parents = ( 'factory.declarations.LazyFunction' , 'factory.declarations.SubFactory' , 'factory.django.DjangoModelFactory' ) for parent_class in parent_classes : try : if parent_class . qname ( ) in parents : return True if node_is_subclass ( parent_class , * parents ) : return True except AttributeError : continue return False
Checks that node is derivative of DjangoModelFactory or SubFactory class .
136
15
240,519
def is_model_mpttmeta_subclass ( node ) : if node . name != 'MPTTMeta' or not isinstance ( node . parent , ClassDef ) : return False parents = ( 'django.db.models.base.Model' , '.Model' , # for the transformed version used in this plugin 'django.forms.forms.Form' , '.Form' , 'django.forms.models.ModelForm' , '.ModelForm' ) return node_is_subclass ( node . parent , * parents )
Checks that node is derivative of MPTTMeta class .
117
12
240,520
def _attribute_is_magic ( node , attrs , parents ) : if node . attrname not in attrs : return False if not node . last_child ( ) : return False try : for cls in node . last_child ( ) . inferred ( ) : if isinstance ( cls , Super ) : cls = cls . _self_class # pylint: disable=protected-access if node_is_subclass ( cls , * parents ) or cls . qname ( ) in parents : return True except InferenceError : pass return False
Checks that node is an attribute used inside one of allowed parents
124
13
240,521
def generic_is_view_attribute ( parents , attrs ) : def is_attribute ( node ) : return _attribute_is_magic ( node , attrs , parents ) return is_attribute
Generates is_X_attribute function for given parents and attrs .
42
15
240,522
def is_model_view_subclass_method_shouldnt_be_function ( node ) : if node . name not in ( 'get' , 'post' ) : return False parent = node . parent while parent and not isinstance ( parent , ScopedClass ) : parent = parent . parent subclass = ( 'django.views.View' , 'django.views.generic.View' , 'django.views.generic.base.View' , ) return parent is not None and node_is_subclass ( parent , * subclass )
Checks that node is get or post method of the View class .
119
14
240,523
def is_model_media_valid_attributes ( node ) : if node . name not in ( 'js' , ) : return False parent = node . parent while parent and not isinstance ( parent , ScopedClass ) : parent = parent . parent if parent is None or parent . name != "Media" : return False return True
Suppress warnings for valid attributes of Media class .
71
10
240,524
def is_templatetags_module_valid_constant ( node ) : if node . name not in ( 'register' , ) : return False parent = node . parent while not isinstance ( parent , Module ) : parent = parent . parent if "templatetags." not in parent . name : return False return True
Suppress warnings for valid constants in templatetags module .
72
14
240,525
def is_urls_module_valid_constant ( node ) : if node . name not in ( 'urlpatterns' , 'app_name' ) : return False parent = node . parent while not isinstance ( parent , Module ) : parent = parent . parent if not parent . name . endswith ( 'urls' ) : return False return True
Suppress warnings for valid constants in urls module .
78
11
240,526
def load_configuration ( linter ) : name_checker = get_checker ( linter , NameChecker ) name_checker . config . good_names += ( 'qs' , 'urlpatterns' , 'register' , 'app_name' , 'handler500' ) # we don't care about South migrations linter . config . black_list += ( 'migrations' , 'south_migrations' )
Amend existing checker config .
97
7
240,527
def register ( linter ) : # add all of the checkers register_checkers ( linter ) # register any checking fiddlers try : from pylint_django . augmentations import apply_augmentations apply_augmentations ( linter ) except ImportError : # probably trying to execute pylint_django when Django isn't installed # in this case the django-not-installed checker will kick-in pass if not compat . LOAD_CONFIGURATION_SUPPORTED : load_configuration ( linter )
Registering additional checkers .
118
6
240,528
async def create_object ( model , * * data ) : # NOTE! Here are internals involved: # # - obj._data # - obj._get_pk_value() # - obj._set_pk_value() # - obj._prepare_instance() # warnings . warn ( "create_object() is deprecated, Manager.create() " "should be used instead" , DeprecationWarning ) obj = model ( * * data ) pk = await insert ( model . insert ( * * dict ( obj . __data__ ) ) ) if obj . _pk is None : obj . _pk = pk return obj
Create object asynchronously .
139
6
240,529
async def get_object ( source , * args ) : warnings . warn ( "get_object() is deprecated, Manager.get() " "should be used instead" , DeprecationWarning ) if isinstance ( source , peewee . Query ) : query = source model = query . model else : query = source . select ( ) model = source # Return first object from query for obj in ( await select ( query . where ( * args ) ) ) : return obj # No objects found raise model . DoesNotExist
Get object asynchronously .
111
6
240,530
async def delete_object ( obj , recursive = False , delete_nullable = False ) : warnings . warn ( "delete_object() is deprecated, Manager.delete() " "should be used instead" , DeprecationWarning ) # Here are private calls involved: # - obj._pk_expr() if recursive : dependencies = obj . dependencies ( delete_nullable ) for query , fk in reversed ( list ( dependencies ) ) : model = fk . model if fk . null and not delete_nullable : await update ( model . update ( * * { fk . name : None } ) . where ( query ) ) else : await delete ( model . delete ( ) . where ( query ) ) result = await delete ( obj . delete ( ) . where ( obj . _pk_expr ( ) ) ) return result
Delete object asynchronously .
178
6
240,531
async def update_object ( obj , only = None ) : # Here are private calls involved: # # - obj._data # - obj._meta # - obj._prune_fields() # - obj._pk_expr() # - obj._dirty.clear() # warnings . warn ( "update_object() is deprecated, Manager.update() " "should be used instead" , DeprecationWarning ) field_dict = dict ( obj . __data__ ) pk_field = obj . _meta . primary_key if only : field_dict = obj . _prune_fields ( field_dict , only ) if not isinstance ( pk_field , peewee . CompositeKey ) : field_dict . pop ( pk_field . name , None ) else : field_dict = obj . _prune_fields ( field_dict , obj . dirty_fields ) rows = await update ( obj . update ( * * field_dict ) . where ( obj . _pk_expr ( ) ) ) obj . _dirty . clear ( ) return rows
Update object asynchronously .
230
6
240,532
async def select ( query ) : assert isinstance ( query , peewee . SelectQuery ) , ( "Error, trying to run select coroutine" "with wrong query class %s" % str ( query ) ) cursor = await _execute_query_async ( query ) result = AsyncQueryWrapper ( cursor = cursor , query = query ) try : while True : await result . fetchone ( ) except GeneratorExit : pass finally : await cursor . release ( ) return result
Perform SELECT query asynchronously .
103
8
240,533
async def insert ( query ) : assert isinstance ( query , peewee . Insert ) , ( "Error, trying to run insert coroutine" "with wrong query class %s" % str ( query ) ) cursor = await _execute_query_async ( query ) try : if query . _returning : row = await cursor . fetchone ( ) result = row [ 0 ] else : database = _query_db ( query ) last_id = await database . last_insert_id_async ( cursor ) result = last_id finally : await cursor . release ( ) return result
Perform INSERT query asynchronously . Returns last insert ID . This function is called by object . create for single objects only .
127
27
240,534
async def update ( query ) : assert isinstance ( query , peewee . Update ) , ( "Error, trying to run update coroutine" "with wrong query class %s" % str ( query ) ) cursor = await _execute_query_async ( query ) rowcount = cursor . rowcount await cursor . release ( ) return rowcount
Perform UPDATE query asynchronously . Returns number of rows updated .
75
14
240,535
async def delete ( query ) : assert isinstance ( query , peewee . Delete ) , ( "Error, trying to run delete coroutine" "with wrong query class %s" % str ( query ) ) cursor = await _execute_query_async ( query ) rowcount = cursor . rowcount await cursor . release ( ) return rowcount
Perform DELETE query asynchronously . Returns number of rows deleted .
75
16
240,536
def sync_unwanted ( database ) : warnings . warn ( "sync_unwanted() context manager is deprecated, " "use database's `.allow_sync()` context manager or " "`Manager.allow_sync()` context manager. " , DeprecationWarning ) old_allow_sync = database . _allow_sync database . _allow_sync = False yield database . _allow_sync = old_allow_sync
Context manager for preventing unwanted sync queries . UnwantedSyncQueryError exception will raise on such query .
94
21
240,537
async def get ( self , source_ , * args , * * kwargs ) : await self . connect ( ) if isinstance ( source_ , peewee . Query ) : query = source_ model = query . model else : query = source_ . select ( ) model = source_ conditions = list ( args ) + [ ( getattr ( model , k ) == v ) for k , v in kwargs . items ( ) ] if conditions : query = query . where ( * conditions ) try : result = await self . execute ( query ) return list ( result ) [ 0 ] except IndexError : raise model . DoesNotExist
Get the model instance .
137
5
240,538
async def create ( self , model_ , * * data ) : inst = model_ ( * * data ) query = model_ . insert ( * * dict ( inst . __data__ ) ) pk = await self . execute ( query ) if inst . _pk is None : inst . _pk = pk return inst
Create a new object saved to database .
71
8
240,539
async def get_or_create ( self , model_ , defaults = None , * * kwargs ) : try : return ( await self . get ( model_ , * * kwargs ) ) , False except model_ . DoesNotExist : data = defaults or { } data . update ( { k : v for k , v in kwargs . items ( ) if '__' not in k } ) return ( await self . create ( model_ , * * data ) ) , True
Try to get an object or create it with the specified defaults .
107
13
240,540
async def create_or_get ( self , model_ , * * kwargs ) : try : return ( await self . create ( model_ , * * kwargs ) ) , True except IntegrityErrors : query = [ ] for field_name , value in kwargs . items ( ) : field = getattr ( model_ , field_name ) if field . unique or field . primary_key : query . append ( field == value ) return ( await self . get ( model_ , * query ) ) , False
Try to create new object with specified data . If object already exists then try to get it by unique fields .
113
22
240,541
def _subclassed ( base , * classes ) : return all ( map ( lambda obj : isinstance ( obj , base ) , classes ) )
Check if all classes are subclassed from base .
31
10
240,542
def _get_result_wrapper ( self , query ) : cursor = RowsCursor ( self . _rows , self . _cursor . description ) return query . _get_cursor_wrapper ( cursor )
Get result wrapper class .
46
5
240,543
async def fetchone ( self ) : row = await self . _cursor . fetchone ( ) if not row : raise GeneratorExit self . _rows . append ( row )
Fetch single row from the cursor .
38
8
240,544
async def connect_async ( self , loop = None , timeout = None ) : if self . deferred : raise Exception ( "Error, database not properly initialized " "before opening connection" ) if self . _async_conn : return elif self . _async_wait : await self . _async_wait else : self . _loop = loop self . _async_wait = asyncio . Future ( loop = self . _loop ) conn = self . _async_conn_cls ( database = self . database , loop = self . _loop , timeout = timeout , * * self . connect_params_async ) try : await conn . connect ( ) except Exception as e : if not self . _async_wait . done ( ) : self . _async_wait . set_exception ( e ) self . _async_wait = None raise else : self . _task_data = TaskLocals ( loop = self . _loop ) self . _async_conn = conn self . _async_wait . set_result ( True )
Set up async connection on specified event loop or on default event loop .
233
14
240,545
async def cursor_async ( self ) : await self . connect_async ( loop = self . _loop ) if self . transaction_depth_async ( ) > 0 : conn = self . transaction_conn_async ( ) else : conn = None try : return ( await self . _async_conn . cursor ( conn = conn ) ) except : await self . close_async ( ) raise
Acquire async cursor .
89
5
240,546
async def close_async ( self ) : if self . _async_wait : await self . _async_wait if self . _async_conn : conn = self . _async_conn self . _async_conn = None self . _async_wait = None self . _task_data = None await conn . close ( )
Close async connection .
78
4
240,547
async def push_transaction_async ( self ) : await self . connect_async ( loop = self . loop ) depth = self . transaction_depth_async ( ) if not depth : conn = await self . _async_conn . acquire ( ) self . _task_data . set ( 'conn' , conn ) self . _task_data . set ( 'depth' , depth + 1 )
Increment async transaction depth .
90
6
240,548
async def pop_transaction_async ( self ) : depth = self . transaction_depth_async ( ) if depth > 0 : depth -= 1 self . _task_data . set ( 'depth' , depth ) if depth == 0 : conn = self . _task_data . get ( 'conn' ) self . _async_conn . release ( conn ) else : raise ValueError ( "Invalid async transaction depth value" )
Decrement async transaction depth .
95
6
240,549
def allow_sync ( self ) : old_allow_sync = self . _allow_sync self . _allow_sync = True try : yield except : raise finally : try : self . close ( ) except self . Error : pass # already closed self . _allow_sync = old_allow_sync
Allow sync queries within context . Close sync connection on exit if connected .
64
14
240,550
def execute_sql ( self , * args , * * kwargs ) : assert self . _allow_sync , ( "Error, sync query is not allowed! Call the `.set_allow_sync()` " "or use the `.allow_sync()` context manager." ) if self . _allow_sync in ( logging . ERROR , logging . WARNING ) : logging . log ( self . _allow_sync , "Error, sync query is not allowed: %s %s" % ( str ( args ) , str ( kwargs ) ) ) return super ( ) . execute_sql ( * args , * * kwargs )
Sync execute SQL query allow_sync must be set to True .
138
13
240,551
async def cursor ( self , conn = None , * args , * * kwargs ) : in_transaction = conn is not None if not conn : conn = await self . acquire ( ) cursor = await conn . cursor ( * args , * * kwargs ) cursor . release = functools . partial ( self . release_cursor , cursor , in_transaction = in_transaction ) return cursor
Get a cursor for the specified transaction connection or acquire from the pool .
89
14
240,552
def connect_params_async ( self ) : kwargs = self . connect_params . copy ( ) kwargs . update ( { 'minsize' : self . min_connections , 'maxsize' : self . max_connections , 'enable_json' : self . _enable_json , 'enable_hstore' : self . _enable_hstore , } ) return kwargs
Connection parameters for aiopg . Connection
89
8
240,553
async def release_cursor ( self , cursor , in_transaction = False ) : conn = cursor . connection await cursor . close ( ) if not in_transaction : self . release ( conn )
Release cursor coroutine . Unless in transaction the connection is also released back to the pool .
44
18
240,554
def connect_params_async ( self ) : kwargs = self . connect_params . copy ( ) kwargs . update ( { 'minsize' : self . min_connections , 'maxsize' : self . max_connections , 'autocommit' : True , } ) return kwargs
Connection parameters for aiomysql . Connection
70
9
240,555
def get ( self , key , * val ) : data = self . get_data ( ) if data is not None : return data . get ( key , * val ) if val : return val [ 0 ] raise KeyError ( key )
Get value stored for current running task . Optionally you may provide the default value . Raises KeyError when can t get the value and no default one is provided .
50
34
240,556
def set ( self , key , val ) : data = self . get_data ( True ) if data is not None : data [ key ] = val else : raise RuntimeError ( "No task is currently running" )
Set value stored for current running task .
46
8
240,557
def get_data ( self , create = False ) : task = asyncio_current_task ( loop = self . loop ) if task : task_id = id ( task ) if create and task_id not in self . data : self . data [ task_id ] = { } task . add_done_callback ( self . del_data ) return self . data . get ( task_id ) return None
Get dict stored for current running task . Return None or an empty dict if no data was found depending on the create argument value .
88
26
240,558
def _get_from_bin ( self ) : # Find the real interpreter installation path java_bin = os . path . realpath ( self . _java ) if os . path . exists ( java_bin ) : # Get to the home directory java_home = os . path . abspath ( os . path . join ( os . path . dirname ( java_bin ) , '..' ) ) # Look for the JVM library return self . find_libjvm ( java_home )
Retrieves the Java library path according to the real installation of the java executable
106
16
240,559
def initialize_options ( self , * args ) : import distutils . sysconfig cfg_vars = distutils . sysconfig . get_config_vars ( ) # if 'CFLAGS' in cfg_vars: # cfg_vars['CFLAGS'] = cfg_vars['CFLAGS'].replace('-Wstrict-prototypes', '') for k , v in cfg_vars . items ( ) : if isinstance ( v , str ) and v . find ( "-Wstrict-prototypes" ) : v = v . replace ( '-Wstrict-prototypes' , '' ) cfg_vars [ k ] = v if isinstance ( v , str ) and v . find ( "-Wimplicit-function-declaration" ) : v = v . replace ( '-Wimplicit-function-declaration' , '' ) cfg_vars [ k ] = v build_ext . initialize_options ( self )
omit - Wstrict - prototypes from CFLAGS since its only valid for C code .
217
20
240,560
def addClassPath ( path1 ) : global _CLASSPATHS path1 = _os . path . abspath ( path1 ) if _sys . platform == 'cygwin' : path1 = _posix2win ( path1 ) _CLASSPATHS . add ( str ( path1 ) )
Add a path to the java class path
69
8
240,561
def getClassPath ( ) : global _CLASSPATHS global _SEP out = [ ] for path in _CLASSPATHS : if path == '' : continue if path . endswith ( '*' ) : paths = _glob . glob ( path + ".jar" ) if len ( path ) == 0 : continue out . extend ( paths ) else : out . append ( path ) return _SEP . join ( out )
Get the full java class path .
96
7
240,562
def find_libjvm ( self , java_home ) : found_jamvm = False non_supported_jvm = ( 'cacao' , 'jamvm' ) found_non_supported_jvm = False # Look for the file for root , _ , names in os . walk ( java_home ) : if self . _libfile in names : # Found it, but check for non supported jvms candidate = os . path . split ( root ) [ 1 ] if candidate in non_supported_jvm : found_non_supported_jvm = True continue # maybe we will find another one? return os . path . join ( root , self . _libfile ) else : if found_non_supported_jvm : raise JVMNotSupportedException ( "Sorry '{0}' is known to be " "broken. Please ensure your " "JAVA_HOME contains at least " "another JVM implementation " "(eg. server)" . format ( candidate ) ) # File not found raise JVMNotFoundException ( "Sorry no JVM could be found. " "Please ensure your JAVA_HOME " "environment variable is pointing " "to correct installation." )
Recursively looks for the given file
256
8
240,563
def find_possible_homes ( self , parents ) : homes = [ ] java_names = ( 'jre' , 'jdk' , 'java' ) for parent in parents : for childname in sorted ( os . listdir ( parent ) ) : # Compute the real path path = os . path . realpath ( os . path . join ( parent , childname ) ) if path in homes or not os . path . isdir ( path ) : # Already known path, or not a directory -> ignore continue # Check if the path seems OK real_name = os . path . basename ( path ) . lower ( ) for java_name in java_names : if java_name in real_name : # Correct JVM folder name homes . append ( path ) yield path break
Generator that looks for the first - level children folders that could be Java installations according to their name
169
20
240,564
def _get_from_java_home ( self ) : # Get the environment variable java_home = os . getenv ( "JAVA_HOME" ) if java_home and os . path . exists ( java_home ) : # Get the real installation path java_home = os . path . realpath ( java_home ) # Cygwin has a bug in realpath if not os . path . exists ( java_home ) : java_home = os . getenv ( "JAVA_HOME" ) # Look for the library file return self . find_libjvm ( java_home )
Retrieves the Java library path according to the JAVA_HOME environment variable
131
17
240,565
def _get_from_known_locations ( self ) : for home in self . find_possible_homes ( self . _locations ) : jvm = self . find_libjvm ( home ) if jvm is not None : return jvm
Retrieves the first existing Java library path in the predefined known locations
57
15
240,566
def node_query ( self , node ) : if isinstance ( node , ast . Call ) : assert node . args arg = node . args [ 0 ] if not isinstance ( arg , ast . Str ) : return else : raise TypeError ( type ( node ) ) return arg . s
Return the query for the gql call node
61
9
240,567
def default ( thumbnailer , prepared_options , source_filename , thumbnail_extension , * * kwargs ) : filename_parts = [ source_filename ] if ( '%(opts)s' in thumbnailer . thumbnail_basedir or '%(opts)s' in thumbnailer . thumbnail_subdir ) : if thumbnail_extension != os . path . splitext ( source_filename ) [ 1 ] [ 1 : ] : filename_parts . append ( thumbnail_extension ) else : filename_parts += [ '_' . join ( prepared_options ) , thumbnail_extension ] return '.' . join ( filename_parts )
Easy - thumbnails default name processor .
142
8
240,568
def hashed ( source_filename , prepared_options , thumbnail_extension , * * kwargs ) : parts = ':' . join ( [ source_filename ] + prepared_options ) short_sha = hashlib . sha1 ( parts . encode ( 'utf-8' ) ) . digest ( ) short_hash = base64 . urlsafe_b64encode ( short_sha [ : 9 ] ) . decode ( 'utf-8' ) return '.' . join ( [ short_hash , thumbnail_extension ] )
Generate a short hashed thumbnail filename .
117
9
240,569
def source_hashed ( source_filename , prepared_options , thumbnail_extension , * * kwargs ) : source_sha = hashlib . sha1 ( source_filename . encode ( 'utf-8' ) ) . digest ( ) source_hash = base64 . urlsafe_b64encode ( source_sha [ : 9 ] ) . decode ( 'utf-8' ) parts = ':' . join ( prepared_options [ 1 : ] ) parts_sha = hashlib . sha1 ( parts . encode ( 'utf-8' ) ) . digest ( ) options_hash = base64 . urlsafe_b64encode ( parts_sha [ : 6 ] ) . decode ( 'utf-8' ) return '%s_%s_%s.%s' % ( source_hash , prepared_options [ 0 ] , options_hash , thumbnail_extension )
Generate a thumbnail filename of the source filename and options separately hashed along with the size .
197
19
240,570
def save_image ( image , destination = None , filename = None , * * options ) : if destination is None : destination = BytesIO ( ) filename = filename or '' # Ensure plugins are fully loaded so that Image.EXTENSION is populated. Image . init ( ) format = Image . EXTENSION . get ( os . path . splitext ( filename ) [ 1 ] . lower ( ) , 'JPEG' ) if format in ( 'JPEG' , 'WEBP' ) : options . setdefault ( 'quality' , 85 ) saved = False if format == 'JPEG' : if image . mode . endswith ( 'A' ) : # From PIL 4.2, saving an image with a transparency layer raises an # IOError, so explicitly remove it. image = image . convert ( image . mode [ : - 1 ] ) if settings . THUMBNAIL_PROGRESSIVE and ( max ( image . size ) >= settings . THUMBNAIL_PROGRESSIVE ) : options [ 'progressive' ] = True try : image . save ( destination , format = format , optimize = 1 , * * options ) saved = True except IOError : # Try again, without optimization (PIL can't optimize an image # larger than ImageFile.MAXBLOCK, which is 64k by default). This # shouldn't be triggered very often these days, as recent versions # of pillow avoid the MAXBLOCK limitation. pass if not saved : image . save ( destination , format = format , * * options ) if hasattr ( destination , 'seek' ) : destination . seek ( 0 ) return destination
Save a PIL image .
348
6
240,571
def generate_source_image ( source_file , processor_options , generators = None , fail_silently = True ) : processor_options = ThumbnailOptions ( processor_options ) # Keep record of whether the source file was originally closed. Not all # file-like objects provide this attribute, so just fall back to False. was_closed = getattr ( source_file , 'closed' , False ) if generators is None : generators = [ utils . dynamic_import ( name ) for name in settings . THUMBNAIL_SOURCE_GENERATORS ] exceptions = [ ] try : for generator in generators : source = source_file # First try to open the file. try : source . open ( ) except Exception : # If that failed, maybe the file-like object doesn't support # reopening so just try seeking back to the start of the file. try : source . seek ( 0 ) except Exception : source = None try : image = generator ( source , * * processor_options ) except Exception as e : if not fail_silently : if len ( generators ) == 1 : raise exceptions . append ( e ) image = None if image : return image finally : # Attempt to close the file if it was closed originally (but fail # silently). if was_closed : try : source_file . close ( ) except Exception : pass if exceptions and not fail_silently : raise NoSourceGenerator ( * exceptions )
Processes a source File through a series of source generators stopping once a generator returns an image .
302
19
240,572
def revert ( self ) : for attr , value in self . _changed . items ( ) : setattr ( django_settings , attr , value ) for attr in self . _added : delattr ( django_settings , attr ) self . _changed = { } self . _added = [ ] if self . isolated : self . _isolated_overrides = BaseSettings ( )
Revert any changes made to settings .
87
9
240,573
def pil_image ( source , exif_orientation = True , * * options ) : # Use a BytesIO wrapper because if the source is an incomplete file like # object, PIL may have problems with it. For example, some image types # require tell and seek methods that are not present on all storage # File objects. if not source : return source = BytesIO ( source . read ( ) ) image = Image . open ( source ) # Fully load the image now to catch any problems with the image contents. try : # An "Image file truncated" exception can occur for some images that # are still mostly valid -- we'll swallow the exception. image . load ( ) except IOError : pass # Try a second time to catch any other potential exceptions. image . load ( ) if exif_orientation : image = utils . exif_orientation ( image ) return image
Try to open the source file directly using PIL ignoring any errors .
188
14
240,574
def optimize_thumbnail ( thumbnail ) : try : optimize_command = settings . THUMBNAIL_OPTIMIZE_COMMAND [ determinetype ( thumbnail . path ) ] if not optimize_command : return except ( TypeError , KeyError , NotImplementedError ) : return storage = thumbnail . storage try : with NamedTemporaryFile ( ) as temp_file : thumbnail . seek ( 0 ) temp_file . write ( thumbnail . read ( ) ) temp_file . flush ( ) optimize_command = optimize_command . format ( filename = temp_file . name ) output = check_output ( optimize_command , stderr = subprocess . STDOUT , shell = True ) if output : logger . warning ( '{0} returned {1}' . format ( optimize_command , output ) ) else : logger . info ( '{0} returned nothing' . format ( optimize_command ) ) with open ( temp_file . name , 'rb' ) as f : thumbnail . file = ContentFile ( f . read ( ) ) storage . delete ( thumbnail . path ) storage . save ( thumbnail . path , thumbnail ) except Exception as e : logger . error ( e )
Optimize thumbnail images by removing unnecessary data
255
8
240,575
def thumbnail ( parser , token ) : args = token . split_contents ( ) tag = args [ 0 ] # Check to see if we're setting to a context variable. if len ( args ) > 4 and args [ - 2 ] == 'as' : context_name = args [ - 1 ] args = args [ : - 2 ] else : context_name = None if len ( args ) < 3 : raise TemplateSyntaxError ( "Invalid syntax. Expected " "'{%% %s source size [option1 option2 ...] %%}' or " "'{%% %s source size [option1 option2 ...] as variable %%}'" % ( tag , tag ) ) opts = { } # The first argument is the source file. source_var = parser . compile_filter ( args [ 1 ] ) # The second argument is the requested size. If it's the static "10x10" # format, wrap it in quotes so that it is compiled correctly. size = args [ 2 ] match = RE_SIZE . match ( size ) if match : size = '"%s"' % size opts [ 'size' ] = parser . compile_filter ( size ) # All further arguments are options. args_list = split_args ( args [ 3 : ] ) . items ( ) for arg , value in args_list : if arg in VALID_OPTIONS : if value and value is not True : value = parser . compile_filter ( value ) opts [ arg ] = value else : raise TemplateSyntaxError ( "'%s' tag received a bad argument: " "'%s'" % ( tag , arg ) ) return ThumbnailNode ( source_var , opts = opts , context_name = context_name )
Creates a thumbnail of an ImageField .
373
9
240,576
def thumbnail_url ( source , alias ) : try : thumb = get_thumbnailer ( source ) [ alias ] except Exception : return '' return thumb . url
Return the thumbnail url for a source file using an aliased set of thumbnail options .
34
17
240,577
def data_uri ( thumbnail ) : try : thumbnail . open ( 'rb' ) data = thumbnail . read ( ) finally : thumbnail . close ( ) mime_type = mimetypes . guess_type ( str ( thumbnail . file ) ) [ 0 ] or 'application/octet-stream' data = b64encode ( data ) . decode ( 'utf-8' ) return 'data:{0};base64,{1}' . format ( mime_type , data )
This filter will return the base64 encoded data URI for a given thumbnail object .
105
16
240,578
def read_files ( * filenames ) : output = [ ] for filename in filenames : f = codecs . open ( filename , encoding = 'utf-8' ) try : output . append ( f . read ( ) ) finally : f . close ( ) return '\n\n' . join ( output )
Output the contents of one or more files to a single concatenated string .
70
16
240,579
def all_thumbnails ( path , recursive = True , prefix = None , subdir = None ) : if prefix is None : prefix = settings . THUMBNAIL_PREFIX if subdir is None : subdir = settings . THUMBNAIL_SUBDIR thumbnail_files = { } if not path . endswith ( '/' ) : path = '%s/' % path len_path = len ( path ) if recursive : all = os . walk ( path ) else : files = [ ] for file in os . listdir ( path ) : if os . path . isfile ( os . path . join ( path , file ) ) : files . append ( file ) all = [ ( path , [ ] , files ) ] for dir_ , subdirs , files in all : rel_dir = dir_ [ len_path : ] for file in files : thumb = re_thumbnail_file . match ( file ) if not thumb : continue d = thumb . groupdict ( ) source_filename = d . pop ( 'source_filename' ) if prefix : source_path , source_filename = os . path . split ( source_filename ) if not source_filename . startswith ( prefix ) : continue source_filename = os . path . join ( source_path , source_filename [ len ( prefix ) : ] ) d [ 'options' ] = d [ 'options' ] and d [ 'options' ] . split ( '_' ) or [ ] if subdir and rel_dir . endswith ( subdir ) : rel_dir = rel_dir [ : - len ( subdir ) ] # Corner-case bug: if the filename didn't have an extension but did # have an underscore, the last underscore will get converted to a # '.'. m = re . match ( r'(.*)_(.*)' , source_filename ) if m : source_filename = '%s.%s' % m . groups ( ) filename = os . path . join ( rel_dir , source_filename ) thumbnail_file = thumbnail_files . setdefault ( filename , [ ] ) d [ 'filename' ] = os . path . join ( dir_ , file ) thumbnail_file . append ( d ) return thumbnail_files
Return a dictionary referencing all files which match the thumbnail format .
485
12
240,580
def thumbnails_for_file ( relative_source_path , root = None , basedir = None , subdir = None , prefix = None ) : if root is None : root = settings . MEDIA_ROOT if prefix is None : prefix = settings . THUMBNAIL_PREFIX if subdir is None : subdir = settings . THUMBNAIL_SUBDIR if basedir is None : basedir = settings . THUMBNAIL_BASEDIR source_dir , filename = os . path . split ( relative_source_path ) thumbs_path = os . path . join ( root , basedir , source_dir , subdir ) if not os . path . isdir ( thumbs_path ) : return [ ] files = all_thumbnails ( thumbs_path , recursive = False , prefix = prefix , subdir = '' ) return files . get ( filename , [ ] )
Return a list of dictionaries one for each thumbnail belonging to the source image .
195
16
240,581
def delete_thumbnails ( relative_source_path , root = None , basedir = None , subdir = None , prefix = None ) : thumbs = thumbnails_for_file ( relative_source_path , root , basedir , subdir , prefix ) return _delete_using_thumbs_list ( thumbs )
Delete all thumbnails for a source image .
68
9
240,582
def delete_all_thumbnails ( path , recursive = True ) : total = 0 for thumbs in all_thumbnails ( path , recursive = recursive ) . values ( ) : total += _delete_using_thumbs_list ( thumbs ) return total
Delete all files within a path which match the thumbnails pattern .
51
13
240,583
def signal_committed_filefields ( sender , instance , * * kwargs ) : for field_name in getattr ( instance , '_uncommitted_filefields' , ( ) ) : fieldfile = getattr ( instance , field_name ) # Don't send the signal for deleted files. if fieldfile : signals . saved_file . send_robust ( sender = sender , fieldfile = fieldfile )
A post_save signal handler which sends a signal for each FileField that was committed this save .
91
20
240,584
def generate_aliases ( fieldfile , * * kwargs ) : # Avoids circular import. from easy_thumbnails . files import generate_all_aliases generate_all_aliases ( fieldfile , include_global = False )
A saved_file signal handler which generates thumbnails for all field model and app specific aliases matching the saved file s field .
51
25
240,585
def generate_aliases_global ( fieldfile , * * kwargs ) : # Avoids circular import. from easy_thumbnails . files import generate_all_aliases generate_all_aliases ( fieldfile , include_global = True )
A saved_file signal handler which generates thumbnails for all field model and app specific aliases matching the saved file s field also generating thumbnails for each project - wide alias .
53
35
240,586
def colorspace ( im , bw = False , replace_alpha = False , * * kwargs ) : if im . mode == 'I' : # PIL (and pillow) have can't convert 16 bit grayscale images to lower # modes, so manually convert them to an 8 bit grayscale. im = im . point ( list ( _points_table ( ) ) , 'L' ) is_transparent = utils . is_transparent ( im ) is_grayscale = im . mode in ( 'L' , 'LA' ) new_mode = im . mode if is_grayscale or bw : new_mode = 'L' else : new_mode = 'RGB' if is_transparent : if replace_alpha : if im . mode != 'RGBA' : im = im . convert ( 'RGBA' ) base = Image . new ( 'RGBA' , im . size , replace_alpha ) base . paste ( im , mask = im ) im = base else : new_mode = new_mode + 'A' if im . mode != new_mode : im = im . convert ( new_mode ) return im
Convert images to the correct color space .
252
9
240,587
def autocrop ( im , autocrop = False , * * kwargs ) : if autocrop : # If transparent, flatten. if utils . is_transparent ( im ) : no_alpha = Image . new ( 'L' , im . size , ( 255 ) ) no_alpha . paste ( im , mask = im . split ( ) [ - 1 ] ) else : no_alpha = im . convert ( 'L' ) # Convert to black and white image. bw = no_alpha . convert ( 'L' ) # bw = bw.filter(ImageFilter.MedianFilter) # White background. bg = Image . new ( 'L' , im . size , 255 ) bbox = ImageChops . difference ( bw , bg ) . getbbox ( ) if bbox : im = im . crop ( bbox ) return im
Remove any unnecessary whitespace from the edges of the source image .
192
13
240,588
def filters ( im , detail = False , sharpen = False , * * kwargs ) : if detail : im = im . filter ( ImageFilter . DETAIL ) if sharpen : im = im . filter ( ImageFilter . SHARPEN ) return im
Pass the source image through post - processing filters .
56
10
240,589
def background ( im , size , background = None , * * kwargs ) : if not background : # Primary option not given, nothing to do. return im if not size [ 0 ] or not size [ 1 ] : # One of the dimensions aren't specified, can't do anything. return im x , y = im . size if x >= size [ 0 ] and y >= size [ 1 ] : # The image is already equal to (or larger than) the expected size, so # there's nothing to do. return im im = colorspace ( im , replace_alpha = background , * * kwargs ) new_im = Image . new ( 'RGB' , size , background ) if new_im . mode != im . mode : new_im = new_im . convert ( im . mode ) offset = ( size [ 0 ] - x ) // 2 , ( size [ 1 ] - y ) // 2 new_im . paste ( im , offset ) return new_im
Add borders of a certain color to make the resized image fit exactly within the dimensions given .
208
19
240,590
def generate_all_aliases ( fieldfile , include_global ) : all_options = aliases . all ( fieldfile , include_global = include_global ) if all_options : thumbnailer = get_thumbnailer ( fieldfile ) for key , options in six . iteritems ( all_options ) : options [ 'ALIAS' ] = key thumbnailer . get_thumbnail ( options )
Generate all of a file s aliases .
86
9
240,591
def _get_image ( self ) : if not hasattr ( self , '_image_cache' ) : from easy_thumbnails . source_generators import pil_image self . image = pil_image ( self ) return self . _image_cache
Get a PIL Image instance of this file .
54
10
240,592
def _set_image ( self , image ) : if image : self . _image_cache = image self . _dimensions_cache = image . size else : if hasattr ( self , '_image_cache' ) : del self . _cached_image if hasattr ( self , '_dimensions_cache' ) : del self . _dimensions_cache
Set the image for this file .
80
7
240,593
def set_image_dimensions ( self , thumbnail ) : try : dimensions = getattr ( thumbnail , 'dimensions' , None ) except models . ThumbnailDimensions . DoesNotExist : dimensions = None if not dimensions : return False self . _dimensions_cache = dimensions . size return self . _dimensions_cache
Set image dimensions from the cached dimensions of a Thumbnail model instance .
70
14
240,594
def generate_thumbnail ( self , thumbnail_options , high_resolution = False , silent_template_exception = False ) : thumbnail_options = self . get_options ( thumbnail_options ) orig_size = thumbnail_options [ 'size' ] # remember original size # Size sanity check. min_dim , max_dim = 0 , 0 for dim in orig_size : try : dim = int ( dim ) except ( TypeError , ValueError ) : continue min_dim , max_dim = min ( min_dim , dim ) , max ( max_dim , dim ) if max_dim == 0 or min_dim < 0 : raise exceptions . EasyThumbnailsError ( "The source image is an invalid size (%sx%s)" % orig_size ) if high_resolution : thumbnail_options [ 'size' ] = ( orig_size [ 0 ] * 2 , orig_size [ 1 ] * 2 ) image = engine . generate_source_image ( self , thumbnail_options , self . source_generators , fail_silently = silent_template_exception ) if image is None : raise exceptions . InvalidImageFormatError ( "The source file does not appear to be an image" ) thumbnail_image = engine . process_image ( image , thumbnail_options , self . thumbnail_processors ) if high_resolution : thumbnail_options [ 'size' ] = orig_size # restore original size filename = self . get_thumbnail_name ( thumbnail_options , transparent = utils . is_transparent ( thumbnail_image ) , high_resolution = high_resolution ) quality = thumbnail_options [ 'quality' ] subsampling = thumbnail_options [ 'subsampling' ] img = engine . save_image ( thumbnail_image , filename = filename , quality = quality , subsampling = subsampling ) data = img . read ( ) thumbnail = ThumbnailFile ( filename , file = ContentFile ( data ) , storage = self . thumbnail_storage , thumbnail_options = thumbnail_options ) thumbnail . image = thumbnail_image thumbnail . _committed = False return thumbnail
Return an unsaved ThumbnailFile containing a thumbnail image .
447
12
240,595
def get_existing_thumbnail ( self , thumbnail_options , high_resolution = False ) : thumbnail_options = self . get_options ( thumbnail_options ) names = [ self . get_thumbnail_name ( thumbnail_options , transparent = False , high_resolution = high_resolution ) ] transparent_name = self . get_thumbnail_name ( thumbnail_options , transparent = True , high_resolution = high_resolution ) if transparent_name not in names : names . append ( transparent_name ) for filename in names : exists = self . thumbnail_exists ( filename ) if exists : thumbnail_file = ThumbnailFile ( name = filename , storage = self . thumbnail_storage , thumbnail_options = thumbnail_options ) if settings . THUMBNAIL_CACHE_DIMENSIONS : # If this wasn't local storage, exists will be a thumbnail # instance so we can store the image dimensions now to save # a future potential query. thumbnail_file . set_image_dimensions ( exists ) return thumbnail_file
Return a ThumbnailFile containing an existing thumbnail for a set of thumbnail options or None if not found .
222
21
240,596
def get_thumbnail ( self , thumbnail_options , save = True , generate = None , silent_template_exception = False ) : thumbnail_options = self . get_options ( thumbnail_options ) if generate is None : generate = self . generate thumbnail = self . get_existing_thumbnail ( thumbnail_options ) if not thumbnail : if generate : thumbnail = self . generate_thumbnail ( thumbnail_options , silent_template_exception = silent_template_exception ) if save : self . save_thumbnail ( thumbnail ) else : signals . thumbnail_missed . send ( sender = self , options = thumbnail_options , high_resolution = False ) if 'HIGH_RESOLUTION' in thumbnail_options : generate_high_resolution = thumbnail_options . get ( 'HIGH_RESOLUTION' ) else : generate_high_resolution = self . thumbnail_high_resolution if generate_high_resolution : thumbnail . high_resolution = self . get_existing_thumbnail ( thumbnail_options , high_resolution = True ) if not thumbnail . high_resolution : if generate : thumbnail . high_resolution = self . generate_thumbnail ( thumbnail_options , high_resolution = True , silent_template_exception = silent_template_exception ) if save : self . save_thumbnail ( thumbnail . high_resolution ) else : signals . thumbnail_missed . send ( sender = self , options = thumbnail_options , high_resolution = False ) return thumbnail
Return a ThumbnailFile containing a thumbnail .
317
9
240,597
def save_thumbnail ( self , thumbnail ) : filename = thumbnail . name try : self . thumbnail_storage . delete ( filename ) except Exception : pass self . thumbnail_storage . save ( filename , thumbnail ) thumb_cache = self . get_thumbnail_cache ( thumbnail . name , create = True , update = True ) # Cache thumbnail dimensions. if settings . THUMBNAIL_CACHE_DIMENSIONS : dimensions_cache , created = ( models . ThumbnailDimensions . objects . get_or_create ( thumbnail = thumb_cache , defaults = { 'width' : thumbnail . width , 'height' : thumbnail . height } ) ) if not created : dimensions_cache . width = thumbnail . width dimensions_cache . height = thumbnail . height dimensions_cache . save ( ) signals . thumbnail_created . send ( sender = thumbnail )
Save a thumbnail to the thumbnail_storage .
182
9
240,598
def thumbnail_exists ( self , thumbnail_name ) : if self . remote_source : return False if utils . is_storage_local ( self . source_storage ) : source_modtime = utils . get_modified_time ( self . source_storage , self . name ) else : source = self . get_source_cache ( ) if not source : return False source_modtime = source . modified if not source_modtime : return False local_thumbnails = utils . is_storage_local ( self . thumbnail_storage ) if local_thumbnails : thumbnail_modtime = utils . get_modified_time ( self . thumbnail_storage , thumbnail_name ) if not thumbnail_modtime : return False return source_modtime <= thumbnail_modtime thumbnail = self . get_thumbnail_cache ( thumbnail_name ) if not thumbnail : return False thumbnail_modtime = thumbnail . modified if thumbnail . modified and source_modtime <= thumbnail . modified : return thumbnail return False
Calculate whether the thumbnail already exists and that the source is not newer than the thumbnail .
213
19
240,599
def save ( self , name , content , * args , * * kwargs ) : super ( ThumbnailerFieldFile , self ) . save ( name , content , * args , * * kwargs ) self . get_source_cache ( create = True , update = True )
Save the file also saving a reference to the thumbnail cache Source model .
61
14