idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
245,300
def load_views(self):
    """Load all of this project's views from its views directory.

    Returns a dict mapping view name -> StatikView instance.
    Raises MissingProjectFolderError if the views folder is absent.
    """
    views_path = os.path.join(self.path, StatikProject.VIEWS_DIR)
    logger.debug("Loading views from: %s", views_path)
    if not os.path.isdir(views_path):
        raise MissingProjectFolderError(StatikProject.VIEWS_DIR)

    filenames = list_files(views_path, ['yml', 'yaml'])
    logger.debug("Found %d view(s) in project", len(filenames))
    return {
        name: StatikView(
            filename=os.path.join(views_path, fn),
            encoding=self.config.encoding,
            name=name,
            models=self.models,
            template_engine=self.template_engine,
            error_context=self.error_context
        )
        for name, fn in ((extract_filename(f), f) for f in filenames)
    }
Loads the views for this project from the project directory structure .
219
13
245,301
def process_views(self):
    """Process every loaded view and deep-merge their rendered output.

    Returns a single merged dict. StatikErrors propagate unchanged;
    any other exception is wrapped in a ViewError naming the view.
    """
    logger.debug("Processing %d view(s)...", len(self.views))
    output = {}
    for name, view in iteritems(self.views):
        try:
            rendered = view.process(
                self.db,
                safe_mode=self.safe_mode,
                extra_context=self.project_context
            )
            output = deep_merge_dict(output, rendered)
        except StatikError:
            # already one of ours - let it bubble up unchanged
            raise
        except Exception as exc:
            # wrap anything unexpected in our own error type
            raise ViewError(
                message="Failed to render view \"%s\"." % name,
                orig_exc=exc
            )
    return output
Processes the loaded views to generate the required output data .
154
12
245,302
def dump_in_memory_result(self, result, output_path):
    """Recursively write an in-memory result tree to disk.

    Nested dicts become subdirectories; leaf values become file
    contents. Returns the number of files written.
    """
    logger.debug("Dumping in-memory processing results to output folder: %s", output_path)
    written = 0
    for key, value in iteritems(result):
        target = os.path.join(output_path, key)
        if isinstance(value, dict):
            written += self.dump_in_memory_result(value, target)
            continue
        # leaf: make sure the parent directory exists, then write the file
        if not os.path.isdir(output_path):
            os.makedirs(output_path)
        logger.debug("Writing output file: %s", target)
        with open(target, 'wt', encoding=self.config.encoding) as f:
            f.write(value)
        written += 1
    return written
Recursively dumps the result of our processing into files within the given output path .
204
17
245,303
def copy_assets(self, output_path):
    """Copy asset files into the output folder.

    Theme assets (when a theme is configured) are copied first so the
    project's own assets, copied afterwards, take precedence. Missing
    source folders are skipped with an informational message.
    """
    sources = []
    if self.config.theme is not None:
        # theme assets live under "themes/<theme_name>/assets"; they go
        # first so the project's assets overwrite them afterwards
        sources.append(os.path.join(
            self.path,
            StatikProject.THEMES_DIR,
            self.config.theme,
            StatikProject.ASSETS_DIR
        ))

    # always consider the project's own assets folder
    assets_src = self.config.assets_src_path
    if not os.path.isabs(assets_src):
        assets_src = os.path.join(self.path, assets_src)
    sources.append(assets_src)

    for source in sources:
        if not (os.path.exists(source) and os.path.isdir(source)):
            logger.info("Missing assets source path - skipping copying of assets: %s", source)
            continue
        destination = self.config.assets_dest_path
        if not os.path.isabs(destination):
            destination = os.path.join(output_path, destination)
        copied = copy_tree(source, destination)
        logger.info("Copied %s asset(s) from %s to %s", copied, source, destination)
Copies all asset files from the source path to the destination path . If no such source path exists no asset copying will be performed .
377
27
245,304
def autogen(project_path):
    """Autogenerate a quickstart project plus views and templates for all models.

    Runs the quickstart scaffold, loads the project's models, then writes a
    view/template pair for the home page and one for each model found.
    """
    generate_quickstart(project_path)
    project = StatikProject(project_path)
    project.config = StatikConfig(project.config_file_path)
    models = list(project.load_models().values())
    logger.info('Creating view and template for home page (index.html).')
    generate_yaml_file(
        os.path.join(project_path, StatikProject.VIEWS_DIR, 'index.yaml'),
        {'path': '/', 'template': 'index'}
    )
    generate_index_file(os.path.join(project_path, StatikProject.TEMPLATES_DIR, 'index.jinja2'))
    for model in models:
        logger.info('Creating view and template for model: %s' % model.name)
        # one view per model: a for-each over all instances, rendered at
        # /<model>/<pk> with a template named after the model
        generate_yaml_file(
            os.path.join(project_path, StatikProject.VIEWS_DIR, '%s.yaml' % model.name),
            {
                'path': {
                    'template': '/%s/{{ %s.pk }}' % (model.name, model.name),
                    'for-each': {
                        '%s' % model.name: 'session.query(%s).all()' % model.name
                    }
                },
                'template': ('%s' % model.name),
            }
        )
        generate_model_file(
            os.path.join(project_path, StatikProject.TEMPLATES_DIR, '%s.jinja2' % model.name),
            project, model, model.fields.values()
        )
Autogenerates views and templates for all the models in the project .
367
15
245,305
def generate_yaml_file(filename, contents):
    """Serialize `contents` as block-style YAML into `filename`."""
    serialized = yaml.dump(contents, default_flow_style=False)
    with open(filename, 'w') as out:
        out.write(serialized)
Creates a yaml file with the given content .
45
11
245,306
def generate_index_file(filename):
    """Write the default home page template to `filename`.

    Copies the bundled `templates/index_page.html` that lives next to
    this module.
    """
    template_path = os.path.join(os.path.dirname(__file__), 'templates/index_page.html')
    # fix: read via a context manager so the template file handle is
    # closed promptly (the original `open(...).read()` leaked it to GC)
    with open(template_path, 'r') as template:
        content = template.read()
    with open(filename, 'w') as out:
        out.write(content)
Constructs a default home page for the project .
70
10
245,307
def generate_model_file(filename, project, model, fields):
    """Render and write a default template page for `model`.

    Tags each field with its class name (exposed to the template as
    `field.type`), renders the bundled model page template through the
    project's template engine, and writes the result to `filename`.
    """
    for field in fields:
        # expose the field's concrete class name to the template
        field.type = field.__class__.__name__

    template_path = os.path.join(os.path.dirname(__file__), 'templates/model_page.html')
    # fix: close the bundled template promptly instead of leaking the
    # handle via a bare open(...).read()
    with open(template_path, 'r') as template_file:
        content = template_file.read()

    engine = StatikTemplateEngine(project)
    template = engine.create_template(content)

    # build the rendering context from the model plus static project context
    context = {'model': model, 'fields': fields}
    context.update(dict(project.config.context_static))

    rendered = template.render(context)
    with open(filename, 'w') as out:
        out.write(rendered)
Creates a webpage for a given instance of a model .
160
12
245,308
def build_dynamic(self, db, extra=None, safe_mode=False):
    """Evaluate each dynamic-context query against `db`.

    Returns a dict mapping variable name -> query result.
    """
    return {
        name: db.query(expr, safe_mode=safe_mode, additional_locals=extra)
        for name, expr in iteritems(self.dynamic)
    }
Builds the dynamic context based on our current dynamic context entity and the given database .
67
17
245,309
def build_for_each(self, db, safe_mode=False, extra=None):
    """Evaluate each for-each query against `db`.

    Returns a dict mapping variable name -> query result.
    """
    return {
        name: db.query(expr, additional_locals=extra, safe_mode=safe_mode)
        for name, expr in iteritems(self.for_each)
    }
Builds the for-each context.
70
8
245,310
def build(self, db=None, safe_mode=False, for_each_inst=None, extra=None):
    """Assemble the template-rendering context for this view.

    Starts from a copy of the initial context, then layers on, in
    order: static entries, dynamic query results, for-each query
    results (only when a for-each instance is supplied), and finally
    any `extra` dict.
    """
    context = copy(self.initial)
    context.update(self.static)
    if self.dynamic:
        context.update(self.build_dynamic(db, extra=extra, safe_mode=safe_mode))
    if self.for_each and for_each_inst:
        context.update(self.build_for_each(db, safe_mode=safe_mode, extra=extra))
    if isinstance(extra, dict):
        context.update(extra)
    return context
Builds a dictionary that can be used as context for template rendering .
132
14
245,311
def template_exception_handler(fn, error_context, filename=None):
    """Run `fn`, converting template errors into a Statik TemplateError.

    On success, returns whatever `fn` returns. jinja2 syntax errors
    contribute their filename/line number to `error_context`; any other
    exception is stringified. A TemplateError is raised on failure.
    """
    if filename:
        error_context.update(filename=filename)
    try:
        return fn()
    except jinja2.TemplateSyntaxError as exc:
        error_context.update(filename=exc.filename, line_no=exc.lineno)
        message = exc.message
    except jinja2.TemplateError as exc:
        message = exc.message
    except Exception as exc:
        message = "%s" % exc
    # only reachable from the except branches above
    raise TemplateError(message=message, context=error_context)
Calls the given function attempting to catch any template - related errors and converts the error to a Statik TemplateError instance . Returns the result returned by the function itself .
132
34
245,312
def create_template(self, s, provider_name=None):
    """Create a template from string `s` via the given provider.

    Falls back to the highest-precedence supported provider when no
    provider name is given; wraps template errors via the handler.
    """
    provider = provider_name if provider_name is not None else self.supported_providers[0]
    return template_exception_handler(
        lambda: self.get_provider(provider).create_template(s),
        self.error_context
    )
Creates a template from the given string based on the specified provider or the provider with highest precedence .
71
20
245,313
def construct_field(model_name, field_name, field_type, all_models, **kwargs):
    """Build a Statik field instance from its textual type specification.

    A "Type" spec yields a plain field, "OtherModel" a foreign key,
    "OtherModel[]" a many-to-many; an optional "-> attr" suffix declares
    back-population. Raises InvalidFieldTypeError for unknown types.
    """
    spec_parts = field_type.split('->')
    base_spec = spec_parts[0].strip()
    type_name = base_spec.split('[]')[0].strip()
    back_populates = spec_parts[1].strip() if len(spec_parts) > 1 else None
    error_context = kwargs.pop('error_context', StatikErrorContext())

    field_kwargs = copy(kwargs)
    field_kwargs['back_populates'] = back_populates

    if type_name not in FIELD_TYPES and type_name not in all_models:
        raise InvalidFieldTypeError(model_name, field_name, context=error_context)
    if type_name in FIELD_TYPES:
        return FIELD_TYPES[type_name](field_name, **field_kwargs)
    if base_spec.endswith('[]'):
        return StatikManyToManyField(field_name, type_name, **field_kwargs)
    return StatikForeignKeyField(field_name, type_name, **field_kwargs)
Helper function to build a field from the given field name and type .
303
14
245,314
def paginate(db_query, items_per_page, offset=0, start_page=1):
    """Wrap `db_query` in a Paginator with the given page settings."""
    return Paginator(
        db_query,
        items_per_page,
        offset=offset,
        start_page=start_page
    )
Instantiates a Paginator instance for database queries .
51
11
245,315
def render_reverse(self, inst=None, context=None):
    """Render this path's reverse URL, prettifying index pages.

    A URL ending in index.html/index.htm is collapsed to its trailing
    directory form ("a/b/index.html" -> "a/b/").
    """
    url = self.render(inst=inst, context=context)
    segments = url.split('/')
    if segments[-1] not in ('index.html', 'index.htm'):
        return url
    # drop the index filename and keep a trailing slash
    return '/'.join(segments[:-1]) + '/'
Renders the reverse URL for this path .
89
9
245,316
def create(cls, path, template_engine=None, output_filename=None, output_ext=None, view_name=None):
    """Factory: build the appropriate path object for `path`.

    A dict produces a StatikViewComplexPath, a string a
    StatikViewSimplePath; anything else is rejected.

    Raises:
        ValueError: if `path` is neither a dict nor a string.
    """
    # if it's a complex view
    if isinstance(path, dict):
        return StatikViewComplexPath(
            path,
            template_engine,
            output_filename=output_filename,
            output_ext=output_ext,
            view_name=view_name
        )
    elif isinstance(path, basestring):
        # NOTE(review): `basestring` exists only on Python 2; on Python 3
        # this raises NameError unless a compatibility alias is defined
        # elsewhere in the module - confirm.
        return StatikViewSimplePath(
            path,
            output_filename=output_filename,
            output_ext=output_ext,
            view_name=view_name
        )
    else:
        raise ValueError("Unrecognised structure for \"path\" configuration in view: %s" % view_name)
Create the relevant subclass of StatikView based on the given path variable and parameters .
159
17
245,317
def render(self, context, db=None, safe_mode=False, extra_context=None):
    """Render this view once per instance returned by the path query.

    Returns a nested dict keyed by path segments whose leaves are the
    rendered contents.

    Raises:
        MissingParameterError: if no database is supplied.
    """
    if not db:
        raise MissingParameterError("db", context=self.error_context)

    merged = dict()
    instances = db.query(self.path.query, safe_mode=safe_mode)
    extras = copy(extra_context) if extra_context else dict()
    for instance in instances:
        # expose the current instance under the path's variable name
        extras.update({self.path.variable: instance})
        ctx = context.build(
            db=db,
            safe_mode=safe_mode,
            for_each_inst=instance,
            extra=extras
        )
        rendered_path = self.path.render(inst=instance, context=ctx)
        rendered_content = self.template.render(ctx)
        merged = deep_merge_dict(
            merged,
            dict_from_path(rendered_path, final_value=rendered_content)
        )
    return merged
Renders the given context using the specified database returning a dictionary containing path segments and rendered view contents .
220
20
245,318
def render(self, db, safe_mode=False, extra_context=None):
    """Delegate rendering of this view to its renderer with our context."""
    return self.renderer.render(
        self.context,
        db,
        safe_mode=safe_mode,
        extra_context=extra_context
    )
Renders this view given the specified StatikDatabase instance .
50
12
245,319
def _validate_number_of_layers ( self , number_of_layers ) : # Only positive numbers are correct if number_of_layers <= 0 : raise SquashError ( "Number of layers to squash cannot be less or equal 0, provided: %s" % number_of_layers ) # Do not squash if provided number of layer to squash is bigger # than number of actual layers in the image if number_of_layers > len ( self . old_image_layers ) : raise SquashError ( "Cannot squash %s layers, the %s image contains only %s layers" % ( number_of_layers , self . image , len ( self . old_image_layers ) ) )
Makes sure that the specified number of layers to squash is a valid number
160
15
245,320
def _files_in_layers ( self , layers , directory ) : files = { } for layer in layers : self . log . debug ( "Generating list of files in layer '%s'..." % layer ) tar_file = os . path . join ( directory , layer , "layer.tar" ) with tarfile . open ( tar_file , 'r' , format = tarfile . PAX_FORMAT ) as tar : files [ layer ] = [ self . _normalize_path ( x ) for x in tar . getnames ( ) ] self . log . debug ( "Done, found %s files" % len ( files [ layer ] ) ) return files
Prepare a list of files in all layers
145
9
245,321
def _prepare_tmp_directory ( self , tmp_dir ) : if tmp_dir : if os . path . exists ( tmp_dir ) : raise SquashError ( "The '%s' directory already exists, please remove it before you proceed" % tmp_dir ) os . makedirs ( tmp_dir ) else : tmp_dir = tempfile . mkdtemp ( prefix = "docker-squash-" ) self . log . debug ( "Using %s as the temporary directory" % tmp_dir ) return tmp_dir
Creates temporary directory that is used to work on layers
116
11
245,322
def _layers_to_squash ( self , layers , from_layer ) : to_squash = [ ] to_leave = [ ] should_squash = True for l in reversed ( layers ) : if l == from_layer : should_squash = False if should_squash : to_squash . append ( l ) else : to_leave . append ( l ) to_squash . reverse ( ) to_leave . reverse ( ) return to_squash , to_leave
Prepares a list of layer IDs that should be squashed
108
12
245,323
def _save_image(self, image_id, directory):
    """Save (docker-export) the image as a tar archive into `directory`.

    Tries up to three times. Returns True on success; raises SquashError
    once the retries are exhausted.
    """
    for x in [0, 1, 2]:
        self.log.info("Saving image %s to %s directory..." % (image_id, directory))
        self.log.debug("Try #%s..." % (x + 1))
        try:
            image = self.docker.get_image(image_id)

            if docker.version_info[0] < 3:
                # Docker library prior to 3.0.0 returned the requests
                # object directly which could be used to read from
                self.log.debug("Extracting image using HTTPResponse object directly")
                self._extract_tar(image, directory)
            else:
                # Docker library >=3.0.0 returns iterator over raw data
                self.log.debug("Extracting image using iterator over raw data")

                # stream the chunks through an OS pipe into a background
                # thread that untars them as they arrive
                fd_r, fd_w = os.pipe()

                r = os.fdopen(fd_r, 'rb')
                w = os.fdopen(fd_w, 'wb')

                extracter = threading.Thread(target=self._extract_tar, args=(r, directory))
                extracter.start()

                for chunk in image:
                    w.write(chunk)
                    w.flush()

                # closing the write end signals EOF to the extractor thread
                w.close()

                extracter.join()
                r.close()
            self.log.info("Image saved!")
            return True
        except Exception as e:
            self.log.exception(e)
            self.log.warn(
                "An error occured while saving the %s image, retrying..." % image_id)

    # all three attempts failed
    raise SquashError("Couldn't save %s image!" % image_id)
Saves the image as a tar archive under specified name
373
11
245,324
def _unpack ( self , tar_file , directory ) : self . log . info ( "Unpacking %s tar file to %s directory" % ( tar_file , directory ) ) with tarfile . open ( tar_file , 'r' ) as tar : tar . extractall ( path = directory ) self . log . info ( "Archive unpacked!" )
Unpacks tar archive to selected directory
80
7
245,325
def _parse_image_name ( self , image ) : if ':' in image and '/' not in image . split ( ':' ) [ - 1 ] : image_tag = image . split ( ':' ) [ - 1 ] image_name = image [ : - ( len ( image_tag ) + 1 ) ] else : image_tag = "latest" image_name = image return ( image_name , image_tag )
Parses the provided image name and splits it in the name and tag part if possible . If no tag is provided latest is used .
93
28
245,326
def _dump_json ( self , data , new_line = False ) : # We do not want any spaces between keys and values in JSON json_data = json . dumps ( data , separators = ( ',' , ':' ) ) if new_line : json_data = "%s\n" % json_data # Generate sha256sum of the JSON data, may be handy sha = hashlib . sha256 ( json_data . encode ( 'utf-8' ) ) . hexdigest ( ) return json_data , sha
Helper function to marshal object into JSON string . Additionally a sha256sum of the created JSON string is generated .
120
24
245,327
def _move_layers ( self , layers , src , dest ) : for layer in layers : layer_id = layer . replace ( 'sha256:' , '' ) self . log . debug ( "Moving unmodified layer '%s'..." % layer_id ) shutil . move ( os . path . join ( src , layer_id ) , dest )
This moves all the layers that should be copied as - is . In other words - all layers that are not meant to be squashed will be moved from the old image to the new image untouched .
77
40
245,328
def _marker_files ( self , tar , members ) : marker_files = { } self . log . debug ( "Searching for marker files in '%s' archive..." % tar . name ) for member in members : if '.wh.' in member . name : self . log . debug ( "Found '%s' marker file" % member . name ) marker_files [ member ] = tar . extractfile ( member ) self . log . debug ( "Done, found %s files" % len ( marker_files ) ) return marker_files
Searches for marker files in the specified archive .
118
11
245,329
def _add_markers(self, markers, tar, files_in_layers, added_symlinks):
    """Add back marker (whiteout) files that still apply.

    A marker is re-added to `tar` only if the file it refers to exists
    in one of the layers we are NOT squashing (or there are no such
    layers), is not on a skipped symlink path, and is not already
    present in the archive.
    """
    if markers:
        self.log.debug("Marker files to add: %s" % [o.name for o in markers.keys()])
    else:
        # No marker files to add
        return

    # https://github.com/goldmann/docker-squash/issues/108
    # Some tar archives do have the filenames prefixed with './'
    # which does not have any effect when we unpack the tar achive,
    # but when processing tar content - we see this.
    tar_files = [self._normalize_path(x) for x in tar.getnames()]

    for marker, marker_file in six.iteritems(markers):
        # the whiteout name encodes the file it deletes
        actual_file = marker.name.replace('.wh.', '')
        normalized_file = self._normalize_path(actual_file)
        should_be_added_back = False

        if self._file_should_be_skipped(normalized_file, added_symlinks):
            self.log.debug(
                "Skipping '%s' marker file, this file is on a symlink path" % normalized_file)
            continue

        if normalized_file in tar_files:
            self.log.debug(
                "Skipping '%s' marker file, this file was added earlier for some reason..." % normalized_file)
            continue

        if files_in_layers:
            for files in files_in_layers.values():
                if normalized_file in files:
                    should_be_added_back = True
                    break
        else:
            # There are no previous layers, so we need to add it back
            # In fact this shouldn't happen since having a marker file
            # where there is no previous layer does not make sense.
            should_be_added_back = True

        if should_be_added_back:
            self.log.debug("Adding '%s' marker file back..." % marker.name)
            # Marker files on AUFS are hardlinks, we need to create
            # regular files, therefore we need to recreate the tarinfo
            # object
            tar.addfile(tarfile.TarInfo(name=marker.name), marker_file)
            # Add the file name to the list too to avoid re-reading all files
            # in tar archive
            tar_files.append(normalized_file)
        else:
            self.log.debug("Skipping '%s' marker file..." % marker.name)
This method is responsible for adding back all markers that were not added to the squashed layer AND files they refer to can be found in layers we do not squash .
532
33
245,330
def _proc_pax(self, filetar):
    """Process an extended or global pax header (POSIX.1-2001).

    Parses the key/value records stored in this member's data block,
    reads the next real header from `filetar`, and (for extended
    headers) patches that TarInfo with the parsed pax values.
    Returns the next TarInfo object.
    """
    # Read the header information.
    buf = filetar.fileobj.read(self._block(self.size))

    # A pax header stores supplemental information for either
    # the following file (extended) or all following files
    # (global).
    if self.type == tarfile.XGLTYPE:
        pax_headers = filetar.pax_headers
    else:
        pax_headers = filetar.pax_headers.copy()

    # Parse pax header information. A record looks like that:
    # "%d %s=%s\n" % (length, keyword, value). length is the size
    # of the complete record including the length field itself and
    # the newline. keyword and value are both UTF-8 encoded strings.
    regex = re.compile(r"(\d+) ([^=]+)=", re.U)
    pos = 0
    while True:
        match = regex.match(buf, pos)
        if not match:
            break
        length, keyword = match.groups()
        length = int(length)
        value = buf[match.end(2) + 1:match.start(1) + length - 1]

        # best-effort UTF-8 decode; leave raw value on failure
        try:
            keyword = keyword.decode("utf8")
        except Exception:
            pass

        try:
            value = value.decode("utf8")
        except Exception:
            pass

        pax_headers[keyword] = value
        pos += length

    # Fetch the next header.
    try:
        next = self.fromtarfile(filetar)
    except tarfile.HeaderError:
        raise tarfile.SubsequentHeaderError("missing or bad subsequent header")

    if self.type in (tarfile.XHDTYPE, tarfile.SOLARIS_XHDTYPE):
        # Patch the TarInfo object with the extended header info.
        next._apply_pax_info(pax_headers, filetar.encoding, filetar.errors)
        next.offset = self.offset

        if "size" in pax_headers:
            # If the extended header replaces the size field,
            # we need to recalculate the offset where the next
            # header starts.
            offset = next.offset_data
            if next.isreg() or next.type not in tarfile.SUPPORTED_TYPES:
                offset += next._block(next.size)
            filetar.offset = offset

    return next
Process an extended or global header as described in POSIX.1-2001.
496
16
245,331
def _create_pax_generic_header(cls, pax_headers, type=tarfile.XHDTYPE):
    """Return a POSIX.1-2001 extended or global header sequence.

    `pax_headers` must map unicode keywords to unicode values; each
    pair becomes one "<len> <keyword>=<value>" record terminated by a
    newline.

    NOTE(review): uses dict.iteritems() and str/bytes mixing - this
    helper is Python 2 only as written; confirm before running on 3.x.
    """
    records = []
    for keyword, value in pax_headers.iteritems():
        # best-effort UTF-8 encoding of both sides
        try:
            keyword = keyword.encode("utf8")
        except Exception:
            pass
        try:
            value = value.encode("utf8")
        except Exception:
            pass
        l = len(keyword) + len(value) + 3   # ' ' + '=' + '\n'
        # the record length field includes its own digits, so iterate
        # until the total length stabilizes
        n = p = 0
        while True:
            n = l + len(str(p))
            if n == p:
                break
            p = n
        records.append("%d %s=%s\n" % (p, keyword, value))
    records = "".join(records)

    # We use a hardcoded "././@PaxHeader" name like star does
    # instead of the one that POSIX recommends.
    info = {}
    info["name"] = "././@PaxHeader"
    info["type"] = type
    info["size"] = len(records)
    info["magic"] = tarfile.POSIX_MAGIC

    # Create pax header + record blocks.
    return cls._create_header(info, tarfile.USTAR_FORMAT) + cls._create_payload(records)
Return a POSIX . 1 - 2001 extended or global header sequence that contains a list of keyword value pairs . The values must be unicode objects .
288
30
245,332
def _read_json_file ( self , json_file ) : self . log . debug ( "Reading '%s' JSON file..." % json_file ) with open ( json_file , 'r' ) as f : return json . load ( f , object_pairs_hook = OrderedDict )
Helper function to read JSON file as OrderedDict
68
11
245,333
def _read_layer_paths ( self , old_image_config , old_image_manifest , layers_to_move ) : # In manifest.json we do not have listed all layers # but only layers that do contain some data. current_manifest_layer = 0 layer_paths_to_move = [ ] layer_paths_to_squash = [ ] # Iterate over image history, from base image to top layer for i , layer in enumerate ( old_image_config [ 'history' ] ) : # If it's not an empty layer get the id # (directory name) where the layer's data is # stored if not layer . get ( 'empty_layer' , False ) : layer_id = old_image_manifest [ 'Layers' ] [ current_manifest_layer ] . rsplit ( '/' ) [ 0 ] # Check if this layer should be moved or squashed if len ( layers_to_move ) > i : layer_paths_to_move . append ( layer_id ) else : layer_paths_to_squash . append ( layer_id ) current_manifest_layer += 1 return layer_paths_to_squash , layer_paths_to_move
In the v2 format, layer IDs are not the same as the IDs used in the exported tar archive to name directories for layers. These IDs can be found in the configuration files saved with the image - we need to read them.
273
50
245,334
def _generate_squashed_layer_path_id(self):
    """Compute the directory name (sha256 id) for the squashed layer.

    Rebuilds a v1-style metadata dict from the old image config,
    patches the fields describing the squashed layer, and returns the
    sha256 of the JSON serialization (key order matters, hence the
    careful remove/re-add dance below).
    """
    # Using OrderedDict, because order of JSON elements is important
    v1_metadata = OrderedDict(self.old_image_config)

    # Update image creation date
    v1_metadata['created'] = self.date

    # Remove unnecessary elements
    # Do not fail if key is not found
    for key in 'history', 'rootfs', 'container':
        v1_metadata.pop(key, None)

    # Docker internally changes the order of keys between
    # exported metadata (why oh why?!). We need to add 'os'
    # element after 'layer_id'
    operating_system = v1_metadata.pop('os', None)

    # The 'layer_id' element is the chain_id of the
    # squashed layer
    v1_metadata['layer_id'] = "sha256:%s" % self.chain_ids[-1]

    # Add back 'os' element
    if operating_system:
        v1_metadata['os'] = operating_system

    # The 'parent' element is the name of the directory (inside the
    # exported tar archive) of the last layer that we move
    # (layer below squashed layer)
    if self.layer_paths_to_move:
        if self.layer_paths_to_squash:
            parent = self.layer_paths_to_move[-1]
        else:
            parent = self.layer_paths_to_move[0]

        v1_metadata['parent'] = "sha256:%s" % parent

    # The 'Image' element is the id of the layer from which we squash
    if self.squash_id:
        # Update image id, should be one layer below squashed layer
        v1_metadata['config']['Image'] = self.squash_id
    else:
        v1_metadata['config']['Image'] = ""

    # Get the sha256sum of the JSON exported metadata,
    # we do not care about the metadata anymore
    sha = self._dump_json(v1_metadata)[1]

    return sha
This function generates the id used to name the directory to store the squashed layer content in the archive .
459
21
245,335
def write_local_file(self, outputfile, path):
    """Copy `outputfile` (rewound to its start) to the local `path`."""
    self.logger.info("Writing file to %s", path)
    outputfile.seek(0)
    with open(path, 'wb') as destination:
        copyfileobj(outputfile, destination)
Write file to the desired path .
61
7
245,336
def _cleanup_old_backups ( self , database = None , servername = None ) : self . storage . clean_old_backups ( encrypted = self . encrypt , compressed = self . compress , content_type = self . content_type , database = database , servername = servername )
Cleanup old backups keeping the number of backups specified by DBBACKUP_CLEANUP_KEEP and any backups that occur on first of the month .
68
33
245,337
def _save_new_backup(self, database):
    """Create, transform, and store a fresh backup of `database`.

    The dump is optionally compressed and/or encrypted (each step may
    rename the file), then written either to storage or to a local
    path depending on configuration.
    """
    self.logger.info("Backing Up Database: %s", database['NAME'])
    filename = self.connector.generate_filename(self.servername)
    outputfile = self.connector.create_dump()

    if self.compress:
        outputfile, filename = utils.compress_file(outputfile, filename)
    if self.encrypt:
        outputfile, filename = utils.encrypt_file(outputfile, filename)

    # an explicit filename setting overrides the generated one
    filename = self.filename if self.filename else filename
    self.logger.debug("Backup size: %s", utils.handle_size(outputfile))

    outputfile.seek(0)
    if self.path is None:
        self.write_to_storage(outputfile, filename)
    else:
        self.write_local_file(outputfile, self.path)
Save a new backup file .
219
6
245,338
def _explore_storage ( self ) : path = '' dirs = [ path ] while dirs : path = dirs . pop ( ) subdirs , files = self . media_storage . listdir ( path ) for media_filename in files : yield os . path . join ( path , media_filename ) dirs . extend ( [ os . path . join ( path , subdir ) for subdir in subdirs ] )
Generator of all files contained in media storage .
94
10
245,339
def _create_tar(self, name):
    """Build a (optionally gzipped) TAR of all media files in storage.

    Returns the spooled temporary file containing the archive.
    """
    fileobj = utils.create_spooled_temporary_file()
    mode = 'w:gz' if self.compress else 'w'
    tar_file = tarfile.open(name=name, fileobj=fileobj, mode=mode)
    for media_filename in self._explore_storage():
        tarinfo = tarfile.TarInfo(media_filename)
        media_file = self.media_storage.open(media_filename)
        # NOTE(review): assumes len() of the storage file object yields
        # its byte size - confirm the storage backend supports this
        tarinfo.size = len(media_file)
        tar_file.addfile(tarinfo, media_file)
    # finalize the archive before handing the file object back
    tar_file.close()
    return fileobj
Create TAR file .
153
5
245,340
def backup_mediafiles(self):
    """Create a media backup archive and write it out.

    Builds a (possibly gzipped) tarball of media storage, optionally
    encrypts it, then stores it remotely or to a local path.
    """
    extension = "tar%s" % ('.gz' if self.compress else '')
    filename = utils.filename_generate(
        extension,
        servername=self.servername,
        content_type=self.content_type)

    tarball = self._create_tar(filename)

    if self.encrypt:
        # encrypt_file returns (fileobj, new_filename)
        tarball, filename = utils.encrypt_file(tarball, filename)

    self.logger.debug("Backup size: %s", utils.handle_size(tarball))

    tarball.seek(0)
    if self.path is None:
        self.write_to_storage(tarball, filename)
    else:
        self.write_local_file(tarball, self.path)
Create backup file and write it to storage .
181
9
245,341
def bytes_to_str(byteVal, decimals=1):
    """Format a byte count as a human readable string.

    Walks the BYTES (unit, size) table and formats against the first
    unit that fits; falls back to plain bytes.
    """
    for unit, unit_size in BYTES:
        if byteVal < unit_size:
            continue
        scaled = round(byteVal / unit_size, decimals)
        if decimals == 0:
            return '%s %s' % (int(scaled), unit)
        return '%s %s' % (scaled, unit)
    return '%s B' % byteVal
Convert bytes to a human readable string .
97
9
245,342
def mail_admins(subject, message, fail_silently=False, connection=None,
                html_message=None):
    """Email the addresses in settings.ADMINS; no-op when none are set."""
    if not settings.ADMINS:
        return
    recipients = [admin[1] for admin in settings.ADMINS]
    mail = EmailMultiAlternatives(
        '%s%s' % (settings.EMAIL_SUBJECT_PREFIX, subject),
        message,
        settings.SERVER_EMAIL,
        recipients,
        connection=connection)
    if html_message:
        mail.attach_alternative(html_message, 'text/html')
    mail.send(fail_silently=fail_silently)
Sends a message to the admins as defined by the DBBACKUP_ADMINS setting .
134
21
245,343
def create_spooled_temporary_file(filepath=None, fileobj=None):
    """Create a spooled temporary file, optionally seeded with content.

    If `filepath` is given, its file is opened, copied in, and closed
    again; otherwise, if `fileobj` is given, that object is rewound and
    copied in. Returns the spooled temporary file.
    """
    spooled_file = tempfile.SpooledTemporaryFile(
        max_size=settings.TMP_FILE_MAX_SIZE,
        dir=settings.TMP_DIR)
    if filepath:
        # fix: close the source file instead of leaking the handle
        with open(filepath, 'r+b') as source:
            source.seek(0)
            copyfileobj(source, spooled_file, settings.TMP_FILE_READ_SIZE)
    elif fileobj is not None:
        fileobj.seek(0)
        copyfileobj(fileobj, spooled_file, settings.TMP_FILE_READ_SIZE)
    return spooled_file
Create a spooled temporary file . if filepath or fileobj is defined its content will be copied into temporary file .
125
25
245,344
def compress_file(inputfile, filename):
    """Gzip-compress `inputfile`; return (compressed_fileobj, new_name).

    The returned name is the original with a '.gz' suffix appended.
    """
    compressed = create_spooled_temporary_file()
    # the context manager finalizes the gzip stream even on copy failure
    with gzip.GzipFile(filename=filename, fileobj=compressed, mode="wb") as gz:
        inputfile.seek(0)
        copyfileobj(inputfile, gz, settings.TMP_FILE_READ_SIZE)
    return compressed, filename + '.gz'
Compress input file using gzip and change its name .
106
12
245,345
def uncompress_file(inputfile, filename):
    """Gunzip `inputfile`; return (uncompressed_fileobj, new_basename).

    NOTE(review): the rename strips every '.gz' occurrence in the base
    name, not just a trailing suffix - confirm this is intended.
    """
    gz = gzip.GzipFile(fileobj=inputfile, mode="rb")
    try:
        outputfile = create_spooled_temporary_file(fileobj=gz)
    finally:
        gz.close()
    return outputfile, os.path.basename(filename).replace('.gz', '')
Uncompress this file using gzip and change its name .
95
13
245,346
def timestamp(value):
    """Format a datetime with settings.DATE_FORMAT, localizing aware values."""
    if not timezone.is_naive(value):
        value = timezone.localtime(value)
    return value.strftime(settings.DATE_FORMAT)
Return the timestamp of a datetime . datetime object .
43
12
245,347
def datefmt_to_regex(datefmt):
    """Compile a capturing regex equivalent to a strftime format string."""
    pattern = datefmt
    for directive, replacement in PATTERN_MATCHNG:
        pattern = pattern.replace(directive, replacement)
    return re.compile(r'(%s)' % pattern)
Convert a strftime format string to a regex .
64
11
245,348
def filename_to_date(filename, datefmt=None):
    """Extract a datetime from `filename`, or None if no date is found."""
    fmt = datefmt or settings.DATE_FORMAT
    datestring = filename_to_datestring(filename, fmt)
    if datestring is None:
        return None
    return datetime.strptime(datestring, fmt)
Return a datetime from a file name .
73
9
245,349
def filename_generate(extension, database_name='', servername=None,
                      content_type='db', wildcard=None):
    """Create a new backup filename from the configured template.

    :param extension: file extension appended via the template
    :param database_name: database name or path; for 'db' backups a path
        is reduced to its basename and anything after a '.' is stripped
    :param servername: server name; defaults to settings.HOSTNAME
    :param content_type: 'db' or 'media'; selects the filename template
    :param wildcard: literal placeholder used instead of the current datetime
    :returns: the cleaned backup filename
    """
    if content_type == 'db':
        # Reduce e.g. '/tmp/app.sqlite3' to 'app'.
        if '/' in database_name:
            database_name = os.path.basename(database_name)
        if '.' in database_name:
            database_name = database_name.split('.')[0]
        template = settings.FILENAME_TEMPLATE
    elif content_type == 'media':
        template = settings.MEDIA_FILENAME_TEMPLATE
    else:
        template = settings.FILENAME_TEMPLATE
    params = {
        'servername': servername or settings.HOSTNAME,
        'datetime': wildcard or datetime.now().strftime(settings.DATE_FORMAT),
        'databasename': database_name,
        'extension': extension,
        'content_type': content_type
    }
    # The template may be a callable building the name, or a format string.
    if callable(template):
        filename = template(**params)
    else:
        filename = template.format(**params)
    # Normalise characters not allowed in filenames, drop a leading '-'.
    filename = REG_FILENAME_CLEAN.sub('-', filename)
    filename = filename[1:] if filename.startswith('-') else filename
    return filename
Create a new backup filename .
281
6
245,350
def get_storage(path=None, options=None):
    """Return the storage backend configured with the given options.

    Missing arguments fall back to settings.STORAGE and
    settings.STORAGE_OPTIONS.

    :raises ImproperlyConfigured: when no storage path is configured
    """
    path = path or settings.STORAGE
    options = options or settings.STORAGE_OPTIONS
    if not path:
        raise ImproperlyConfigured('You must specify a storage class using '
                                   'DBBACKUP_STORAGE settings.')
    return Storage(path, **options)
Get the specified storage configured with options .
74
8
245,351
def list_backups(self, encrypted=None, compressed=None, content_type=None,
                 database=None, servername=None):
    """List stored backup files, optionally filtered.

    Filters set to None are ignored.  ``content_type`` must be 'db' for
    database backups, 'media' for media backups, or None for both.

    :raises TypeError: on an invalid ``content_type``
    """
    if content_type not in ('db', 'media', None):
        msg = "Bad content_type %s, must be 'db', 'media', or None" % (content_type)
        raise TypeError(msg)
    # TODO: Make better filter for include only backups
    # Only keep files whose name contains a parsable date string.
    files = [f for f in self.list_directory() if utils.filename_to_datestring(f)]
    if encrypted is not None:
        files = [f for f in files if ('.gpg' in f) == encrypted]
    if compressed is not None:
        files = [f for f in files if ('.gz' in f) == compressed]
    # Media backups are tar archives; everything else is a db dump.
    if content_type == 'media':
        files = [f for f in files if '.tar' in f]
    elif content_type == 'db':
        files = [f for f in files if '.tar' not in f]
    if database:
        files = [f for f in files if database in f]
    if servername:
        files = [f for f in files if servername in f]
    return files
List stored files matching the given filters. A filter set to None is ignored. content_type must be 'db' for database backups or 'media' for media backups.
263
32
245,352
def get_older_backup(self, encrypted=None, compressed=None, content_type=None,
                     database=None, servername=None):
    """Return the file name of the oldest stored backup.

    Accepts the same filters as :meth:`list_backups`.

    :raises FileNotFound: when no backup matches the filters
    """
    candidates = self.list_backups(encrypted=encrypted, compressed=compressed,
                                   content_type=content_type, database=database,
                                   servername=servername)
    if not candidates:
        raise FileNotFound("There's no backup file available.")
    return min(candidates, key=utils.filename_to_date)
Return the older backup s file name .
106
8
245,353
def clean_old_backups(self, encrypted=None, compressed=None, content_type=None,
                      database=None, servername=None, keep_number=None):
    """Delete older backups, keeping the configured number of recent ones.

    :param keep_number: how many backups to keep; defaults to
        settings.CLEANUP_KEEP for 'db' and CLEANUP_KEEP_MEDIA otherwise
    """
    if keep_number is None:
        keep_number = settings.CLEANUP_KEEP if content_type == 'db' else settings.CLEANUP_KEEP_MEDIA
    # Backups matching this predicate are never deleted.
    keep_filter = settings.CLEANUP_KEEP_FILTER
    files = self.list_backups(encrypted=encrypted, compressed=compressed,
                              content_type=content_type, database=database,
                              servername=servername)
    # Sort newest first; anything past the first keep_number is a candidate.
    files = sorted(files, key=utils.filename_to_date, reverse=True)
    files_to_delete = [fi for i, fi in enumerate(files) if i >= keep_number]
    for filename in files_to_delete:
        if keep_filter(filename):
            continue
        self.delete_file(filename)
Delete older backups, keeping the defined number of most recent ones.
204
10
245,354
def _get_database(self, options):
    """Resolve the database to restore from command-line options.

    :param options: parsed command options; may contain 'database'
    :returns: (database_name, database_settings_dict)
    :raises CommandError: when the name is ambiguous or unknown
    """
    database_name = options.get('database')
    if not database_name:
        if len(settings.DATABASES) > 1:
            # NOTE: two adjacent string literals concatenate into one message.
            errmsg = "Because this project contains more than one database, you" \
                     " must specify the --database option."
            raise CommandError(errmsg)
        # Single-database project: use the only configured alias.
        database_name = list(settings.DATABASES.keys())[0]
    if database_name not in settings.DATABASES:
        raise CommandError("Database %s does not exist." % database_name)
    return database_name, settings.DATABASES[database_name]
Get the database to restore .
142
6
245,355
def _restore_backup(self):
    """Restore the selected database from the chosen backup file.

    Optionally decrypts and/or uncompresses the backup, asks for
    confirmation in interactive mode, then hands the stream to the
    database connector.
    """
    input_filename, input_file = self._get_backup_file(database=self.database_name,
                                                       servername=self.servername)
    self.logger.info("Restoring backup for database '%s' and server '%s'",
                     self.database_name, self.servername)
    self.logger.info("Restoring: %s" % input_filename)
    if self.decrypt:
        unencrypted_file, input_filename = utils.unencrypt_file(input_file, input_filename,
                                                                self.passphrase)
        # Replace the encrypted stream with the decrypted one.
        input_file.close()
        input_file = unencrypted_file
    if self.uncompress:
        uncompressed_file, input_filename = utils.uncompress_file(input_file, input_filename)
        input_file.close()
        input_file = uncompressed_file
    self.logger.info("Restore tempfile created: %s", utils.handle_size(input_file))
    if self.interactive:
        self._ask_confirmation()
    # The connector reads from the start of the stream.
    input_file.seek(0)
    self.connector = get_connector(self.database_name)
    self.connector.restore_dump(input_file)
Restore the specified database .
275
6
245,356
def get_connector(database_name=None):
    """Instantiate a connector for the given database alias.

    The connector class comes from the per-database CONNECTOR setting
    when present, otherwise from CONNECTOR_MAPPING keyed by the
    database ENGINE.
    """
    from django.db import connections, DEFAULT_DB_ALIAS
    # Get DB
    database_name = database_name or DEFAULT_DB_ALIAS
    connection = connections[database_name]
    engine = connection.settings_dict['ENGINE']
    connector_settings = settings.CONNECTORS.get(database_name, {})
    connector_path = connector_settings.get('CONNECTOR', CONNECTOR_MAPPING[engine])
    # Split "pkg.module.ClassName" into module path and class name.
    module_path, _, class_name = connector_path.rpartition('.')
    connector_class = getattr(import_module(module_path), class_name)
    return connector_class(database_name, **connector_settings)
Get a connector from its database key in settings.
192
12
245,357
def settings(self):
    """Return database settings merged with connector overrides.

    The merged dict is computed once and cached on the instance.
    """
    if not hasattr(self, '_settings'):
        merged = self.connection.settings_dict.copy()
        merged.update(settings.CONNECTORS.get(self.database_name, {}))
        self._settings = merged
    return self._settings
Mix of database and connector settings .
65
7
245,358
def run_command(self, command, stdin=None, env=None):
    """Launch a shell command line.

    :param command: command line to run (split with shlex, no shell)
    :param stdin: optional stdin stream; ContentFile/SFTPStorageFile
        inputs have no OS descriptor, so their bytes are piped in
    :param env: extra environment variables merged over the base env
    :returns: (stdout, stderr) as rewound SpooledTemporaryFile objects
    :raises CommandConnectorError: on non-zero exit or OS error
    """
    cmd = shlex.split(command)
    stdout = SpooledTemporaryFile(max_size=settings.TMP_FILE_MAX_SIZE,
                                  dir=settings.TMP_DIR)
    stderr = SpooledTemporaryFile(max_size=settings.TMP_FILE_MAX_SIZE,
                                  dir=settings.TMP_DIR)
    full_env = os.environ.copy() if self.use_parent_env else {}
    full_env.update(self.env)
    full_env.update(env or {})
    try:
        if isinstance(stdin, (ContentFile, SFTPStorageFile)):
            # Feed the in-memory file through a pipe.
            process = Popen(cmd, stdin=PIPE, stdout=stdout, stderr=stderr, env=full_env)
            process.communicate(input=stdin.read())
        else:
            process = Popen(cmd, stdin=stdin, stdout=stdout, stderr=stderr, env=full_env)
            process.wait()
        if process.poll():
            # Non-zero return code: surface stderr in the error message.
            stderr.seek(0)
            raise exceptions.CommandConnectorError(
                "Error running: {}\n{}".format(command, stderr.read().decode('utf-8')))
        stdout.seek(0)
        stderr.seek(0)
        return stdout, stderr
    except OSError as err:
        raise exceptions.CommandConnectorError(
            "Error running: {}\n{}".format(command, str(err)))
Launch a shell command line .
359
6
245,359
def _assign_zones(self):
    """Create the RainCloudyFaucetZone objects managed by this faucet."""
    # Zones are numbered 1..4 on the device.
    for zone_id in range(1, 5):
        zone = RainCloudyFaucetZone(parent=self._parent,
                                    controller=self._controller,
                                    faucet=self,
                                    zone_id=zone_id)
        # Avoid duplicates when called more than once.
        if zone not in self.zones:
            self.zones.append(zone)
Assign all RainCloudyFaucetZone managed by faucet .
77
16
245,360
def _find_zone_by_id ( self , zone_id ) : if not self . zones : return None zone = list ( filter ( lambda zone : zone . id == zone_id , self . zones ) ) return zone [ 0 ] if zone else None
Return zone by id .
56
5
245,361
def _set_zone_name(self, zoneid, name):
    """Override a zone's name on the remote controller.

    :param zoneid: 1-based zone id as used by callers
    :param name: new zone name
    """
    # zone starts with index 0 on the remote API
    zoneid -= 1
    data = {
        '_set_zone_name': 'Set Name',
        'select_zone': str(zoneid),
        'zone_name': name,
    }
    self._controller.post(data)
Private method to override zone name .
73
7
245,362
def _set_watering_time(self, zoneid, value):
    """Set the manual watering time for a zone.

    :param zoneid: 1-based zone id
    :param value: one of MANUAL_WATERING_ALLOWED; integer 0 maps to
        'OFF' and the string 'ON' maps to MAX_WATERING_MINUTES
    :raises ValueError: when ``value`` is not an allowed option
    """
    if value not in MANUAL_WATERING_ALLOWED:
        raise ValueError(
            'Valid options are: {}'.format(', '.join(map(str, MANUAL_WATERING_ALLOWED))))
    if isinstance(value, int) and value == 0:
        value = 'OFF'
    elif isinstance(value, str):
        value = value.upper()
        if value == 'ON':
            value = MAX_WATERING_MINUTES
    ddata = self.preupdate()
    attr = 'zone{}_select_manual_mode'.format(zoneid)
    ddata[attr] = value
    self.submit_action(ddata)
Private method to set watering_time per zone .
169
10
245,363
def watering_time(self):
    """Return the watering time currently in effect for this zone.

    The device reports both automatic and manual watering times; the
    larger of the two applies.
    """
    # zone ids are 1-based, the attribute list is 0-based
    index = self.id - 1
    mode = self._attributes['rain_delay_mode'][index]
    return max(mode['auto_watering_time'], mode['manual_watering_time'])
Return watering_time from zone .
131
7
245,364
def _set_rain_delay(self, zoneid, value):
    """Set the automatic rain-delay program for a zone.

    :param zoneid: 1-based zone id
    :param value: number of days (0..MAX_RAIN_DELAY_DAYS) or 'off'
    :returns: True on submission, None when the value is rejected
    """
    # current index for rain_delay starts in 0
    zoneid -= 1
    if isinstance(value, int):
        if value > MAX_RAIN_DELAY_DAYS or value < 0:
            return None
        elif value == 0:
            value = 'off'
        elif value == 1:
            value = '1day'
        elif value >= 2:
            value = str(value) + 'days'
    elif isinstance(value, str):
        # Only the literal 'off' (any case) is accepted as a string.
        if value.lower() != 'off':
            return None
    ddata = self.preupdate()
    attr = 'zone{}_rain_delay_select'.format(zoneid)
    ddata[attr] = value
    self.submit_action(ddata)
    return True
Generic method to set auto_watering program .
170
10
245,365
def _set_auto_watering(self, zoneid, value):
    """Enable or disable the automatic watering program for a zone.

    :param zoneid: 1-based zone id
    :param value: True to enable, False to disable
    :returns: True on submission, None for a non-boolean value
    """
    if not isinstance(value, bool):
        return None
    ddata = self.preupdate()
    attr = 'zone{}_program_toggle'.format(zoneid)
    # Disabling removes the toggle key; enabling sets it to 'on'.
    try:
        if not value:
            ddata.pop(attr)
        else:
            ddata[attr] = 'on'
    except KeyError:
        pass
    self.submit_action(ddata)
    return True
Private method to set auto_watering program .
98
10
245,366
def auto_watering(self):
    """Return whether this zone is configured for automatic watering."""
    zone_key = "zone{}".format(self.id)
    return find_program_status(self._parent.html['home'], zone_key)
Return if zone is configured to automatic watering .
43
9
245,367
def _to_dict ( self ) : return { 'auto_watering' : getattr ( self , "auto_watering" ) , 'droplet' : getattr ( self , "droplet" ) , 'is_watering' : getattr ( self , "is_watering" ) , 'name' : getattr ( self , "name" ) , 'next_cycle' : getattr ( self , "next_cycle" ) , 'rain_delay' : getattr ( self , "rain_delay" ) , 'watering_time' : getattr ( self , "watering_time" ) , }
Method to build zone dict .
138
6
245,368
def preupdate ( self , force_refresh = True ) : ddata = MANUAL_OP_DATA . copy ( ) # force update to make sure status is accurate if force_refresh : self . update ( ) # select current controller and faucet ddata [ 'select_controller' ] = self . _parent . controllers . index ( self . _controller ) ddata [ 'select_faucet' ] = self . _controller . faucets . index ( self . _faucet ) # check if zone is scheduled automatically (zone1_program_toggle) # only add zoneX_program_toogle to ddata when needed, # otherwise the field will be always on for zone in self . _faucet . zones : attr = 'zone{}_program_toggle' . format ( zone . id ) if zone . auto_watering : ddata [ attr ] = 'on' # check if zone current watering manually (zone1_select_manual_mode) for zone in self . _faucet . zones : attr = 'zone{}_select_manual_mode' . format ( zone . id ) if zone . watering_time and attr in ddata . keys ( ) : ddata [ attr ] = zone . watering_time # check if rain delay is selected (zone0_rain_delay_select) for zone in self . _faucet . zones : attr = 'zone{}_rain_delay_select' . format ( zone . id - 1 ) value = zone . rain_delay if value and attr in ddata . keys ( ) : if int ( value ) >= 2 and int ( value ) <= 7 : value = str ( value ) + 'days' else : value = str ( value ) + 'day' ddata [ attr ] = value return ddata
Return a dict with all current options prior to submitting the request.
398
11
245,369
def submit_action(self, ddata):
    """POST ``ddata`` to the home endpoint (referer set to the same URL)."""
    self._controller.post(ddata, url=HOME_ENDPOINT, referer=HOME_ENDPOINT)
Post data .
40
3
245,370
def controller(self):
    """Return the single linked controller.

    :raises TypeError: when more than one controller is linked
        (multiple controllers are not supported yet)
    :raises AttributeError: when no controller is assigned
    """
    if not hasattr(self, 'controllers'):
        raise AttributeError("There is no controller assigned.")
    if len(self.controllers) > 1:
        # in the future, we should support more controllers
        raise TypeError("Only one controller per account.")
    return self.controllers[0]
Show current linked controllers .
70
5
245,371
def _assign_faucets(self, faucets):
    """Create RainCloudyFaucet objects for the given serial ids.

    :param faucets: iterable of faucet serial ids
    :raises TypeError: when the controller has no faucet assigned
    """
    if not faucets:
        raise TypeError("Controller does not have a faucet assigned.")
    for faucet_id in faucets:
        self.faucets.append(RainCloudyFaucet(self._parent, self, faucet_id))
Assign RainCloudyFaucet objects to self . faucets .
78
16
245,372
def post(self, ddata, url=SETUP_ENDPOINT, referer=SETUP_ENDPOINT):
    """POST form data to the service and refresh local attributes on success.

    :param ddata: form payload; a CSRF token is appended when missing
    :param url: endpoint to post to
    :param referer: Referer header value; None removes the header
    """
    headers = HEADERS.copy()
    if referer is None:
        headers.pop('Referer')
    else:
        headers['Referer'] = referer
    # append csrftoken
    if 'csrfmiddlewaretoken' not in ddata.keys():
        ddata['csrfmiddlewaretoken'] = self._parent.csrftoken
    req = self._parent.client.post(url, headers=headers, data=ddata)
    if req.status_code == 200:
        self.update()
Method to update some attributes on namespace .
145
8
245,373
def _get_cu_and_fu_status(self):
    """Fetch controller and faucet status via an AJAX GET request.

    Re-authenticates and retries once on HTTP 403 (expired token);
    raises for any other non-200 response.
    """
    # adjust headers
    headers = HEADERS.copy()
    headers['Accept'] = '*/*'
    headers['X-Requested-With'] = 'XMLHttpRequest'
    headers['X-CSRFToken'] = self._parent.csrftoken
    args = '?controller_serial=' + self.serial + '&faucet_serial=' + self.faucet.serial
    req = self._parent.client.get(STATUS_ENDPOINT + args, headers=headers)
    # token probably expired, then try again
    if req.status_code == 403:
        self._parent.login()
        self.update()
    elif req.status_code == 200:
        self.attributes = req.json()
    else:
        req.raise_for_status()
Submit GET request to update information .
190
7
245,374
def name(self, value):
    """Set a new name on the remote controller.

    :param value: new controller name
    """
    data = {
        '_set_controller_name': 'Set Name',
        'controller_name': value,
    }
    self.post(data, url=SETUP_ENDPOINT, referer=SETUP_ENDPOINT)
Set a new name to controller .
61
7
245,375
def faucet(self):
    """Return the single linked faucet.

    :raises TypeError: when more than one faucet is linked
        (multiple faucets are not supported yet)
    :raises AttributeError: when no faucet is assigned
    """
    if not hasattr(self, 'faucets'):
        raise AttributeError("There is no faucet assigned.")
    if len(self.faucets) > 1:
        # in the future, we should support more faucets
        raise TypeError("Only one faucet per account.")
    return self.faucets[0]
Show current linked faucet .
83
7
245,376
def serial_finder(data):
    """Find the controller serial and faucet serial on the setup page.

    :param data: BeautifulSoup document of the setup page
    :returns: dict with 'controller_serial' (str) and 'faucet_serial' (list)
    :raises TypeError: when ``data`` is not a BeautifulSoup element
    :raises RainCloudyException: when no valid controller/faucet is found
    """
    if not isinstance(data, BeautifulSoup):
        raise TypeError("Function requires BeautifulSoup HTML element.")
    try:
        # The setup page contains a select box for each controller and each
        # faucet
        controllersElement = data.find_all('select', {'id': 'id_select_controller2'})
        faucetsElement = data.find_all('select', {'id': 'id_select_faucet2'})
        # The serial is the token after the first '-' in the option text.
        controllerSerial = controllersElement[0].text.split('-')[1].strip()
        faucetSerial = faucetsElement[0].text.split('-')[1].strip()
        # currently only one faucet is supported on the code
        # we have plans to support it in the future
        parsed_dict = {}
        parsed_dict['controller_serial'] = controllerSerial
        parsed_dict['faucet_serial'] = [faucetSerial]
        return parsed_dict
    except (AttributeError, IndexError, ValueError):
        raise RainCloudyException('Could not find any valid controller or faucet')
Find controller serial and faucet_serial from the setup page .
259
14
245,377
def find_controller_or_faucet_name(data, p_type):
    """Find the controller or faucet name in the HTML document.

    :param data: BeautifulSoup document
    :param p_type: 'controller' or 'faucet'
    :returns: the stripped name text, or None when not found
    :raises TypeError: on bad arguments
    """
    if not isinstance(data, BeautifulSoup):
        raise TypeError("Function requires BeautilSoup HTML element.")
    if not (p_type == 'controller' or p_type == 'faucet'):
        raise TypeError("Function p_type must be controller or faucet")
    try:
        search_field = 'id_select_{0}'.format(p_type)
        child = data.find('select', {'id': search_field})
        return child.get_text().strip()
    except AttributeError:
        # data.find returned None: element not present in the document.
        return None
Find on the HTML document the controller name .
147
9
245,378
def find_zone_name(data, zone_id):
    """Find the name of zone ``zone_id`` in the HTML document.

    :param data: BeautifulSoup document containing the zone table
    :returns: the zone name, or None when the zone is not listed
    :raises TypeError: when ``data`` is not a BeautifulSoup element
    """
    if not isinstance(data, BeautifulSoup):
        raise TypeError("Function requires BeautilSoup HTML element.")
    table = data.find('table', {'class': 'zone_table'})
    table_body = table.find('tbody')
    rows = table_body.find_all('span', {'class': 'more_info'})
    for row in rows:
        if row.get_text().startswith(str(zone_id)):
            # NOTE(review): assumes a fixed 4-character prefix before the
            # name in the row text — confirm against the actual page markup.
            return row.get_text()[4:].strip()
    return None
Find on the HTML document the zone name .
146
9
245,379
def new_payment_query_listener(sender, order=None, payment=None, **kwargs):
    """Fill the payment's obligatory fields from its order.

    Only ``amount`` and ``currency`` are set here; everything else is
    left to other signal handlers.
    """
    payment.amount = order.total
    payment.currency = order.currency
    logger.debug("new_payment_query_listener, amount=%s, currency=%s",
                 payment.amount, payment.currency)
Here we fill only the two obligatory fields of the payment and leave the rest to other signal handlers.
74
13
245,380
def payment_status_changed_listener(sender, instance, old_status, new_status, **kwargs):
    """React when a payment is accepted, e.g. mark its order as paid."""
    logger.debug("payment_status_changed_listener, old=%s, new=%s",
                 old_status, new_status)
    if old_status != 'paid' and new_status == 'paid':
        # Ensures that we process the order only once
        instance.order.status = 'P'
        instance.order.save()
Here we will actually do something when a payment is accepted. E.g. let's change the order status.
104
21
245,381
def register_to_payment(order_class, **kwargs):
    """Register an unaware order class with getpaid.

    Generates a Payment model class storing payments with a ForeignKey
    to the given order class, and registers backend-specific models.

    :param order_class: the project's order model
    :returns: the generated Payment model class
    """
    # Expose the generated classes at module level for other code to use.
    global Payment
    global Order

    class Payment(PaymentFactory.construct(order=order_class, **kwargs)):
        objects = PaymentManager()

        class Meta:
            ordering = ('-created_on',)
            verbose_name = _("Payment")
            verbose_name_plural = _("Payments")
    Order = order_class
    # Now build models for backends
    backend_models_modules = import_backend_modules('models')
    for backend_name, models_module in backend_models_modules.items():
        for model in models_module.build_models(Payment):
            apps.register_model(backend_name, model)
    return Payment
A function for registering unaware order class to getpaid . This will generate a Payment model class that will store payments with ForeignKey to original order class
167
29
245,382
def get_backend_choices(currency=None):
    """Return (module_name, display_name) pairs for active backends.

    When ``currency`` is given, only backends accepting that currency
    are listed.
    """
    choices = []
    for backend_name in getattr(settings, 'GETPAID_BACKENDS', []):
        processor = import_module(backend_name).PaymentProcessor
        if currency and currency not in processor.BACKEND_ACCEPTED_CURRENCY:
            continue
        choices.append((backend_name, processor.BACKEND_NAME))
    return choices
Get active backends modules . Backend list can be filtered by supporting given currency .
130
17
245,383
def online(cls, payload, ip, req_sig):
    """Handle a PayU notification about a payment status change.

    Verifies the POS id and the request signature, then updates the
    matching Payment according to the reported order status.

    :param payload: raw JSON request body
    :param ip: caller IP address (not referenced here; kept for interface
        compatibility)
    :param req_sig: OpenPayu signature header content
    :returns: 'OK' when processed, 'ERROR' on any validation failure
    """
    from getpaid.models import Payment
    params = json.loads(payload)
    order_data = params.get('order', {})
    pos_id = order_data.get('merchantPosId')
    payment_id = order_data.get('extOrderId')
    key2 = cls.get_backend_setting('key2')
    if pos_id != cls.get_backend_setting('pos_id'):
        logger.warning('Received message for different pos: {}'.format(pos_id))
        return 'ERROR'
    # Verify the request signature before trusting the payload.
    req_sig_dict = cls.parse_payu_sig(req_sig)
    sig = cls.compute_sig(payload, key2, algorithm=req_sig_dict.get('algorithm', 'md5'))
    if sig != req_sig_dict['signature']:
        logger.warning('Received message with malformed signature. Payload: {}'.format(payload))
        return 'ERROR'
    try:
        payment = Payment.objects.get(id=payment_id)
    except Payment.DoesNotExist:
        logger.warning('Received message for nonexistent payment: {}.\nPayload: {}'.format(payment_id, payload))
        return 'ERROR'
    status = order_data['status']
    if payment.status != 'paid':
        if status == 'COMPLETED':
            payment.external_id = order_data['orderId']
            # Amounts are reported in hundredths of the currency unit.
            payment.amount = Decimal(order_data['totalAmount']) / Decimal(100)
            payment.amount_paid = payment.amount
            # Fix: was assigned to a misspelled 'currenct' attribute, so the
            # payment currency was never updated from the notification.
            payment.currency = order_data['currencyCode']
            payment.paid_on = pendulum.parse(params['localReceiptDateTime']).in_tz('utc')
            payment.description = order_data['description']
            payment.change_status('paid')
        elif status == 'PENDING':
            payment.change_status('in_progress')
        elif status in ['CANCELED', 'REJECTED']:
            payment.change_status('cancelled')
    return 'OK'
Receive and analyze request from payment service with information on payment status change .
509
15
245,384
def get_order_description(self, payment, order):
    """Render the order description.

    Uses the django template in settings.GETPAID_ORDER_DESCRIPTION when
    provided; otherwise falls back to the order's unicode representation.
    """
    template = getattr(settings, 'GETPAID_ORDER_DESCRIPTION', None)
    if not template:
        return six.text_type(order)
    context = Context({"payment": payment, "order": order})
    return Template(template).render(context)
Renders order description using django template provided in settings . GETPAID_ORDER_DESCRIPTION or if not provided return unicode representation of Order object .
74
33
245,385
def get_backend_setting(cls, name, default=None):
    """Read ``name`` from this backend's settings dictionary.

    :param default: returned when the key is missing; a default of None
        means the setting is required
    :raises ImproperlyConfigured: when a required setting is missing
    """
    backend_settings = get_backend_settings(cls.BACKEND)
    if default is not None:
        return backend_settings.get(name, default)
    try:
        return backend_settings[name]
    except KeyError:
        raise ImproperlyConfigured(
            "getpaid '%s' requires backend '%s' setting" % (cls.BACKEND, name))
Reads name setting from backend settings dictionary .
100
9
245,386
def get_gateway_url(self, request):
    """Route a payment to the Dotpay gateway.

    :returns: (url, method, params) — for POST the params dict is
        returned as-is; for GET the params are urlencoded into the URL
        and an empty dict is returned
    :raises ImproperlyConfigured: when the configured method is neither
        GET nor POST
    """
    params = {
        'id': self.get_backend_setting('id'),
        'description': self.get_order_description(self.payment, self.payment.order),
        'amount': self.payment.amount,
        'currency': self.payment.currency,
        'type': 0,  # 0 = show "return" button after finished payment
        'control': self.payment.pk,
        'URL': self.get_URL(self.payment.pk),
        'URLC': self.get_URLC(),
        'api_version': 'dev',
    }
    user_data = {
        'email': None,
        'lang': None,
    }
    # Let the project fill in optional user data via the signal.
    signals.user_data_query.send(sender=None, order=self.payment.order, user_data=user_data)
    if user_data['email']:
        params['email'] = user_data['email']
    # Language preference: signal-provided value wins over backend setting.
    if user_data['lang'] and user_data['lang'].lower() in self._ACCEPTED_LANGS:
        params['lang'] = user_data['lang'].lower()
    elif self.get_backend_setting('lang', False) and self.get_backend_setting('lang').lower() in self._ACCEPTED_LANGS:
        params['lang'] = self.get_backend_setting('lang').lower()
    if self.get_backend_setting('onlinetransfer', False):
        params['onlinetransfer'] = 1
    if self.get_backend_setting('p_email', False):
        params['p_email'] = self.get_backend_setting('p_email')
    if self.get_backend_setting('p_info', False):
        params['p_info'] = self.get_backend_setting('p_info')
    if self.get_backend_setting('tax', False):
        params['tax'] = 1
    gateway_url = self.get_backend_setting('gateway_url', self._GATEWAY_URL)
    if self.get_backend_setting('method', 'get').lower() == 'post':
        return gateway_url, 'POST', params
    elif self.get_backend_setting('method', 'get').lower() == 'get':
        # urlencode needs encoded string values, not arbitrary objects.
        for key in params.keys():
            params[key] = six.text_type(params[key]).encode('utf-8')
        return gateway_url + '?' + urlencode(params), "GET", {}
    else:
        raise ImproperlyConfigured('Dotpay payment backend accepts only GET or POST')
Routes a payment to the gateway; returns a URL for redirection.
658
14
245,387
def channel_ready_future(channel):
    """Create a Future that completes when ``channel`` becomes READY.

    Subscribes to connectivity updates and unsubscribes automatically
    once the future is done (including when it is cancelled).
    """
    fut = channel._loop.create_future()

    def _set_result(state):
        # Called on every connectivity change; resolve only once, on READY.
        if not fut.done() and state is _grpc.ChannelConnectivity.READY:
            fut.set_result(None)
    fut.add_done_callback(lambda f: channel.unsubscribe(_set_result))
    channel.subscribe(_set_result, try_to_connect=True)
    return fut
Creates a Future that tracks when a Channel is ready .
101
12
245,388
def insecure_channel(target, options=None, *, loop=None, executor=None,
                     standalone_pool_for_streaming=False):
    """Create an insecure asyncio Channel to a server."""
    inner = _grpc.insecure_channel(target, options)
    return Channel(inner, loop, executor, standalone_pool_for_streaming)
Creates an insecure Channel to a server .
64
9
245,389
def secure_channel(target, credentials, options=None, *, loop=None, executor=None,
                   standalone_pool_for_streaming=False):
    """Create a secure asyncio Channel to a server."""
    inner = _grpc.secure_channel(target, credentials, options)
    return Channel(inner, loop, executor, standalone_pool_for_streaming)
Creates a secure Channel to a server .
68
9
245,390
def future(self, request, timeout=None, metadata=None, credentials=None):
    """Asynchronously invoke the underlying RPC; returns a wrapped future."""
    inner_future = self._inner.future(request, timeout, metadata, credentials)
    return _utils.wrap_future_call(inner_future, self._loop, self._executor)
Asynchronously invokes the underlying RPC .
57
9
245,391
async def with_call(self, request_iterator, timeout=None, metadata=None, credentials=None):
    """Invoke the underlying RPC and await its result.

    :returns: (response, call) where ``call`` is the finished future
    :raises: whatever the RPC raises
    """
    fut = self.future(request_iterator, timeout, metadata, credentials)
    try:
        result = await fut
        return (result, fut)
    finally:
        # Guard against leaking a still-pending call on error/cancellation.
        if not fut.done():
            fut.cancel()
Synchronously invokes the underlying RPC on the client .
69
12
245,392
def future(self, request_iterator, timeout=None, metadata=None, credentials=None):
    """Asynchronously invoke the underlying client-streaming RPC."""
    wrapped_iterator = _utils.WrappedAsyncIterator(request_iterator, self._loop)
    inner_future = self._inner.future(wrapped_iterator, timeout, metadata, credentials)
    return _utils.wrap_future_call(inner_future, self._loop, self._executor)
Asynchronously invokes the underlying RPC on the client .
75
12
245,393
def config_field_type(field, cls):
    """Build a ConfigField that validates ``field`` is an instance of ``cls``."""
    validator = lambda candidate: isinstance(candidate, cls)
    error_message = lambda: CONFIG_FIELD_TYPE_ERROR.format(field, cls.__name__)
    return defs.ConfigField(validator, error_message)
Validate a config field against a type .
54
9
245,394
def get_config_parameters(plugin_path):
    """Return the parameters section from the plugin's config.json."""
    config_path = os.path.join(plugin_path, defs.CONFIG_FILE_NAME)
    with open(config_path, "r") as config_file:
        config = json.load(config_file)
    return config.get(defs.PARAMETERS, [])
Return the parameters section from config . json .
78
9
245,395
def validate_config_parameters(config_json, allowed_keys, allowed_types):
    """Validate every custom parameter declared in the config file.

    Each field's keys are checked, and when a default is supplied its
    value is checked against the declared field type.
    """
    for field in config_json.get(defs.PARAMETERS, []):
        validate_field(field, allowed_keys, allowed_types)
        default = field.get(defs.DEFAULT)
        field_type = field.get(defs.TYPE)
        if default:
            validate_field_matches_type(field[defs.VALUE], default, field_type)
Validate parameters in config file .
115
7
245,396
def validate_field_matches_type(field, value, field_type, select_items=None, _min=None, _max=None):
    """Validate that ``value`` matches ``field_type`` and its constraints.

    :param field: field name, used in error messages
    :param value: value to validate
    :param field_type: one of the defs.*_TYPE constants
    :param select_items: allowed options for select fields
    :param _min: minimum allowed value for integer fields
    :param _max: maximum allowed value for integer fields
    :raises ConfigFieldTypeMismatch: on any mismatch
    """
    if (field_type == defs.TEXT_TYPE and not isinstance(value, six.string_types)) or \
       (field_type == defs.STRING_TYPE and not isinstance(value, six.string_types)) or \
       (field_type == defs.BOOLEAN_TYPE and not isinstance(value, bool)) or \
       (field_type == defs.INTEGER_TYPE and not isinstance(value, int)):
        raise exceptions.ConfigFieldTypeMismatch(field, value, field_type)
    if field_type == defs.INTEGER_TYPE:
        # NOTE(review): a falsy bound of 0 is skipped by these truthiness
        # checks — confirm whether 0 should be a valid bound.
        if _min and value < _min:
            raise exceptions.ConfigFieldTypeMismatch(field, value,
                                                     "must be higher than {}".format(_min))
        if _max and value > _max:
            raise exceptions.ConfigFieldTypeMismatch(field, value,
                                                     "must be lower than {}".format(_max))
    if field_type == defs.SELECT_TYPE:
        # Imported locally, presumably to avoid a circular import.
        from honeycomb.utils.plugin_utils import get_select_items
        items = get_select_items(select_items)
        if value not in items:
            raise exceptions.ConfigFieldTypeMismatch(field, value,
                                                     "one of: {}".format(", ".join(items)))
Validate a config field against a specific type .
322
10
245,397
def get_truetype(value):
    """Convert a string into its natural Python value.

    Recognised boolean spellings become True/False, digit strings become
    ints, and anything else is returned as a string.
    """
    if value in ("true", "True", "y", "Y", "yes"):
        return True
    if value in ("false", "False", "n", "N", "no"):
        return False
    return int(value) if value.isdigit() else str(value)
Convert a string to a pythonized parameter .
83
10
245,398
def validate_field(field, allowed_keys, allowed_types):
    """Check that every key/value of ``field`` is allowed and valid.

    :raises ParametersFieldError: on an unknown key, disallowed type,
        or invalid field name
    """
    for key, value in field.items():
        if key not in allowed_keys:
            raise exceptions.ParametersFieldError(key, "property")
        if key == defs.TYPE and value not in allowed_types:
            raise exceptions.ParametersFieldError(value, key)
        if key == defs.VALUE and not is_valid_field_name(value):
            raise exceptions.ParametersFieldError(value, "field name")
Validate field is allowed and valid .
111
8
245,399
def is_valid_field_name(value):
    """Return True if ``value`` is a valid field name.

    A valid name is non-empty, contains only word characters or hyphens,
    does not start with a digit, '-' or '_', and contains no spaces.
    """
    if not value:
        # Fix: indexing value[0] below raised IndexError on an empty string.
        return False
    leftovers = re.sub(r"\w", "", value)
    leftovers = re.sub(r"-", "", leftovers)
    if leftovers != "" or value[0].isdigit() or value[0] in ["-", "_"] or " " in value:
        return False
    return True
Ensure field name is valid .
83
7