idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
6,300
def retrieve_prop(name):
    """Look up the getter/setter handlers registered for a property.

    Returns a ``(name, getter, setter)`` tuple; a handler is None when
    no entry exists in the corresponding registry.
    """
    getter = props_get[name] if name in props_get else None
    setter = props_set[name] if name in props_set else None
    return (name, getter, setter)
retrieve a property handler
65
5
6,301
def get_queryset(self):
    """Return the comments attached to the object named by request params.

    Reads ``type`` (model name) and ``id`` from the query string and
    resolves them through the ContentType framework. Parameters are
    already validated upstream in the QuerySetPermission.
    """
    model_name = self.request.GET.get("type")
    obj_id = self.request.GET.get("id")
    content_type = ContentType.objects.get(model=model_name.lower())
    target = content_type.model_class().objects.filter(id=obj_id).first()
    return Comment.objects.filter_by_object(target)
Parameters are already validated in the QuerySetPermission
111
10
6,302
def extractall(archive, filename, dstdir):
    """Extract a zip or tar ``archive`` into ``dstdir``.

    If ``archive`` is neither a zip nor a tar file it is assumed to be a
    single plain file and is copied to ``dstdir/filename``.

    Fixes: the zip branch used the Python 2-only ``file()`` builtin
    (NameError on Python 3) and never closed the written file or the
    ZipFile/TarFile handles.

    NOTE(review): member names are only stripped of a leading separator,
    not fully sanitised, so a hostile archive could still escape
    ``dstdir`` ("zip slip"); use only with trusted archives.
    """
    if zipfile.is_zipfile(archive):
        with zipfile.ZipFile(archive) as z:
            for name in z.namelist():
                targetname = name
                # directory entries end with '/' (on Windows as well)
                if targetname.endswith('/'):
                    targetname = targetname[:-1]
                # don't include leading "/" from file name if present
                if targetname.startswith(os.path.sep):
                    targetname = os.path.join(dstdir, targetname[1:])
                else:
                    targetname = os.path.join(dstdir, targetname)
                targetname = os.path.normpath(targetname)
                # create all upper directories if necessary
                upperdirs = os.path.dirname(targetname)
                if upperdirs and not os.path.exists(upperdirs):
                    os.makedirs(upperdirs)
                # only real files get written out (skip directory entries)
                if not name.endswith('/'):
                    with open(targetname, 'wb') as out:
                        out.write(z.read(name))
    elif tarfile.is_tarfile(archive):
        tar = tarfile.open(archive)
        try:
            tar.extractall(path=dstdir)
        finally:
            tar.close()
    else:
        # seems to be a single file, save it
        shutil.copyfile(archive, os.path.join(dstdir, filename))
extract zip or tar content to dstdir
318
9
6,303
def _merge_js(input_file, input_dir, output_file):
    """Merge and minify JavaScript files.

    Thin wrapper that delegates to studio's ``merge_js`` module; the
    import is kept local so the heavy build tooling is only loaded when
    a merge is actually requested.
    """
    from studio.lib.buildjs import merge_js
    merge_js.main(input_file, input_dir, output_file)
Call into the merge_js module to merge the js files and minify the code .
49
18
6,304
def lcopt_bw2_setup(ecospold_path, overwrite=False, db_name=None):  # pragma: no cover
    """Set up brightway2 for lcopt and import the ecoinvent 3.3 cutoff data.

    Returns True on success, or False when the project already exists
    and ``overwrite`` is not requested.
    """
    default_ei_name = "Ecoinvent3_3_cutoff"
    if db_name is None:
        db_name = DEFAULT_PROJECT_STEM + default_ei_name
    if db_name in bw2.projects:
        if not overwrite:
            print('Looks like bw2 is already set up - if you want to overwrite the existing version run lcopt.utils.lcopt_bw2_setup in a python shell using overwrite = True')
            return False
        bw2.projects.delete_project(name=db_name, delete_dir=True)
    bw2.projects.set_current(db_name)
    bw2.bw2setup()
    importer = bw2.SingleOutputEcospold2Importer(fix_mac_path_escapes(ecospold_path), default_ei_name)
    importer.apply_strategies()
    importer.statistics()
    importer.write_database()
    return True
Utility function to set up brightway2 to work correctly with lcopt .
249
17
6,305
def forwast_autodownload(FORWAST_URL):
    """Download and unpack the FORWAST bw2package into a temp directory.

    Used by lcopt_bw2_forwast_setup to obtain the database data; not
    designed to be used on its own. Returns the path to the unpacked
    'forwast.bw2package' file.
    """
    workdir = tempfile.mkdtemp()
    payload = requests.get(FORWAST_URL)
    with zipfile.ZipFile(io.BytesIO(payload.content)) as archive:
        archive.extractall(workdir)
    return os.path.join(workdir, 'forwast.bw2package')
Autodownloader for forwast database package for brightway . Used by lcopt_bw2_forwast_setup to get the database data . Not designed to be used on its own
88
43
6,306
def lcopt_bw2_forwast_setup(use_autodownload=True, forwast_path=None, db_name=FORWAST_PROJECT_NAME, overwrite=False):
    """Set up brightway2 for lcopt using the FORWAST database instead of ecoinvent.

    Args:
        use_autodownload: fetch the FORWAST package automatically when True.
        forwast_path: path to an already-downloaded package (required when
            not autodownloading).
        db_name: brightway2 project name to create/use (ignored in
            single-project mode, which uses the configured project name).
        overwrite: delete and recreate an existing project.

    Returns:
        True on success, False when the project exists and ``overwrite``
        is False.

    Raises:
        ValueError: when neither autodownload nor a path is provided.
    """
    # resolve where the FORWAST .bw2package comes from
    if use_autodownload:
        forwast_filepath = forwast_autodownload(FORWAST_URL)
    elif forwast_path is not None:
        forwast_filepath = forwast_path
    else:
        raise ValueError('Need a path if not using autodownload')
    if storage.project_type == 'single':
        # single-project mode: everything lives in the one configured project
        db_name = storage.single_project_name
        if bw2_project_exists(db_name):
            bw2.projects.set_current(db_name)
        else:
            bw2.projects.set_current(db_name)
            bw2.bw2setup()
    else:
        if db_name in bw2.projects:
            if overwrite:
                bw2.projects.delete_project(name=db_name, delete_dir=True)
            else:
                print('Looks like bw2 is already set up for the FORWAST database - if you want to overwrite the existing version run lcopt.utils.lcopt_bw2_forwast_setup in a python shell using overwrite = True')
                return False
        # no need to keep running bw2setup - we can just copy a blank project which has been set up before
        if not bw2_project_exists(DEFAULT_BIOSPHERE_PROJECT):
            lcopt_biosphere_setup()
        bw2.projects.set_current(DEFAULT_BIOSPHERE_PROJECT)
        bw2.create_core_migrations()
        bw2.projects.copy_project(db_name, switch=True)
    bw2.BW2Package.import_file(forwast_filepath)
    return True
Utility function to set up brightway2 to work correctly with lcopt using the FORWAST database instead of ecoinvent
427
27
6,307
def _validate_samples_factors ( mwtabfile , validate_samples = True , validate_factors = True ) : from_subject_samples = { i [ "local_sample_id" ] for i in mwtabfile [ "SUBJECT_SAMPLE_FACTORS" ] [ "SUBJECT_SAMPLE_FACTORS" ] } from_subject_factors = { i [ "factors" ] for i in mwtabfile [ "SUBJECT_SAMPLE_FACTORS" ] [ "SUBJECT_SAMPLE_FACTORS" ] } if validate_samples : if "MS_METABOLITE_DATA" in mwtabfile : from_metabolite_data_samples = set ( mwtabfile [ "MS_METABOLITE_DATA" ] [ "MS_METABOLITE_DATA_START" ] [ "Samples" ] ) assert from_subject_samples == from_metabolite_data_samples if "NMR_BINNED_DATA" in mwtabfile : from_nmr_binned_data_samples = set ( mwtabfile [ "NMR_BINNED_DATA" ] [ "NMR_BINNED_DATA_START" ] [ "Fields" ] [ 1 : ] ) assert from_subject_samples == from_nmr_binned_data_samples if validate_factors : if "MS_METABOLITE_DATA" in mwtabfile : from_metabolite_data_factors = set ( mwtabfile [ "MS_METABOLITE_DATA" ] [ "MS_METABOLITE_DATA_START" ] [ "Factors" ] ) assert from_subject_factors == from_metabolite_data_factors
Validate Samples and Factors identifiers across the file .
417
11
6,308
def daemonize(self):
    """Detach this process as a daemon using the classic double-fork.

    Optionally drops the effective UID to ``self.userid``, redirects the
    standard streams to ``self.stdin``/``self.stdout``/``self.stderr``,
    and records the daemon's PID in ``self.pidfile`` (removed at exit by
    ``self.delpid``).
    """
    if self.userid:
        # drop privileges to the configured user before forking
        uid = pwd.getpwnam(self.userid).pw_uid
        os.seteuid(uid)
    # first fork: parent exits so the child runs in the background
    try:
        pid = os.fork()
        if pid > 0:
            sys.exit(0)
    except OSError as err:
        sys.stderr.write("First fork failed: {0} ({1})\n".format(err.errno, err.strerror))
        sys.exit(1)
    # decouple from parent environment
    os.chdir("/")
    os.setsid()
    os.umask(0)
    # Second fork: ensure the daemon can never reacquire a controlling tty
    try:
        pid = os.fork()
        if pid > 0:
            sys.exit(0)
    except OSError as err:
        sys.stderr.write("Second fork failed: {0} ({1})\n".format(err.errno, err.strerror))
        sys.exit(1)
    # redirect the standard file descriptors to the configured files
    sys.stdout.flush()
    sys.stderr.flush()
    si = open(self.stdin, 'r')
    so = open(self.stdout, 'w')
    se = open(self.stderr, 'w')
    os.dup2(si.fileno(), sys.stdin.fileno())
    os.dup2(so.fileno(), sys.stdout.fileno())
    os.dup2(se.fileno(), sys.stderr.fileno())
    # write PID file
    atexit.register(self.delpid)
    pid = str(os.getpid())
    open(self.pidfile, 'w').write("%s\n" % pid)
Double - fork magic
386
4
6,309
def RenderJson(self, pretty=False):
    """Serialise this Tropo object's steps to a JSON string.

    When ``pretty`` is True an indented rendering is attempted, falling
    back to the compact form if the serialiser rejects the options.
    """
    topdict = {'tropo': self._steps}
    if not pretty:
        return jsonlib.dumps(topdict)
    try:
        return jsonlib.dumps(topdict, indent=4, sort_keys=False)
    except TypeError:
        return jsonlib.dumps(topdict)
Render a Tropo object into a Json string .
86
11
6,310
def getIndexedValue(self, index):
    """Return the 'value' field of the index-th Tropo action.

    When ``self._actions`` holds a single action dict (rather than a
    list) the index is ignored and that action's value is returned.
    Returns the string 'NoValue' when no 'value' key is present.

    Fix: the original bound the selected action to a local named
    ``dict``, shadowing the builtin.
    """
    actions = self._actions
    if isinstance(actions, list):
        action = actions[index]
    else:
        action = actions
    return action.get('value', 'NoValue')
Get the value of the indexed Tropo action .
52
10
6,311
def getNamedActionValue(self, name):
    """Return the 'value' field of the named Tropo action.

    Returns 'NoValue' when the action has no 'value' key, and also when
    no action in the list carries the requested name.

    Fix: the original raised UnboundLocalError when ``self._actions``
    was a list and no element matched ``name``; it now returns
    'NoValue' in that case.
    """
    actions = self._actions
    match = None
    if isinstance(actions, list):
        for action in actions:
            if action.get('name', 'NoValue') == name:
                match = action
    else:
        match = actions
    if match is None:
        return 'NoValue'
    return match.get('value', 'NoValue')
Get the value of the named Tropo action .
72
10
6,312
def stop_subprocess(pid):
    """Terminate the subprocess whose process id is ``pid``.

    On platforms with ``os.kill`` (all modern Pythons) sends SIGTERM and
    reaps the child with ``os.waitpid``. Otherwise falls back to the
    pywin32 API.

    Fix: the original reassigned ``pid`` to a win32 process *handle* and
    then passed that handle to ``os.waitpid`` (which expects a pid), so
    the wait is now restricted to the os.kill branch where ``pid`` is
    still a real pid.
    """
    if hasattr(os, "kill"):
        import signal
        os.kill(pid, signal.SIGTERM)
        # reap the child so it does not linger as a zombie
        os.waitpid(pid, 0)
    else:
        import win32api
        handle = win32api.OpenProcess(1, 0, pid)
        win32api.TerminateProcess(handle, 0)
Stop subprocess whose process id is pid .
76
9
6,313
def file2abspath(filename, this_file=__file__):
    """Return an absolute path for ``filename`` relative to ``this_file``'s directory."""
    base_dir = os.path.dirname(os.path.abspath(this_file))
    return os.path.abspath(os.path.join(base_dir, filename))
generate absolute path for the given file and base dir
55
11
6,314
def file2json(filename, encoding='utf-8'):
    """Load and return the JSON document stored in ``filename``."""
    with codecs.open(filename, "r", encoding=encoding) as handle:
        document = json.load(handle)
    return document
load and return the json content of a file
42
3
6,315
def file2iter(filename, encoding='utf-8', comment_prefix="#", skip_empty_line=True):
    """Yield stripped lines from a text file, one at a time.

    Args:
        filename: path of the file to read.
        encoding: text encoding used to decode the file.
        comment_prefix: lines starting with this prefix are skipped;
            pass a falsy value to disable comment skipping.
        skip_empty_line: skip lines that are empty after stripping.

    Yields:
        str: each surviving line, stripped of surrounding whitespace.

    Fix: removed the dead locals ``ret`` and ``visited`` which were
    created but never used (this is a generator, not a list builder).
    """
    with codecs.open(filename, encoding=encoding) as handle:
        for raw in handle:
            line = raw.strip()
            # skip empty line
            if skip_empty_line and len(line) == 0:
                continue
            # skip comment line
            if comment_prefix and line.startswith(comment_prefix):
                continue
            yield line
json stream parsing or line parsing
110
6
6,316
def json2file(data, filename, encoding='utf-8'):
    """Write ``data`` to ``filename`` as canonical JSON (sorted keys, indent 4, raw unicode)."""
    with codecs.open(filename, "w", encoding=encoding) as handle:
        json.dump(data, handle, ensure_ascii=False, indent=4, sort_keys=True)
write json in canonical json format
63
6
6,317
def lines2file(lines, filename, encoding='utf-8'):
    """Write each item of ``lines`` to ``filename``, newline-terminated."""
    with codecs.open(filename, "w", encoding=encoding) as handle:
        for entry in lines:
            handle.write(entry)
            handle.write("\n")
write lines to a file, one item per newline-terminated line
56
6
6,318
def items2file(items, filename, encoding='utf-8', modifier='w'):
    """Write ``items`` as JSON lines: one canonical (sorted-key) JSON object per line.

    ``modifier`` is the file mode, so 'a' appends to an existing file.
    """
    with codecs.open(filename, modifier, encoding=encoding) as handle:
        for entry in items:
            serialised = json.dumps(entry, ensure_ascii=False, sort_keys=True)
            handle.write(u"{}\n".format(serialised))
json array to file canonical json format
81
7
6,319
def convert(schema):  # pylint: disable=too-many-return-statements,too-many-branches
    """Convert a voluptuous schema into a plain dict/list description.

    Walks the schema recursively and emits JSON-friendly metadata
    (names, defaults, value/length ranges, select options, type names)
    suitable for driving a UI from a validation schema.

    Raises:
        ValueError: for schema constructs with no known conversion.
    """
    # unwrap a top-level vol.Schema to its underlying definition
    if isinstance(schema, vol.Schema):
        schema = schema.schema
    if isinstance(schema, Mapping):
        # mapping schema: one entry per key, annotated with marker info
        val = []
        for key, value in schema.items():
            description = None
            if isinstance(key, vol.Marker):
                pkey = key.schema
                description = key.description
            else:
                pkey = key
            pval = convert(value)
            pval['name'] = pkey
            if description is not None:
                pval['description'] = description
            if isinstance(key, (vol.Required, vol.Optional)):
                # record required/optional flag and any declared default
                pval[key.__class__.__name__.lower()] = True
                if key.default is not vol.UNDEFINED:
                    pval['default'] = key.default()
            val.append(pval)
        return val
    if isinstance(schema, vol.All):
        # merge the descriptions of every chained validator
        val = {}
        for validator in schema.validators:
            val.update(convert(validator))
        return val
    if isinstance(schema, (vol.Clamp, vol.Range)):
        val = {}
        if schema.min is not None:
            val['valueMin'] = schema.min
        if schema.max is not None:
            val['valueMax'] = schema.max
        return val
    if isinstance(schema, vol.Length):
        val = {}
        if schema.min is not None:
            val['lengthMin'] = schema.min
        if schema.max is not None:
            val['lengthMax'] = schema.max
        return val
    if isinstance(schema, vol.Datetime):
        return {
            'type': 'datetime',
            'format': schema.format,
        }
    if isinstance(schema, vol.In):
        # render as a select; mappings provide (value, label) pairs
        if isinstance(schema.container, Mapping):
            return {
                'type': 'select',
                'options': list(schema.container.items()),
            }
        return {
            'type': 'select',
            'options': [(item, item) for item in schema.container]
        }
    if schema in (vol.Lower, vol.Upper, vol.Capitalize, vol.Title, vol.Strip):
        # string-transform validators map to a simple boolean flag
        return {
            schema.__name__.lower(): True,
        }
    if isinstance(schema, vol.Coerce):
        schema = schema.type
    if schema in TYPES_MAP:
        return {'type': TYPES_MAP[schema]}
    raise ValueError('Unable to convert schema: {}'.format(schema))
Convert a voluptuous schema to a dictionary .
569
11
6,320
def version_cmp(version_a, version_b):
    """Compare two version strings, old-style cmp: negative, zero or positive.

    Fix: the patch component previously reused ``a[0]``/``b[0]`` instead
    of ``a[2]``/``b[2]``, so versions differing only in the patch level
    (e.g. 1.2.3 vs 1.2.4) compared equal.

    NOTE(review): the 100/10/1 weighting still conflates components once
    a minor/patch number reaches 10 - acceptable only if
    normalize_version guarantees single-digit components; confirm.
    """
    a = normalize_version(version_a)
    b = normalize_version(version_b)
    i_a = a[0] * 100 + a[1] * 10 + a[2]
    i_b = b[0] * 100 + b[1] * 10 + b[2]
    return i_a - i_b
Compares two versions
92
4
6,321
def getheader(self, field, default=''):
    """Return the HTTP response header ``field``, matched case-insensitively.

    Falls back to ``default`` when the header is absent or no headers
    were recorded at all.
    """
    if not self.headers:
        return default
    wanted = field.lower()
    for header in self.headers:
        if header.lower() == wanted:
            return self.headers[header]
    return default
Returns the HTTP response header field case insensitively
47
9
6,322
def isAlphanum(c):
    """True when ``c`` is a letter, digit, underscore, dollar sign,
    backslash, or a non-ASCII character (code point above 126)."""
    return ('a' <= c <= 'z'
            or '0' <= c <= '9'
            or 'A' <= c <= 'Z'
            or c == '_'
            or c == '$'
            or c == '\\'
            or (c is not None and ord(c) > 126))
return true if the character is a letter digit underscore dollar sign or non - ASCII character .
85
18
6,323
def _get ( self ) : c = self . theLookahead self . theLookahead = None if c == None : c = self . instream . read ( 1 ) if c >= ' ' or c == '\n' : return c if c == '' : # EOF return '\000' if c == '\r' : return '\n' return ' '
return the next character from stdin . Watch out for lookahead . If the character is a control character translate it to a space or linefeed .
81
30
6,324
def _jsmin(self):
    """Copy the input to the output, deleting characters insignificant to JavaScript.

    Comments are removed, tabs become spaces, carriage returns become
    linefeeds, and most spaces and linefeeds are deleted.

    NOTE(review): the numeric arguments to self._action appear to follow
    the classic jsmin convention (1 = output A, copy B to A, get next B;
    2 = copy B to A, get next B; 3 = get next B) - confirm against the
    definition of _action, which is not visible here.
    """
    self.theA = '\n'
    self._action(3)
    while self.theA != '\000':  # '\000' is the EOF sentinel from _get
        if self.theA == ' ':
            # a space survives only between identifier characters
            if isAlphanum(self.theB):
                self._action(1)
            else:
                self._action(2)
        elif self.theA == '\n':
            # a newline survives before tokens that could start a statement
            if self.theB in ['{', '[', '(', '+', '-']:
                self._action(1)
            elif self.theB == ' ':
                self._action(3)
            else:
                if isAlphanum(self.theB):
                    self._action(1)
                else:
                    self._action(2)
        else:
            if self.theB == ' ':
                if isAlphanum(self.theA):
                    self._action(1)
                else:
                    self._action(3)
            elif self.theB == '\n':
                # keep the newline after tokens that can end a statement
                if self.theA in ['}', ']', ')', '+', '-', '"', '\'']:
                    self._action(1)
                else:
                    if isAlphanum(self.theA):
                        self._action(1)
                    else:
                        self._action(3)
            else:
                self._action(1)
Copy the input to the output deleting the characters which are insignificant to JavaScript . Comments will be removed . Tabs will be replaced with spaces . Carriage returns will be replaced with linefeeds . Most spaces and linefeeds will be removed .
292
49
6,325
def _get_lts_from_user(self, user):
    """Fetch all LayerTemplate rows owned by ``user`` from the database."""
    query = meta.Session.query(LayerTemplate).select_from(join(LayerTemplate, User))
    return query.filter(User.login == user).all()
Get layertemplates owned by a user from the database .
55
13
6,326
def _get_lt_from_user_by_id(self, user, lt_id):
    """Fetch the LayerTemplate with id ``lt_id`` owned by ``user``, or None.

    Fix: ``except Exception, e`` is Python 2-only syntax (a SyntaxError
    on Python 3); rewritten in the modern form. The broad catch is kept
    deliberately so a missing or ambiguous row simply yields None.
    """
    req = meta.Session.query(LayerTemplate).select_from(join(LayerTemplate, User))
    try:
        return req.filter(and_(User.login == user, LayerTemplate.id == lt_id)).one()
    except Exception:
        return None
Get a layertemplate owned by a user from the database by lt_id .
86
19
6,327
def tokenizer(text):
    """A lexical analyzer (generator) for mwTab formatted files.

    Yields KeyValue / KeyValueExtra / SubjectSampleFactors tuples for
    each recognised construct: the file header, section markers
    (#-prefixed), SUBJECT_SAMPLE_FACTORS rows, *_START/*_END data
    blocks, and tab-separated key/value lines. Emits synthetic
    '#ENDSECTION' markers between sections and '!#ENDFILE' at the end.
    """
    stream = deque(text.split("\n"))
    while len(stream) > 0:
        line = stream.popleft()
        if line.startswith("#METABOLOMICS WORKBENCH"):
            # file header: also split out colon-separated identifiers
            yield KeyValue("#METABOLOMICS WORKBENCH", "\n")
            yield KeyValue("HEADER", line)
            for identifier in line.split(" "):
                if ":" in identifier:
                    key, value = identifier.split(":")
                    yield KeyValue(key, value)
        elif line.startswith("#ANALYSIS TYPE"):
            yield KeyValue("HEADER", line)
        elif line.startswith("#SUBJECT_SAMPLE_FACTORS:"):
            yield KeyValue("#ENDSECTION", "\n")
            yield KeyValue("#SUBJECT_SAMPLE_FACTORS", "\n")
        elif line.startswith("#"):
            # any other #-line starts a new section
            yield KeyValue("#ENDSECTION", "\n")
            yield KeyValue(line.strip(), "\n")
        elif line.startswith("SUBJECT_SAMPLE_FACTORS"):
            key, subject_type, local_sample_id, factors, additional_sample_data = line.split("\t")
            # factors = [dict([[i.strip() for i in f.split(":")]]) for f in factors.split("|")]
            yield SubjectSampleFactors(key.strip(), subject_type, local_sample_id, factors, additional_sample_data)
        elif line.endswith("_START"):
            # tabular data block: consume lines until the matching *_END
            yield KeyValue(line, "\n")
            while not line.endswith("_END"):
                line = stream.popleft()
                if line.endswith("_END"):
                    yield KeyValue(line.strip(), "\n")
                else:
                    data = line.split("\t")
                    yield KeyValue(data[0], tuple(data))
        else:
            if line:
                if line.startswith("MS:MS_RESULTS_FILE") or line.startswith("NM:NMR_RESULTS_FILE"):
                    # results-file lines may carry an extra 'key:value' column
                    try:
                        key, value, extra = line.split("\t")
                        extra_key, extra_value = extra.strip().split(":")
                        yield KeyValueExtra(key.strip()[3:], value, extra_key, extra_value)
                    except ValueError:
                        key, value = line.split("\t")
                        yield KeyValue(key.strip()[3:], value)
                else:
                    try:
                        key, value = line.split("\t")
                        if ":" in key:
                            if key.startswith("MS_METABOLITE_DATA:UNITS"):
                                yield KeyValue(key.strip(), value)
                            else:
                                # strip the two-letter section prefix ('MS:' etc.)
                                yield KeyValue(key.strip()[3:], value)
                        else:
                            yield KeyValue(key.strip(), value)
                    except ValueError:
                        print("LINE WITH ERROR:\n\t", repr(line))
                        raise
    yield KeyValue("#ENDSECTION", "\n")
    yield KeyValue("!#ENDFILE", "\n")
A lexical analyzer for the mwtab formatted files .
733
13
6,328
def _get_map_from_user_by_id(self, user, map_id):
    """Fetch the Map with id ``map_id`` owned by ``user``, or None.

    Fix: ``except Exception, e`` is Python 2-only syntax (a SyntaxError
    on Python 3); rewritten in the modern form. The broad catch is kept
    deliberately so a missing or ambiguous row simply yields None.
    """
    req = Session.query(Map).select_from(join(Map, User))
    try:
        return req.filter(and_(User.login == user, Map.id == map_id)).one()
    except Exception:
        return None
Get a mapfile owned by a user from the database by map_id .
79
16
6,329
def _get_maps_from_user(self, user):
    """Fetch all mapfiles owned by ``user`` from the database."""
    query = Session.query(Map).select_from(join(Map, User))
    return query.filter(User.login == user).all()
Get mapfiles owned by a user from the database .
50
11
6,330
def _new_map_from_user(self, user, name, filepath):
    """Create and persist a new mapfile entry owned by ``user``.

    Returns the newly committed Map row.

    Fix: the local was named ``map``, shadowing the builtin; renamed.
    """
    new_map = Map(name, filepath)
    new_map.user = Session.query(User).filter(User.login == user).one()
    Session.add(new_map)
    Session.commit()
    return new_map
Create a new mapfile entry in database .
65
9
6,331
def _proxy(self, url, urlparams=None):
    """Do the actual action of proxying the call.

    Forwards the current request (query params and headers, with Host
    rewritten to the target) to ``url`` and mirrors the upstream
    response's status, headers and body.

    NOTE(review): mutates the ``urlparams`` dict passed in, and calling
    with the default None while the request carries params would raise
    TypeError - presumably callers always pass a dict; confirm.
    Python 2 APIs (urllib2, iteritems) are used throughout.
    """
    # merge the incoming request's query parameters into urlparams
    for k, v in request.params.iteritems():
        urlparams[k] = v
    query = urlencode(urlparams)
    full_url = url
    if query:
        if not full_url.endswith("?"):
            full_url += "?"
        full_url += query
    # build the request with its headers
    req = urllib2.Request(url=full_url)
    for header in request.headers:
        if header.lower() == "host":
            # rewrite Host to match the proxied target's netloc
            req.add_header(header, urlparse.urlparse(url)[1])
        else:
            req.add_header(header, request.headers[header])
    res = urllib2.urlopen(req)
    # add response headers
    i = res.info()
    response.status = res.code
    got_content_length = False
    for header in i:
        # We don't support serving the result as chunked
        if header.lower() == "transfer-encoding":
            continue
        if header.lower() == "content-length":
            got_content_length = True
        response.headers[header] = i[header]
    # return the result
    result = res.read()
    res.close()
    #if not got_content_length:
    #    response.headers['content-length'] = str(len(result))
    return result
Do the actual action of proxying the call .
296
10
6,332
def open_file(orig_file_path):
    """Attempt to open a mock data file for ``orig_file_path``.

    Tries the path as-is, its platform-safe form, the URL-unquoted form,
    and each of those with '/index.html' appended. Returns the first
    successfully opened binary handle, or None when nothing matches.
    """
    unquoted = unquote(orig_file_path)
    candidates = [
        convert_to_platform_safe(orig_file_path),
        "%s/index.html" % (convert_to_platform_safe(orig_file_path)),
        orig_file_path,
        "%s/index.html" % orig_file_path,
        convert_to_platform_safe(unquoted),
        "%s/index.html" % (convert_to_platform_safe(unquoted)),
        unquoted,
        "%s/index.html" % unquoted,
    ]
    for candidate in candidates:
        try:
            return open(candidate, "rb")
        except IOError:
            continue
    return None
Taking in a file path attempt to open mock data files with it .
179
14
6,333
def attempt_open_query_permutations ( url , orig_file_path , is_header_file ) : directory = dirname ( convert_to_platform_safe ( orig_file_path ) ) + "/" # get all filenames in directory try : filenames = [ f for f in os . listdir ( directory ) if isfile ( join ( directory , f ) ) ] except OSError : return # ensure that there are not extra parameters on any files if is_header_file : filenames = [ f for f in filenames if ".http-headers" in f ] filenames = [ f for f in filenames if _compare_file_name ( orig_file_path + ".http-headers" , directory , f ) ] else : filenames = [ f for f in filenames if ".http-headers" not in f ] filenames = [ f for f in filenames if _compare_file_name ( orig_file_path , directory , f ) ] url_parts = url . split ( "/" ) url_parts = url_parts [ len ( url_parts ) - 1 ] . split ( "?" ) base = url_parts [ 0 ] params = url_parts [ 1 ] params = params . split ( "&" ) # check to ensure that the base url matches filenames = [ f for f in filenames if f . startswith ( base ) ] params = [ convert_to_platform_safe ( unquote ( p ) ) for p in params ] # ensure that all parameters are there for param in params : filenames = [ f for f in filenames if param in f ] # if we only have one file, return it if len ( filenames ) == 1 : path = join ( directory , filenames [ 0 ] ) return open_file ( path ) # if there is more than one file, raise an exception if len ( filenames ) > 1 : raise DataFailureException ( url , "Multiple mock data files matched the " + "parameters provided!" , 404 )
Attempt to open a given mock data file with different permutations of the query parameters
454
16
6,334
def lookup(self, value):
    """Return the first key whose mapped value equals ``value``, else None."""
    for key, mapped in self.iteritems():
        if mapped == value:
            return key
    return None
return the first key in dict where value is name
29
10
6,335
def _getLPA ( self ) : return str ( self . line ) + ":" + str ( self . pos ) + ":" + str ( self . absPosition )
Provides line pos and absPosition line as string
37
10
6,336
def _onImport(self, name, line, pos, absPosition):
    """Memorise an import: flush any pending record, then start a new one."""
    pending = self.__lastImport
    if pending is not None:
        self.imports.append(pending)
    self.__lastImport = Import(name, line, pos, absPosition)
    return
Memorizes an import
57
5
6,337
def _onAs ( self , name ) : if self . __lastImport . what : self . __lastImport . what [ - 1 ] . alias = name else : self . __lastImport . alias = name return
Memorizes an alias for an import or an imported item
46
12
6,338
def comment_count(obj):
    """Return the number of comments attached to ``obj``.

    Re-fetches the object through its model manager so the count
    reflects the current database state.
    """
    fresh = type(obj).objects.get(id=obj.id)
    return fresh.comments.all().count()
returns the count of comments of an object
41
9
6,339
def profile_url(obj, profile_app_name, profile_model_name):
    """Return the absolute URL of ``obj.user``'s profile, or '' when unavailable.

    Returns '' when the profile content type does not exist, or when the
    inputs are malformed (AttributeError).
    """
    try:
        content_type = ContentType.objects.get(
            app_label=profile_app_name,
            model=profile_model_name.lower()
        )
        profile = content_type.get_object_for_this_type(user=obj.user)
        return profile.get_absolute_url()
    except (ContentType.DoesNotExist, AttributeError):
        return ""
returns profile url of user
105
6
6,340
def img_url(obj, profile_app_name, profile_model_name):
    """Return the URL of the profile image of ``obj.user``, or ''.

    Looks up the profile model via the ContentType framework and returns
    the URL of its first file field (identified by having ``upload_to``).

    Fixes: the original implicitly returned None when the profile model
    had no upload field, inconsistent with the '' returned by the error
    branches; it also called ``model_class()`` twice.
    """
    try:
        content_type = ContentType.objects.get(
            app_label=profile_app_name,
            model=profile_model_name.lower()
        )
    except ContentType.DoesNotExist:
        return ""
    except AttributeError:
        return ""
    Profile = content_type.model_class()
    fields = Profile._meta.get_fields()
    profile = Profile.objects.get(user=obj.user)
    for field in fields:
        # file/image fields are the ones carrying an upload_to attribute
        if hasattr(field, "upload_to"):
            return field.value_from_object(profile).url
    return ""
returns url of profile image of a user
149
9
6,341
def get_comments(obj, request, oauth=False, paginate=False, cpp=10):
    """Build the template context for rendering the comments of ``obj``.

    Args:
        obj: model instance whose related comments are listed.
        request: current request; supplies the user and the ?page= param.
        oauth: passed through to the template context unchanged.
        paginate: when True, page the comments at ``cpp`` per page.
        cpp: comments per page.

    Returns:
        dict: context with the comment form, the (possibly paginated)
        comments, profile app/model names from settings (None when not
        configured), a normalised login URL and the pagination flags.
    """
    model_object = type(obj).objects.get(id=obj.id)
    comments = Comment.objects.filter_by_object(model_object)
    comments_count = comments.count()
    if paginate:
        paginator = Paginator(comments, cpp)
        page = request.GET.get('page')
        try:
            comments = paginator.page(page)
        except PageNotAnInteger:
            # non-numeric page value: fall back to the first page
            comments = paginator.page(1)
        except EmptyPage:
            # out-of-range page: deliver the last page instead
            comments = paginator.page(paginator.num_pages)
    try:
        profile_app_name = settings.PROFILE_APP_NAME
        profile_model_name = settings.PROFILE_MODEL_NAME
    except AttributeError:
        profile_app_name = None
        profile_model_name = None
    try:
        # normalise LOGIN_URL to an absolute path
        if settings.LOGIN_URL.startswith("/"):
            login_url = settings.LOGIN_URL
        else:
            login_url = "/" + settings.LOGIN_URL
    except AttributeError:
        login_url = ""
    return {
        "commentform": CommentForm(),
        "model_object": obj,
        "user": request.user,
        "comments": comments,
        # "comments_count": comments_count,
        "oauth": oauth,
        "profile_app_name": profile_app_name,
        "profile_model_name": profile_model_name,
        "paginate": paginate,
        "login_url": login_url,
        "cpp": cpp
    }
Retrieves list of comments related to a certain object and renders The appropriate template to view it
349
19
6,342
def save(self):
    """Pickle this model instance to '<name>.lcopt'.

    The file goes to the current directory when save_option is
    'curdir', otherwise to the application data directory.
    """
    target = '{}.lcopt'.format(self.name)
    if self.save_option == 'curdir':
        model_path = os.path.join(os.getcwd(), target)
    else:
        # default to appdir
        model_path = os.path.join(storage.model_dir, target)
    model_path = fix_mac_path_escapes(model_path)
    with open(model_path, 'wb') as model_file:
        pickle.dump(self, model_file)
save the instance as a . lcopt file
132
10
6,343
def load(self, filename):
    """Load model state from a saved .lcopt pickle into this instance.

    Looks for ``filename`` (appending '.lcopt' when missing) first as
    given, then inside the app-data model directory. Copies every known
    attribute present on the saved instance onto ``self`` and back-fills
    defaults for models written by older lcopt versions.
    """
    if filename[-6:] != ".lcopt":
        filename += ".lcopt"
    try:
        savedInstance = pickle.load(open("{}".format(filename), "rb"))
    except FileNotFoundError:
        # fall back to the app-data model directory
        savedInstance = pickle.load(open(fix_mac_path_escapes(os.path.join(storage.model_dir, "{}".format(filename))), "rb"))
    # every attribute we know how to carry over from a saved model
    attributes = ['name',
                  'database',
                  'params',
                  'production_params',
                  'allocation_params',
                  'ext_params',
                  'matrix',
                  'names',
                  'parameter_sets',
                  'model_matrices',
                  'technosphere_matrices',
                  'leontif_matrices',
                  'external_databases',
                  'parameter_map',
                  'sandbox_positions',
                  'ecoinventName',
                  'biosphereName',
                  'forwastName',
                  'analysis_settings',
                  'technosphere_databases',
                  'biosphere_databases',
                  'result_set',
                  'evaluated_parameter_sets',
                  'useForwast',
                  'base_project_name',
                  'save_option',
                  'allow_allocation',
                  'ecoinvent_version',
                  'ecoinvent_system_model',
                  ]
    for attr in attributes:
        if hasattr(savedInstance, attr):
            setattr(self, attr, getattr(savedInstance, attr))
        else:
            pass
            #print ("can't set {}".format(attr))
    # use legacy save option if this is missing from the model
    if not hasattr(savedInstance, 'save_option'):
        setattr(self, 'save_option', LEGACY_SAVE_OPTION)
    # figure out ecoinvent version and system model if these are missing from the model
    if not hasattr(savedInstance, 'ecoinvent_version') or not hasattr(savedInstance, 'ecoinvent_system_model'):
        # names look like 'Ecoinvent3_3_cutoff' -> version '3.3', model 'cutoff'
        parts = savedInstance.ecoinventName.split("_")
        main_version = parts[0][-1]
        sub_version = parts[1]
        system_model = parts[2]
        #print(parts)
        setattr(self, 'ecoinvent_version', '{}.{}'.format(main_version, sub_version))
        setattr(self, 'ecoinvent_system_model', system_model)
load data from a saved . lcopt file
552
10
6,344
def create_product(self, name, location='GLO', unit='kg', **kwargs):
    """Create a new product in the model database.

    Returns the product's exchange on success, or False when an item
    with the same code already exists.
    """
    new_product = item_factory(name=name, location=location, unit=unit, type='product', **kwargs)
    if self.exists_in_database(new_product['code']):
        # an item with this code is already present
        return False
    self.add_to_database(new_product)
    return self.get_exchange(name)
Create a new product in the model database
136
8
6,345
def unlink_intermediate(self, sourceId, targetId):
    """Remove the link between two processes.

    Drops from ``targetId``'s exchanges every entry fed by
    ``sourceId``'s production output, then rescans parameters.
    Always returns True.
    """
    db_name = self.database.get('name')
    source = self.database['items'][(db_name, sourceId)]
    target = self.database['items'][(db_name, targetId)]
    production_input = [x['input'] for x in source['exchanges'] if x['type'] == 'production'][0]
    target['exchanges'] = [x for x in target['exchanges'] if x['input'] != production_input]
    self.parameter_scan()
    return True
Remove a link between two processes
158
6
6,346
def generate_parameter_set_excel_file(self):
    """Write this model's parameter sets to a SimaPro-importable Excel file.

    Produces 'ParameterSet_<name>_input_file.xlsx' in the current
    directory (save_option == 'curdir') or in the per-model SimaPro
    folder. Emits one row per non-computed internal parameter and one
    per external parameter, with a column per parameter set.

    Returns:
        str: full path of the written workbook.
    """
    parameter_sets = self.parameter_sets
    p_set = []
    filename = "ParameterSet_{}_input_file.xlsx".format(self.name)
    if self.save_option == 'curdir':
        base_dir = os.getcwd()
    else:
        base_dir = os.path.join(storage.simapro_dir, self.name.replace(" ", "_"))
        if not os.path.isdir(base_dir):
            os.mkdir(base_dir)
    p_set_name = os.path.join(base_dir, filename)
    p = self.params
    for k in p.keys():
        if p[k]['function'] is None:
            # plain parameters: one row, plus a column per parameter set
            base_dict = {'id': k, 'name': p[k]['description'], 'unit': p[k]['unit']}
            for s in parameter_sets.keys():
                base_dict[s] = parameter_sets[s][k]
            p_set.append(base_dict)
        else:
            # computed parameters are derived, so they are not exported
            pass
            #print("{} is determined by a function".format(p[k]['description']))
    for e in self.ext_params:
        # external (global) parameters carry no unit metadata
        base_dict = {'id': '{}'.format(e['name']), 'type': 'external', 'name': e['description'], 'unit': ''}
        for s in parameter_sets.keys():
            base_dict[s] = parameter_sets[s][e['name']]
        p_set.append(base_dict)
    df = pd.DataFrame(p_set)
    with pd.ExcelWriter(p_set_name, engine='xlsxwriter') as writer:
        ps_columns = [k for k in parameter_sets.keys()]
        #print (ps_columns)
        my_columns = ['name', 'unit', 'id']
        my_columns.extend(ps_columns)
        #print (my_columns)
        #print(df)
        df.to_excel(writer, sheet_name=self.name, columns=my_columns, index=False, merge_cells=False)
    return p_set_name
Generate an excel file containing the parameter sets in a format you can import into SimaPro Developer .
525
21
6,347
def add_parameter(self, param_name, description=None, default=0, unit=None):
    """Add a global parameter to the database that functions can access.

    A default description ('Parameter called <name>') and unit ('-') are
    supplied when omitted. Duplicates are rejected with a printed notice.
    """
    if description is None:
        description = "Parameter called {}".format(param_name)
    if unit is None:
        unit = "-"
    duplicates = [p for p in self.ext_params if p['name'] == param_name]
    if duplicates:
        print('{} already exists - choose a different name'.format(param_name))
    else:
        self.ext_params.append({
            'name': param_name,
            'description': description,
            'default': default,
            'unit': unit,
        })
Add a global parameter to the database that can be accessed by functions
162
13
6,348
def list_parameters_as_df(self):
    """Tabulate external and internal parameters as a pandas DataFrame.

    Only really useful when running from a jupyter notebook. External
    parameters get 'n/a' for coords and function; internal parameters
    report their coordinates and any defining function.

    Fix: dropped the unused ``enumerate`` index over ext_params.
    """
    rows = []
    for e in self.ext_params:
        rows.append({
            'id': e['name'],
            'coords': "n/a",
            'description': e['description'],
            'function': "n/a",
        })
    for pk in self.params:
        p = self.params[pk]
        rows.append({
            'id': pk,
            'coords': p['coords'],
            'description': p['description'],
            'function': p['function'],
        })
    return pd.DataFrame(rows)
Only really useful when running from a jupyter notebook .
190
13
6,349
def import_external_db(self, db_file, db_type=None):
    """Import an external pickled database for use in lcopt.

    Args:
        db_file: path of the pickle WITHOUT its '.pickle' suffix.
        db_type: 'technosphere' (default) or 'biosphere'.

    Raises:
        ValueError: for an unrecognised ``db_type``. (The original did a
        bare ``raise Exception`` followed by an unreachable print of the
        error text; the message now travels with the exception, which
        remains catchable as Exception.)

    Also fixed: the pickle handle is now closed, and the database is
    only registered after the type check, so a failed call leaves no
    partial state.
    """
    with open("{}.pickle".format(db_file), "rb") as handle:
        db = pickle.load(handle)
    # item keys are (database_name, code) tuples; take the name from the first
    name = list(db.keys())[0][0]
    new_db = {'items': db, 'name': name}
    if db_type is None:
        # assume it's a technosphere database
        db_type = 'technosphere'
    if db_type == 'technosphere':
        self.technosphere_databases.append(name)
    elif db_type == 'biosphere':
        self.biosphere_databases.append(name)
    else:
        raise ValueError("Database type must be 'technosphere' or 'biosphere'")
    self.external_databases.append(new_db)
Import an external database for use in lcopt
182
10
6,350
def search_databases(self, search_term, location=None, markets_only=False, databases_to_search=None, allow_internal=False):
    """Search external databases linked to your lcopt model.

    Args:
        search_term (str): substring matched (case-insensitively) against item names.
        location (str, optional): restrict hits to this location.
        markets_only (bool, optional): only return 'market for' activities.
        databases_to_search (list, optional): names of external databases to
            search; all linked databases are searched when None.
        allow_internal (bool, optional): also search this model's own
            'intermediate' items.

    Returns:
        The result set of the query.
    """
    dict_list = []
    if allow_internal:
        internal = {k: v for k, v in self.database['items'].items()
                    if v.get('lcopt_type') == 'intermediate'}
        dict_list.append(internal)
    if databases_to_search is None:
        # Search all of the databases available.
        dict_list.extend(x['items'] for x in self.external_databases)
    else:
        dict_list.extend(x['items'] for x in self.external_databases
                         if x['name'] in databases_to_search)
    data = Dictionaries(*dict_list)
    query = Query()
    if markets_only:
        query.add(Filter("name", "has", "market for"))
    if location is not None:
        query.add(Filter("location", "is", location))
    query.add(Filter("name", "ihas", search_term))
    return query(data)
Search external databases linked to your lcopt model .
399
11
6,351
def export_to_bw2(self):
    """Export the lcopt model in the native brightway 2 format.

    Returns:
        tuple: ``(name, bw2db)`` as produced by :class:`Bw2Exporter`.
    """
    exporter = Bw2Exporter(self)
    name, bw2db = exporter.export_to_bw2()
    return name, bw2db
Export the lcopt model in the native brightway 2 format
53
13
6,352
def analyse(self, demand_item, demand_item_code):
    """Run the analysis of the model.

    Doesn't return a result directly, but creates a new item,
    ``LcoptModel.result_set``, containing the results.

    Returns:
        bool: always True on completion.
    """
    analysis = Bw2Analysis(self)
    self.result_set = analysis.run_analyses(demand_item, demand_item_code,
                                            **self.analysis_settings)
    return True
Run the analysis of the model. Doesn't return anything, but creates a new item, LcoptModel.result_set, containing the results.
62
27
6,353
def locate(name, **kwargs):
    """Show resolve information about specified service."""
    ctx = Context(**kwargs)
    ctx.execute_action('locate', name=name, locator=ctx.locator)
Show resolve information about specified service .
57
7
6,354
def routing(name, **kwargs):
    """Show information about the requested routing group."""
    ctx = Context(**kwargs)
    ctx.execute_action('routing', name=name, locator=ctx.locator)
Show information about the requested routing group .
57
8
6,355
def cluster(resolve, **kwargs):
    """Show cluster info.

    We already have IPs, so nothing needs resolving to IPs; the default
    behavior therefore fits this option name as-is.
    """
    ctx = Context(**kwargs)
    ctx.execute_action('cluster', locator=ctx.locator, resolve=resolve)
Show cluster info .
89
4
6,356
def info(name, m, p, b, w, **kwargs):
    """Show information about cocaine runtime.

    A bit-flag mask is assembled from the ``m`` and ``p`` options; the brief
    flag ``b`` disables all other flags.
    """
    meta_bit = (m << 1) & 0b010
    profile_bit = (p << 2) & 0b100
    if b:
        # Brief disables all further flags.
        flags = 0b000
    else:
        flags = meta_bit | profile_bit | 0b001
    ctx = Context(**kwargs)
    ctx.execute_action('info',
                       node=ctx.repo.create_secure_service('node'),
                       locator=ctx.locator,
                       name=name,
                       flags=flags,
                       use_wildcard=w,
                       timeout=ctx.timeout)
Show information about cocaine runtime .
157
6
6,357
def metrics(ty, query, query_type, **kwargs):
    """Outputs runtime metrics collected from cocaine-runtime and its services."""
    ctx = Context(**kwargs)
    ctx.execute_action('metrics',
                       metrics=ctx.repo.create_secure_service('metrics'),
                       ty=ty,
                       query=query,
                       query_type=query_type)
Outputs runtime metrics collected from cocaine - runtime and its services .
90
13
6,358
def app_list(**kwargs):
    """Show uploaded applications."""
    ctx = Context(**kwargs)
    ctx.execute_action('app:list',
                       storage=ctx.repo.create_secure_service('storage'))
Show uploaded applications .
61
4
6,359
def app_view(name, **kwargs):
    """Show manifest content for an application."""
    ctx = Context(**kwargs)
    ctx.execute_action('app:view',
                       storage=ctx.repo.create_secure_service('storage'),
                       name=name)
Show manifest content for an application .
69
7
6,360
def app_import(path, name, manifest, container_url, docker_address, registry, **kwargs):
    """Import application Docker container.

    The action can be slow, so the timeout is raised to at least two minutes.
    """
    MIN_TIMEOUT = 120.0
    ctx = Context(**kwargs)
    if ctx.timeout < MIN_TIMEOUT:
        ctx.timeout = MIN_TIMEOUT
        log.info('shifted timeout to the %.2fs', ctx.timeout)
    # Guard clause: both endpoints are required before we touch the storage.
    if not (container_url and docker_address):
        raise ValueError("both `container_url` and `docker_address` options must not be empty")
    ctx.execute_action('app:import-docker',
                       storage=ctx.repo.create_secure_service('storage'),
                       path=path,
                       name=name,
                       manifest=manifest,
                       container=container_url,
                       address=docker_address,
                       registry=registry)
Import application Docker container .
201
5
6,361
def app_remove(name, **kwargs):
    """Remove application from storage."""
    ctx = Context(**kwargs)
    ctx.execute_action('app:remove',
                       storage=ctx.repo.create_secure_service('storage'),
                       name=name)
Remove application from storage .
69
5
6,362
def app_start(name, profile, **kwargs):
    """Start an application with specified profile."""
    ctx = Context(**kwargs)
    ctx.execute_action('app:start',
                       node=ctx.repo.create_secure_service('node'),
                       name=name,
                       profile=profile)
Start an application with specified profile .
76
7
6,363
def app_restart(name, profile, **kwargs):
    """Restart application."""
    ctx = Context(**kwargs)
    ctx.execute_action('app:restart',
                       node=ctx.repo.create_secure_service('node'),
                       locator=ctx.locator,
                       name=name,
                       profile=profile)
Restart application .
90
4
6,364
def check(name, **kwargs):
    """Check application status."""
    ctx = Context(**kwargs)
    ctx.execute_action('app:check',
                       node=ctx.repo.create_secure_service('node'),
                       name=name)
Check application status .
67
4
6,365
def profile_list(**kwargs):
    """Show uploaded profiles."""
    ctx = Context(**kwargs)
    ctx.execute_action('profile:list',
                       storage=ctx.repo.create_secure_service('storage'))
Show uploaded profiles .
61
4
6,366
def profile_view(name, **kwargs):
    """Show profile configuration content."""
    ctx = Context(**kwargs)
    ctx.execute_action('profile:view',
                       storage=ctx.repo.create_secure_service('storage'),
                       name=name)
Show profile configuration content .
69
5
6,367
def profile_remove(name, **kwargs):
    """Remove profile from the storage."""
    ctx = Context(**kwargs)
    ctx.execute_action('profile:remove',
                       storage=ctx.repo.create_secure_service('storage'),
                       name=name)
Remove profile from the storage .
69
6
6,368
def runlist_list(**kwargs):
    """Show uploaded runlists."""
    ctx = Context(**kwargs)
    ctx.execute_action('runlist:list',
                       storage=ctx.repo.create_secure_service('storage'))
Show uploaded runlists .
63
5
6,369
def runlist_view(name, **kwargs):
    """Show configuration content for a specified runlist."""
    ctx = Context(**kwargs)
    ctx.execute_action('runlist:view',
                       storage=ctx.repo.create_secure_service('storage'),
                       name=name)
Show configuration content for a specified runlist .
70
9
6,370
def runlist_upload(name, runlist, **kwargs):
    """Upload runlist with content into the storage."""
    ctx = Context(**kwargs)
    ctx.execute_action('runlist:upload',
                       storage=ctx.repo.create_secure_service('storage'),
                       name=name,
                       runlist=runlist)
Upload runlist with context into the storage .
82
9
6,371
def runlist_create(name, **kwargs):
    """Create runlist and upload it into the storage."""
    ctx = Context(**kwargs)
    ctx.execute_action('runlist:create',
                       storage=ctx.repo.create_secure_service('storage'),
                       name=name)
Create runlist and upload it into the storage .
71
10
6,372
def runlist_remove(name, **kwargs):
    """Remove runlist from the storage."""
    ctx = Context(**kwargs)
    ctx.execute_action('runlist:remove',
                       storage=ctx.repo.create_secure_service('storage'),
                       name=name)
Remove runlist from the storage .
71
7
6,373
def runlist_add_app(name, app, profile, force, **kwargs):
    """Add specified application with profile to the specified runlist."""
    ctx = Context(**kwargs)
    ctx.execute_action('runlist:add-app',
                       storage=ctx.repo.create_secure_service('storage'),
                       name=name,
                       app=app,
                       profile=profile,
                       force=force)
Add specified application with profile to the specified runlist .
98
11
6,374
def crashlog_status(**kwargs):
    """Show crashlogs status."""
    ctx = Context(**kwargs)
    ctx.execute_action('crashlog:status',
                       storage=ctx.repo.create_secure_service('storage'))
Show crashlogs status .
64
6
6,375
def crashlog_list(name, day, **kwargs):
    """Show crashlogs list for application."""
    ctx = Context(**kwargs)
    ctx.execute_action('crashlog:list',
                       storage=ctx.repo.create_secure_service('storage'),
                       name=name,
                       day_string=day)
Show crashlogs list for application .
82
8
6,376
def crashlog_view(name, timestamp, **kwargs):
    """Show crashlog for application with specified timestamp."""
    ctx = Context(**kwargs)
    ctx.execute_action('crashlog:view',
                       storage=ctx.repo.create_secure_service('storage'),
                       name=name,
                       timestamp=timestamp)
Show crashlog for application with specified timestamp .
81
9
6,377
def crashlog_removeall(name, **kwargs):
    """Remove all crashlogs for application from the storage."""
    ctx = Context(**kwargs)
    ctx.execute_action('crashlog:removeall',
                       storage=ctx.repo.create_secure_service('storage'),
                       name=name)
Remove all crashlogs for application from the storage .
74
11
6,378
def crashlog_clean(name, timestamp, size, **kwargs):
    """For application NAME leave SIZE crashlogs or remove all crashlogs newer than TIMESTAMP."""
    ctx = Context(**kwargs)
    ctx.execute_action('crashlog:clean',
                       storage=ctx.repo.create_secure_service('storage'),
                       name=name,
                       size=size,
                       timestamp=timestamp)
For application NAME leave SIZE crashlogs or remove all crashlogs with timestamp > TIMESTAMP .
89
22
6,379
def crashlog_cleanrange(from_day, up_to_day, **kwargs):
    """Remove all crashlogs from one date up to another."""
    ctx = Context(**kwargs)
    ctx.execute_action('crashlog:cleanwhen',
                       storage=ctx.repo.create_secure_service('storage'),
                       from_day=from_day,
                       to_day=up_to_day)
Remove all crashlogs from one date up to another .
98
12
6,380
def group_list(**kwargs):
    """Show available routing groups."""
    ctx = Context(**kwargs)
    ctx.execute_action('group:list',
                       storage=ctx.repo.create_secure_service('storage'))
Show available routing groups .
61
5
6,381
def group_view(name, **kwargs):
    """Show specified routing group."""
    ctx = Context(**kwargs)
    ctx.execute_action('group:view',
                       storage=ctx.repo.create_secure_service('storage'),
                       name=name)
Show specified routing group .
69
5
6,382
def group_create(name, content, **kwargs):
    """Create routing group."""
    ctx = Context(**kwargs)
    ctx.execute_action('group:create',
                       storage=ctx.repo.create_secure_service('storage'),
                       name=name,
                       content=content)
Create routing group .
77
4
6,383
def group_remove(name, **kwargs):
    """Remove routing group from the storage."""
    ctx = Context(**kwargs)
    ctx.execute_action('group:remove',
                       storage=ctx.repo.create_secure_service('storage'),
                       name=name)
Remove routing group from the storage .
69
7
6,384
def group_copy(name, copyname, **kwargs):
    """Copy routing group."""
    ctx = Context(**kwargs)
    ctx.execute_action('group:copy',
                       storage=ctx.repo.create_secure_service('storage'),
                       name=name,
                       copyname=copyname)
Copy routing group .
83
4
6,385
def group_refresh(name, **kwargs):
    """Refresh routing group."""
    ctx = Context(**kwargs)
    ctx.execute_action('group:refresh',
                       locator=ctx.locator,
                       storage=ctx.repo.create_secure_service('storage'),
                       name=name)
Refresh routing group .
82
5
6,386
def group_push(name, app, weight, **kwargs):
    """Add application with its weight into the routing group."""
    ctx = Context(**kwargs)
    ctx.execute_action('group:app:add',
                       storage=ctx.repo.create_secure_service('storage'),
                       name=name,
                       app=app,
                       weight=weight)
Add application with its weight into the routing group .
87
10
6,387
def group_pop(name, app, **kwargs):
    """Remove application from the specified routing group."""
    ctx = Context(**kwargs)
    ctx.execute_action('group:app:remove',
                       storage=ctx.repo.create_secure_service('storage'),
                       name=name,
                       app=app)
Remove application from the specified routing group .
79
8
6,388
def unicorn_edit(path, **kwargs):
    """Edit Unicorn node interactively."""
    ctx = Context(**kwargs)
    # Interactive editing has no sensible deadline.
    ctx.timeout = None
    ctx.execute_action('unicorn:edit',
                       unicorn=ctx.repo.create_secure_service('unicorn'),
                       path=path)
Edit Unicorn node interactively .
78
6
6,389
def logging_list_loggers(**kwargs):
    """List all registered logger names."""
    ctx = Context(**kwargs)
    ctx.execute_action('logging:list_loggers',
                       logging_service=ctx.repo.create_secure_service('logging'))
List all registered logger names .
72
6
6,390
def logging_set_filter(name, filter_def, ttl, **kwargs):
    """Set local filter."""
    ctx = Context(**kwargs)
    ctx.execute_action('logging:set_filter',
                       logging_service=ctx.repo.create_secure_service('logging'),
                       logger_name=name,
                       filter_def=filter_def,
                       ttl=ttl)
Set local filter .
106
4
6,391
def logging_remove_filter(filter_id, **kwargs):
    """Remove filter by filter id."""
    ctx = Context(**kwargs)
    ctx.execute_action('logging:remove_filter',
                       logging_service=ctx.repo.create_secure_service('logging'),
                       filter_id=filter_id)
Remove filter by filter id .
84
6
6,392
def logging_list_filters(**kwargs):
    """List all available filters."""
    ctx = Context(**kwargs)
    ctx.execute_action('logging:list_filters',
                       logging_service=ctx.repo.create_secure_service('logging'))
List all available filters .
72
5
6,393
def auth_list(**kwargs):
    """Shows available authorization groups."""
    ctx = Context(**kwargs)
    ctx.execute_action('auth:group:list',
                       storage=ctx.repo.create_secure_service('storage'))
Shows available authorization groups .
63
6
6,394
def auth_create(name, token, force, **kwargs):
    """Creates an authorization group."""
    ctx = Context(**kwargs)
    ctx.execute_action('auth:group:create',
                       storage=ctx.repo.create_secure_service('storage'),
                       name=name,
                       token=token,
                       force=force)
Creates an authorization group .
87
6
6,395
def auth_edit(name, **kwargs):
    """Interactively edits an authorization group."""
    ctx = Context(**kwargs)
    # Interactive editing has no sensible deadline.
    ctx.timeout = None
    ctx.execute_action('auth:group:edit',
                       storage=ctx.repo.create_secure_service('storage'),
                       name=name)
Interactively edits an authorization group .
77
7
6,396
def auth_remove(name, drop, **kwargs):
    """Removes an authorization group."""
    ctx = Context(**kwargs)
    ctx.execute_action('auth:group:remove',
                       storage=ctx.repo.create_secure_service('storage'),
                       name=name,
                       drop=drop)
Removes an authorization group .
79
6
6,397
def auth_view(name, **kwargs):
    """Shows an authorization group's content."""
    ctx = Context(**kwargs)
    ctx.execute_action('auth:group:view',
                       storage=ctx.repo.create_secure_service('storage'),
                       name=name)
Shows an authorization group's content.
71
8
6,398
def auth_add(name, service, **kwargs):
    """Adds a member to an authorization group."""
    ctx = Context(**kwargs)
    ctx.execute_action('auth:group:members:add',
                       storage=ctx.repo.create_secure_service('storage'),
                       name=name,
                       service=service)
Adds a member of an authorization group .
81
8
6,399
def access_storage_list(**kwargs):
    """Shows collections with ACL."""
    ctx = Context(**kwargs)
    ctx.execute_action('access:storage:list',
                       storage=ctx.repo.create_secure_service('storage'))
Shows collections with ACL .
65
6