idx int64 0 251k | question stringlengths 53 3.53k | target stringlengths 5 1.23k | len_question int64 20 893 | len_target int64 3 238 |
|---|---|---|---|---|
242,100 | def _has_app ( self , app , webpage ) : # Search the easiest things first and save the full-text search of the # HTML for last for regex in app [ 'url' ] : if regex . search ( webpage . url ) : return True for name , regex in app [ 'headers' ] . items ( ) : if name in webpage . headers : content = webpage . headers [ name ] if regex . search ( content ) : return True for regex in app [ 'script' ] : for script in webpage . scripts : if regex . search ( script ) : return True for name , regex in app [ 'meta' ] . items ( ) : if name in webpage . meta : content = webpage . meta [ name ] if regex . search ( content ) : return True for regex in app [ 'html' ] : if regex . search ( webpage . html ) : return True | Determine whether the web page matches the app signature . | 185 | 12 |
242,101 | def _get_implied_apps ( self , detected_apps ) : def __get_implied_apps ( apps ) : _implied_apps = set ( ) for app in apps : try : _implied_apps . update ( set ( self . apps [ app ] [ 'implies' ] ) ) except KeyError : pass return _implied_apps implied_apps = __get_implied_apps ( detected_apps ) all_implied_apps = set ( ) # Descend recursively until we've found all implied apps while not all_implied_apps . issuperset ( implied_apps ) : all_implied_apps . update ( implied_apps ) implied_apps = __get_implied_apps ( all_implied_apps ) return all_implied_apps | Get the set of apps implied by detected_apps . | 176 | 11 |
242,102 | def get_categories ( self , app_name ) : cat_nums = self . apps . get ( app_name , { } ) . get ( "cats" , [ ] ) cat_names = [ self . categories . get ( "%s" % cat_num , "" ) for cat_num in cat_nums ] return cat_names | Returns a list of the categories for an app name . | 76 | 11 |
242,103 | def analyze ( self , webpage ) : detected_apps = set ( ) for app_name , app in self . apps . items ( ) : if self . _has_app ( app , webpage ) : detected_apps . add ( app_name ) detected_apps |= self . _get_implied_apps ( detected_apps ) return detected_apps | Return a list of applications that can be detected on the web page . | 76 | 14 |
242,104 | def analyze_with_categories ( self , webpage ) : detected_apps = self . analyze ( webpage ) categorised_apps = { } for app_name in detected_apps : cat_names = self . get_categories ( app_name ) categorised_apps [ app_name ] = { "categories" : cat_names } return categorised_apps | Return a list of applications and categories that can be detected on the web page . | 79 | 16 |
242,105 | def clean ( self ) : if self . _initialized : logger . info ( "brace yourselves, removing %r" , self . path ) shutil . rmtree ( self . path ) | remove the directory we operated on | 40 | 6 |
242,106 | def initialize ( self ) : if not self . _initialized : logger . info ( "initializing %r" , self ) if not os . path . exists ( self . path ) : if self . mode is not None : os . makedirs ( self . path , mode = self . mode ) else : os . makedirs ( self . path ) self . _set_mode ( ) self . _add_facl_rules ( ) self . _set_selinux_context ( ) self . _set_ownership ( ) self . _initialized = True logger . info ( "initialized" ) return logger . info ( "%r was already initialized" , self ) | create the directory if needed and configure it | 143 | 8 |
242,107 | def _set_selinux_context ( self ) : chcon_command_exists ( ) # FIXME: do this using python API if possible if self . selinux_context : logger . debug ( "setting SELinux context of %s to %s" , self . path , self . selinux_context ) run_cmd ( [ "chcon" , self . selinux_context , self . path ] ) if any ( [ self . selinux_user , self . selinux_role , self . selinux_type , self . selinux_range ] ) : logger . debug ( "setting SELinux fields of %s" , self . path , self . selinux_context ) # chcon [OPTION]... [-u USER] [-r ROLE] [-l RANGE] [-t TYPE] FILE... pairs = [ ( "-u" , self . selinux_user ) , ( "-r" , self . selinux_role ) , ( "-l" , self . selinux_range ) , ( "-t" , self . selinux_type ) ] c = [ "chcon" ] for p in pairs : if p [ 1 ] : c += p c += [ self . path ] run_cmd ( c ) | Set SELinux context or fields using chcon program . Raises CommandDoesNotExistException if the command is not present on the system . | 273 | 31 |
242,108 | def _set_mode ( self ) : if self . mode is not None : logger . debug ( "changing permission bits of %s to %s" , self . path , oct ( self . mode ) ) os . chmod ( self . path , self . mode ) | set permission bits if needed using python API os . chmod | 57 | 12 |
242,109 | def _add_facl_rules ( self ) : setfacl_command_exists ( ) # we are not using pylibacl b/c it's only for python 2 if self . facl_rules : logger . debug ( "adding ACLs %s to %s" , self . facl_rules , self . path ) r = "," . join ( self . facl_rules ) run_cmd ( [ "setfacl" , "-m" , r , self . path ] ) | Apply ACL rules on the directory using setfacl program . Raises CommandDoesNotExistException if the command is not present on the system . | 110 | 30 |
242,110 | def get_volume_options ( volumes ) : if not isinstance ( volumes , list ) : volumes = [ volumes ] volumes = [ Volume . create_from_tuple ( v ) for v in volumes ] result = [ ] for v in volumes : result += [ "-v" , str ( v ) ] return result | Generates volume options to run methods . | 67 | 8 |
242,111 | def layers ( self , rev = True ) : image_layers = [ PodmanImage ( None , identifier = x , pull_policy = PodmanImagePullPolicy . NEVER ) for x in self . get_layer_ids ( ) ] if not rev : image_layers . reverse ( ) return image_layers | Get list of PodmanImage for every layer in image | 68 | 11 |
242,112 | def get_metadata ( self ) : if self . _metadata is None : self . _metadata = ImageMetadata ( ) inspect_to_metadata ( self . _metadata , self . inspect ( refresh = True ) ) return self . _metadata | Provide metadata about this image . | 51 | 7 |
242,113 | def is_running ( self ) : try : return graceful_get ( self . inspect ( refresh = True ) , "State" , "Running" ) except subprocess . CalledProcessError : return False | returns True if the container is running | 42 | 8 |
242,114 | def is_port_open ( self , port , timeout = 2 ) : addresses = self . get_IPv4s ( ) if not addresses : return False return check_port ( port , host = addresses [ 0 ] , timeout = timeout ) | check if given port is open and receiving connections on container ip_address | 52 | 14 |
242,115 | def wait_for_port ( self , port , timeout = 10 , * * probe_kwargs ) : Probe ( timeout = timeout , fnc = functools . partial ( self . is_port_open , port ) , * * probe_kwargs ) . run ( ) | block until specified port starts accepting connections raises an exc ProbeTimeout if timeout is reached | 60 | 16 |
242,116 | def mount ( self , mount_point = None ) : cmd = [ "podman" , "mount" , self . _id or self . get_id ( ) ] output = run_cmd ( cmd , return_output = True ) . rstrip ( "\n\r" ) return output | mount container filesystem | 63 | 3 |
242,117 | def wait ( self , timeout = None ) : timeout = [ "--interval=%s" % timeout ] if timeout else [ ] cmdline = [ "podman" , "wait" ] + timeout + [ self . _id or self . get_id ( ) ] return run_cmd ( cmdline , return_output = True ) | Block until the container stops then return its exit code . Similar to the podman wait command . | 73 | 19 |
242,118 | def read_file ( self , file_path ) : try : with open ( self . p ( file_path ) ) as fd : return fd . read ( ) except IOError as ex : logger . error ( "error while accessing file %s: %r" , file_path , ex ) raise ConuException ( "There was an error while accessing file %s: %r" , file_path , ex ) | read file specified via file_path and return its content - raises an ConuException if there is an issue accessing the file | 91 | 25 |
242,119 | def get_file ( self , file_path , mode = "r" ) : return open ( self . p ( file_path ) , mode = mode ) | provide File object specified via file_path | 34 | 9 |
242,120 | def file_is_present ( self , file_path ) : p = self . p ( file_path ) if not os . path . exists ( p ) : return False if not os . path . isfile ( p ) : raise IOError ( "%s is not a file" % file_path ) return True | check if file file_path is present raises IOError if file_path is not a file | 67 | 19 |
242,121 | def directory_is_present ( self , directory_path ) : p = self . p ( directory_path ) if not os . path . exists ( p ) : return False if not os . path . isdir ( p ) : raise IOError ( "%s is not a directory" % directory_path ) return True | check if directory directory_path is present raise IOError if it s not a directory | 67 | 17 |
242,122 | def get_selinux_context ( self , file_path ) : # what if SELinux is not enabled? p = self . p ( file_path ) if not HAS_XATTR : raise RuntimeError ( "'xattr' python module is not available, hence we cannot " "determine the SELinux context for this file. " "In Fedora this module is available as python3-pyxattr -- " "other distributions may follow similar naming scheme." ) return xattr . get ( p , "security.selinux" ) | Get SELinux file context of the selected file . | 120 | 12 |
242,123 | def _wrapper ( self , q , start ) : try : func_name = self . fnc . __name__ except AttributeError : func_name = str ( self . fnc ) logger . debug ( "Running \"%s\" with parameters: \"%s\":\t%s/%s" % ( func_name , str ( self . kwargs ) , round ( time . time ( ) - start ) , self . timeout ) ) try : result = self . fnc ( * * self . kwargs ) # let's log only first 50 characters of the response logger . debug ( "callback result = %s" , str ( result ) [ : 50 ] ) q . put ( result ) except self . expected_exceptions as ex : logger . debug ( "expected exception was caught: %s" , ex ) q . put ( False ) except Exception as ex : logger . debug ( "adding exception %s to queue" , ex ) q . put ( ex ) | _wrapper checks return status of Probe . fnc and provides the result for process managing | 210 | 17 |
242,124 | def transport_param ( image ) : transports = { SkopeoTransport . CONTAINERS_STORAGE : "containers-storage:" , SkopeoTransport . DIRECTORY : "dir:" , SkopeoTransport . DOCKER : "docker://" , SkopeoTransport . DOCKER_ARCHIVE : "docker-archive" , SkopeoTransport . DOCKER_DAEMON : "docker-daemon:" , SkopeoTransport . OCI : "oci:" , SkopeoTransport . OSTREE : "ostree:" } transport = image . transport tag = image . tag repository = image . name path = image . path if not transport : transport = SkopeoTransport . DOCKER command = transports [ transport ] path_required = [ SkopeoTransport . DIRECTORY , SkopeoTransport . DOCKER_ARCHIVE , SkopeoTransport . OCI ] if transport in path_required and path is None : raise ValueError ( transports [ transport ] + " path is required to be specified" ) if transport == SkopeoTransport . DIRECTORY : return command + path if transport == SkopeoTransport . DOCKER_ARCHIVE : command += path if repository is None : return command command += ":" if transport in [ SkopeoTransport . CONTAINERS_STORAGE , SkopeoTransport . DOCKER , SkopeoTransport . DOCKER_ARCHIVE , transport . DOCKER_DAEMON ] : return command + repository + ":" + tag if transport == SkopeoTransport . OCI : return command + path + ":" + tag if transport == SkopeoTransport . OSTREE : return command + repository + ( "@" + path if path else "" ) raise ConuException ( "This transport is not supported" ) | Parse DockerImage info into skopeo parameter | 403 | 10 |
242,125 | def is_running ( self ) : cmd = [ "machinectl" , "--no-pager" , "status" , self . name ] try : subprocess . check_call ( cmd ) return True except subprocess . CalledProcessError as ex : logger . info ( "nspawn container %s is not running probably: %s" , self . name , ex . output ) return False | return True when container is running otherwise return False | 87 | 9 |
242,126 | def copy_from ( self , src , dest ) : logger . debug ( "copying %s from host to container at %s" , src , dest ) cmd = [ "machinectl" , "--no-pager" , "copy-from" , self . name , src , dest ] run_cmd ( cmd ) | copy a file or a directory from container or image to host system . | 73 | 14 |
242,127 | def delete ( self , force = False , volumes = False ) : try : self . image . rmi ( ) except ConuException as ime : if not force : raise ime else : pass | delete underlying image | 42 | 3 |
242,128 | def cleanup ( self , force = False , delete = False ) : # TODO: this method could be part of API, like: try : self . stop ( ) except subprocess . CalledProcessError as stop : logger . debug ( "unable to stop container via stop" , stop ) if not force : raise stop try : self . kill ( ) except subprocess . CalledProcessError as kill : logger . debug ( "unable to stop container via kill" , kill ) pass if delete : self . delete ( force = force ) | Stop container and delete image if given param delete | 112 | 9 |
242,129 | def run_systemdrun ( self , command , internal_background = False , return_full_dict = False , * * kwargs ) : internalkw = deepcopy ( kwargs ) or { } original_ignore_st = internalkw . get ( "ignore_status" , False ) original_return_st = internalkw . get ( "return_output" , False ) internalkw [ "ignore_status" ] = True internalkw [ "return_output" ] = False unit_name = constants . CONU_ARTIFACT_TAG + "unit_" + random_str ( ) opts = [ "-M" , self . name , "--unit" , unit_name ] lpath = "/var/tmp/{}" . format ( unit_name ) comout = { } if self . _run_systemdrun_decide ( ) : add_wait_var = "--wait" else : # keep service exist after it finish, to be able to read exit code add_wait_var = "-r" if internal_background : add_wait_var = "" if add_wait_var : opts . append ( add_wait_var ) # TODO: behave move similar to run_cmd function, unable to work with clean subprocess objects because systemd-run # does not support return stderr, stdout, and return code directly # find way how to do this in better way, machinectl shell is not possible # https://github.com/systemd/systemd/issues/5879 # https://github.com/systemd/systemd/issues/5878 bashworkaround = [ "/bin/bash" , "-c" , "({comm})>{path}.stdout 2>{path}.stderr" . format ( comm = " " . join ( command ) , path = lpath ) ] whole_cmd = [ "systemd-run" ] + opts + bashworkaround comout [ 'command' ] = command comout [ 'return_code' ] = run_cmd ( whole_cmd , * * internalkw ) or 0 if not internal_background : if not self . _run_systemdrun_decide ( ) : comout [ 'return_code' ] = self . _systemctl_wait_until_finish ( self . name , unit_name ) if self . is_running ( ) : self . copy_from ( "{pin}.stdout" . format ( pin = lpath ) , "{pin}.stdout" . format ( pin = lpath ) ) with open ( "{pin}.stdout" . format ( pin = lpath ) ) as f : comout [ 'stdout' ] = f . read ( ) self . copy_from ( "{pin}.stderr" . format ( pin = lpath ) , "{pin}.stderr" . format ( pin = lpath ) ) with open ( "{pin}.stderr" . format ( pin = lpath ) ) as f : comout [ 'stderr' ] = f . read ( ) logger . debug ( comout ) if not original_ignore_st and comout [ 'return_code' ] != 0 : raise subprocess . CalledProcessError ( comout [ 'command' ] , comout ) if return_full_dict : return comout if original_return_st : return comout [ 'stdout' ] else : return comout [ 'return_code' ] | execute command via systemd - run inside container | 752 | 8 |
242,130 | def _wait_for_machine_booted ( name , suffictinet_texts = None ) : # TODO: rewrite it using probes module in utils suffictinet_texts = suffictinet_texts or [ "systemd-logind" ] # optionally use: "Unit: machine" for foo in range ( constants . DEFAULT_RETRYTIMEOUT ) : time . sleep ( constants . DEFAULT_SLEEP ) out = run_cmd ( [ "machinectl" , "--no-pager" , "status" , name ] , ignore_status = True , return_output = True ) for restr in suffictinet_texts : if restr in out : time . sleep ( constants . DEFAULT_SLEEP ) return True raise ConuException ( "Unable to start machine %s within %d (machinectl status command dos not contain %s)" % ( name , constants . DEFAULT_RETRYTIMEOUT , suffictinet_texts ) ) | Internal method wait until machine is ready in common case means there is running systemd - logind | 222 | 18 |
242,131 | def _internal_reschedule ( callback , retry = 3 , sleep_time = constants . DEFAULT_SLEEP ) : for foo in range ( retry ) : container_process = callback [ 0 ] ( callback [ 1 ] , * callback [ 2 ] , * * callback [ 3 ] ) time . sleep ( sleep_time ) container_process . poll ( ) rcode = container_process . returncode if rcode is None : return container_process raise ConuException ( "Unable to start nspawn container - process failed for {}-times" . format ( retry ) ) | workaround method for internal_run_container method It sometimes fails because of Dbus or whatever so try to start it moretimes | 127 | 26 |
242,132 | def internal_run_container ( name , callback_method , foreground = False ) : if not foreground : logger . info ( "Stating machine (boot nspawn container) {}" . format ( name ) ) # wait until machine is booted when running at background, unable to execute commands without logind # in running container nspawn_process = NspawnContainer . _internal_reschedule ( callback_method ) NspawnContainer . _wait_for_machine_booted ( name ) logger . info ( "machine: %s starting finished" % name ) return nspawn_process else : logger . info ( "Stating machine (return process) {}" . format ( name ) ) return callback_method [ 0 ] ( callback_method [ 1 ] , * callback_method [ 2 ] , * * callback_method [ 3 ] ) | Internal method what runs container process | 176 | 6 |
242,133 | def get_container_output ( backend , image_name , command , image_tag = "latest" , additional_opts = None ) : image = backend . ImageClass ( image_name , tag = image_tag ) # FIXME: use run_via_api and make this a generic function c = image . run_via_binary ( DockerRunBuilder ( command = command , additional_opts = additional_opts ) ) try : c . wait ( ) return c . logs_unicode ( ) finally : c . stop ( ) c . wait ( ) c . delete ( ) | Create a throw - away container based on provided image and tag run the supplied command in it and return output . The container is stopped and removed after it exits . | 126 | 32 |
242,134 | def pull ( self ) : for json_e in self . d . pull ( repository = self . name , tag = self . tag , stream = True , decode = True ) : logger . debug ( json_e ) status = graceful_get ( json_e , "status" ) if status : logger . info ( status ) else : error = graceful_get ( json_e , "error" ) logger . error ( status ) raise ConuException ( "There was an error while pulling the image %s: %s" , self . name , error ) self . using_transport ( SkopeoTransport . DOCKER_DAEMON ) | Pull this image from registry . Raises an exception if the image is not found in the registry . | 139 | 20 |
242,135 | def using_transport ( self , transport = None , path = None , logs = True ) : if not transport : return self if self . transport == transport and self . path == path : return self path_required = [ SkopeoTransport . DIRECTORY , SkopeoTransport . DOCKER_ARCHIVE , SkopeoTransport . OCI ] if transport in path_required : if not path and logs : logging . debug ( "path not provided, temporary path was used" ) self . path = self . mount ( path ) . mount_point elif transport == SkopeoTransport . OSTREE : if path and not os . path . isabs ( path ) : raise ConuException ( "Path '" , path , "' for OSTree transport is not absolute" ) if not path and logs : logging . debug ( "path not provided, default /ostree/repo path was used" ) self . path = path else : if path and logs : logging . warning ( "path %s was ignored!" , path ) self . path = None self . transport = transport return self | change used transport | 236 | 3 |
242,136 | def save_to ( self , image ) : if not isinstance ( image , self . __class__ ) : raise ConuException ( "Invalid target image type" , type ( image ) ) self . copy ( image . name , image . tag , target_transport = image . transport , target_path = image . path , logs = False ) | Save this image to another DockerImage | 74 | 7 |
242,137 | def load_from ( self , image ) : if not isinstance ( image , self . __class__ ) : raise ConuException ( "Invalid source image type" , type ( image ) ) image . save_to ( self ) | Load from another DockerImage to this one | 49 | 8 |
242,138 | def skopeo_pull ( self ) : return self . copy ( self . name , self . tag , SkopeoTransport . DOCKER , SkopeoTransport . DOCKER_DAEMON ) . using_transport ( SkopeoTransport . DOCKER_DAEMON ) | Pull image from Docker to local Docker daemon using skopeo | 67 | 12 |
242,139 | def skopeo_push ( self , repository = None , tag = None ) : return self . copy ( repository , tag , SkopeoTransport . DOCKER_DAEMON , SkopeoTransport . DOCKER ) . using_transport ( SkopeoTransport . DOCKER ) | Push image from Docker daemon to Docker using skopeo | 67 | 11 |
242,140 | def copy ( self , repository = None , tag = None , source_transport = None , target_transport = SkopeoTransport . DOCKER , source_path = None , target_path = None , logs = True ) : if not repository : repository = self . name if not tag : tag = self . tag if self . tag else "latest" if target_transport == SkopeoTransport . OSTREE and tag and logs : logging . warning ( "tag was ignored" ) target = ( DockerImage ( repository , tag , pull_policy = DockerImagePullPolicy . NEVER ) . using_transport ( target_transport , target_path ) ) self . using_transport ( source_transport , source_path ) try : run_cmd ( [ "skopeo" , "copy" , transport_param ( self ) , transport_param ( target ) ] ) except subprocess . CalledProcessError : raise ConuException ( "There was an error while copying repository" , self . name ) return target | Copy this image | 222 | 3 |
242,141 | def tag_image ( self , repository = None , tag = None ) : if not ( repository or tag ) : raise ValueError ( "You need to specify either repository or tag." ) r = repository or self . name t = "latest" if not tag else tag self . d . tag ( image = self . get_full_name ( ) , repository = r , tag = t ) return DockerImage ( r , tag = t ) | Apply additional tags to the image or even add a new name | 92 | 12 |
242,142 | def inspect ( self , refresh = True ) : if refresh or not self . _inspect_data : identifier = self . _id or self . get_full_name ( ) if not identifier : raise ConuException ( "This image does not have a valid identifier." ) self . _inspect_data = self . d . inspect_image ( identifier ) return self . _inspect_data | provide metadata about the image ; flip refresh = True if cached metadata are enough | 84 | 16 |
242,143 | def has_pkgs_signed_with ( self , allowed_keys ) : if not allowed_keys or not isinstance ( allowed_keys , list ) : raise ConuException ( "allowed_keys must be a list" ) command = [ 'rpm' , '-qa' , '--qf' , '%{name} %{SIGPGP:pgpsig}\n' ] cont = self . run_via_binary ( command = command ) try : out = cont . logs_unicode ( ) [ : - 1 ] . split ( '\n' ) check_signatures ( out , allowed_keys ) finally : cont . stop ( ) cont . delete ( ) return True | Check signature of packages installed in image . Raises exception when | 150 | 12 |
242,144 | def build ( cls , path , tag = None , dockerfile = None ) : if not path : raise ConuException ( 'Please specify path to the directory containing the Dockerfile' ) client = get_client ( ) response = [ line for line in client . build ( path , rm = True , tag = tag , dockerfile = dockerfile , quiet = True ) ] if not response : raise ConuException ( 'Failed to get ID of image' ) # The expected output is just one line with image ID if len ( response ) > 1 : raise ConuException ( 'Build failed: ' + str ( response ) ) # get ID from output # b'{"stream":"sha256:39c7bac4e2da37983203df4fcf612a02de9e6f6456a7f3434d1fccbc9ad639a5\\n"}\r\n' response_utf = response [ 0 ] . decode ( 'utf-8' ) if response_utf [ : 11 ] != '{"stream":"' or response_utf [ - 6 : ] != '\\n"}\r\n' : raise ConuException ( 'Failed to parse ID from ' + response_utf ) image_id = response_utf [ 11 : - 6 ] return cls ( None , identifier = image_id ) | Build the image from the provided dockerfile in path | 292 | 10 |
242,145 | def layers ( self , rev = True ) : image_layers = [ DockerImage ( None , identifier = x , pull_policy = DockerImagePullPolicy . NEVER ) for x in self . get_layer_ids ( ) ] if not rev : image_layers . reverse ( ) return image_layers | Get list of DockerImage for every layer in image | 66 | 10 |
242,146 | def extend ( self , source , new_image_name , s2i_args = None ) : s2i_args = s2i_args or [ ] c = self . _s2i_command ( [ "build" ] + s2i_args + [ source , self . get_full_name ( ) ] ) if new_image_name : c . append ( new_image_name ) try : run_cmd ( c ) except subprocess . CalledProcessError as ex : raise ConuException ( "s2i build failed: %s" % ex ) return S2IDockerImage ( new_image_name ) | extend this s2i - enabled image using provided source raises ConuException if s2i build fails | 139 | 22 |
242,147 | def usage ( self ) : c = self . _s2i_command ( [ "usage" , self . get_full_name ( ) ] ) with open ( os . devnull , "w" ) as fd : process = subprocess . Popen ( c , stdout = fd , stderr = subprocess . PIPE ) _ , output = process . communicate ( ) retcode = process . poll ( ) if retcode : raise ConuException ( "`s2i usage` failed: %s" % output ) return output . decode ( "utf-8" ) . strip ( ) | Provide output of s2i usage | 132 | 8 |
242,148 | def http_request ( self , path = "/" , method = "GET" , host = None , port = None , json = False , data = None ) : host = host or '127.0.0.1' port = port or 8080 url = get_url ( host = host , port = port , path = path ) return self . http_session . request ( method , url , json = json , data = data ) | perform a HTTP request | 93 | 5 |
242,149 | def system_requirements ( ) : command_exists ( "systemd-nspawn" , [ "systemd-nspawn" , "--version" ] , "Command systemd-nspawn does not seems to be present on your system" "Do you have system with systemd" ) command_exists ( "machinectl" , [ "machinectl" , "--no-pager" , "--help" ] , "Command machinectl does not seems to be present on your system" "Do you have system with systemd" ) if "Enforcing" in run_cmd ( [ "getenforce" ] , return_output = True , ignore_status = True ) : logger . error ( "Please disable selinux (setenforce 0), selinux blocks some nspawn operations" "This may lead to strange behaviour" ) | Check if all necessary packages are installed on system | 185 | 9 |
242,150 | def _generate_id ( self ) : name = self . name . replace ( self . special_separator , "-" ) . replace ( "." , "-" ) loc = "\/" if self . location : loc = self . location _id = "{PREFIX}{SEP}{NAME}{HASH}{SEP}" . format ( PREFIX = constants . CONU_ARTIFACT_TAG , NAME = name , HASH = hashlib . sha512 ( loc ) . hexdigest ( ) [ : 10 ] , SEP = self . special_separator ) return _id | create new unique identifier | 128 | 4 |
242,151 | def pull ( self ) : if not os . path . exists ( CONU_IMAGES_STORE ) : os . makedirs ( CONU_IMAGES_STORE ) logger . debug ( "Try to pull: {} -> {}" . format ( self . location , self . local_location ) ) if not self . _is_local ( ) : compressed_location = self . local_location + ".xz" run_cmd ( [ "curl" , "-f" , "-L" , "-o" , compressed_location , self . location ] ) run_cmd ( [ "xz" , "-d" , compressed_location ] ) else : if self . location . endswith ( "xz" ) : compressed_location = self . local_location + ".xz" run_cmd ( [ "cp" , self . location , compressed_location ] ) run_cmd ( [ "xz" , "-d" , compressed_location ] ) else : run_cmd ( [ "cp" , self . location , self . local_location ] ) | Pull this image from URL . | 231 | 6 |
242,152 | def run_via_binary ( self , command = None , foreground = False , volumes = None , additional_opts = None , default_options = None , name = None , * args , * * kwargs ) : command = deepcopy ( command ) or [ ] volumes = deepcopy ( volumes ) or [ ] additional_opts = deepcopy ( additional_opts ) or [ ] internalkw = deepcopy ( kwargs ) or { } inernalargs = deepcopy ( args ) or [ ] if default_options is None : default_options = [ "-b" ] # TODO: reconsile parameters (changed from API definition) logger . info ( "run container via binary in background" ) machine_name = constants . CONU_ARTIFACT_TAG if name : machine_name += name else : machine_name += random_str ( ) if not foreground : # WARN: avoid to run boot without stderr and stdout to terminal, it breaks terminal, # it systemd-nspawn does some magic with console # TODO: is able to avoid this behaviour in better way? internalkw [ "stdout" ] = subprocess . PIPE internalkw [ "stderr" ] = subprocess . PIPE additional_opts += default_options if volumes : additional_opts += self . get_volume_options ( volumes = volumes ) logger . debug ( "starting NSPAWN" ) systemd_command = [ "systemd-nspawn" , "--machine" , machine_name , "-i" , self . local_location ] + additional_opts + command logger . debug ( "Start command: %s" % " " . join ( systemd_command ) ) callback_method = ( subprocess . Popen , systemd_command , inernalargs , internalkw ) self . container_process = NspawnContainer . internal_run_container ( name = machine_name , callback_method = callback_method , foreground = foreground ) if foreground : return self . container_process else : return NspawnContainer ( self , None , name = machine_name , start_process = self . container_process , start_action = callback_method ) | Create new instance NspawnContianer in case of not running at foreground in case foreground run return process object | 470 | 22 |
242,153 | def process_rpm_ql_line ( line_str , allowed_keys ) : try : name , key_str = line_str . split ( ' ' , 1 ) except ValueError : logger . error ( "Failed to split line '{0}" . format ( repr ( line_str ) ) ) return False if name in no_key_pkgs : return True if key_str == NONE_KEY : logger . error ( "Unsigned package {0}" . format ( name ) ) return False key_match = re . match ( KEY , key_str ) if not key_match : logger . error ( 'Could not process line "{0}"' . format ( line_str ) ) return False used_key = key_match . group ( 1 ) if used_key in allowed_keys : return True logger . error ( "Wrong key for '{0}' ({1})" . format ( name , used_key ) ) return False | Checks single line of rpm - ql for correct keys | 205 | 12 |
242,154 | def check_signatures ( pkg_list , allowed_keys ) : all_passed = True for line_str in pkg_list : all_passed &= process_rpm_ql_line ( line_str . strip ( ) , allowed_keys ) if not all_passed : raise PackageSignatureException ( 'Error while checking rpm signatures, see logs for more info' ) | Go through list of packages with signatures and check if all are properly signed | 85 | 14 |
242,155 | def get_ports ( self ) : ports = [ ] container_ports = self . inspect ( refresh = True ) [ "NetworkSettings" ] [ "Ports" ] if not container_ports : return ports for p in container_ports : # TODO: gracefullness, error handling ports . append ( p . split ( "/" ) [ 0 ] ) return ports | get ports specified in container metadata | 78 | 6 |
242,156 | def _clean_tmp_dirs ( self ) : def onerror ( fnc , path , excinfo ) : # we might not have rights to do this, the files could be owned by root self . logger . info ( "we were not able to remove temporary file %s: %s" , path , excinfo [ 1 ] ) shutil . rmtree ( self . tmpdir , onerror = onerror ) self . tmpdir = None global _backend_tmpdir _backend_tmpdir = None | Remove temporary dir associated with this backend instance . | 111 | 9 |
242,157 | def _clean ( self ) : if CleanupPolicy . EVERYTHING in self . cleanup : self . cleanup_containers ( ) self . cleanup_volumes ( ) self . cleanup_images ( ) self . _clean_tmp_dirs ( ) else : if CleanupPolicy . CONTAINERS in self . cleanup : self . cleanup_containers ( ) if CleanupPolicy . VOLUMES in self . cleanup : self . cleanup_volumes ( ) if CleanupPolicy . IMAGES in self . cleanup : self . cleanup_images ( ) if CleanupPolicy . TMP_DIRS in self . cleanup : self . _clean_tmp_dirs ( ) | Method for cleaning according to object cleanup policy value | 141 | 9 |
242,158 | def list_containers ( self ) : data = run_cmd ( [ "machinectl" , "list" , "--no-legend" , "--no-pager" ] , return_output = True ) output = [ ] reg = re . compile ( r"\s+" ) for line in data . split ( "\n" ) : stripped = line . strip ( ) if stripped : parts = reg . split ( stripped ) name = parts [ 0 ] output . append ( self . ContainerClass ( None , None , name = name ) ) return output | list all available nspawn containers | 124 | 6 |
242,159 | def list_images ( self ) : # Fedora-Cloud-Base-27-1.6.x86_64 raw no 601.7M Sun 2017-11-05 08:30:10 CET \ # Sun 2017-11-05 08:30:10 CET data = os . listdir ( CONU_IMAGES_STORE ) output = [ ] for name in data : output . append ( self . ImageClass ( name , pull_policy = ImagePullPolicy . NEVER ) ) return output | list all available nspawn images | 106 | 6 |
242,160 | def cleanup_containers ( self ) : for cont in self . list_containers ( ) : if CONU_ARTIFACT_TAG in cont . name : try : logger . debug ( "removing container %s created by conu" , cont ) # TODO: move this functionality to container.delete run_cmd ( [ "machinectl" , "terminate" , cont . name ] ) except subprocess . CalledProcessError as e : logger . error ( "unable to remove container %s: %r" , cont , e ) | stop all container created by conu | 120 | 7 |
242,161 | def check_port ( port , host , timeout = 10 ) : logger . info ( "trying to open connection to %s:%s" , host , port ) sock = socket . socket ( socket . AF_INET , socket . SOCK_STREAM ) try : sock . settimeout ( timeout ) result = sock . connect_ex ( ( host , port ) ) logger . info ( "was connection successful? errno: %s" , result ) if result == 0 : logger . debug ( 'port is opened: %s:%s' % ( host , port ) ) return True else : logger . debug ( 'port is closed: %s:%s' % ( host , port ) ) return False finally : sock . close ( ) | connect to port on host and return True on success | 160 | 10 |
242,162 | def get_selinux_status ( ) : getenforce_command_exists ( ) # alternatively, we could read directly from /sys/fs/selinux/{enforce,status}, but status is # empty (why?) and enforce doesn't tell whether SELinux is disabled or not o = run_cmd ( [ "getenforce" ] , return_output = True ) . strip ( ) # libselinux-utils logger . debug ( "SELinux is %r" , o ) return o | get SELinux status of host | 115 | 8 |
242,163 | def random_str ( size = 10 ) : return '' . join ( random . choice ( string . ascii_lowercase ) for _ in range ( size ) ) | create random string of selected size | 36 | 6 |
242,164 | def run_cmd ( cmd , return_output = False , ignore_status = False , log_output = True , * * kwargs ) : logger . debug ( 'command: "%s"' % ' ' . join ( cmd ) ) process = subprocess . Popen ( cmd , stdout = subprocess . PIPE , stderr = subprocess . STDOUT , universal_newlines = True , * * kwargs ) output = process . communicate ( ) [ 0 ] if log_output : logger . debug ( output ) if process . returncode > 0 : if ignore_status : if return_output : return output else : return process . returncode else : raise subprocess . CalledProcessError ( cmd = cmd , returncode = process . returncode ) if return_output : return output | run provided command on host system using the same user as you invoked this code raises subprocess . CalledProcessError if it fails | 171 | 25 |
242,165 | def command_exists ( command , noop_invocation , exc_msg ) : try : found = bool ( shutil . which ( command ) ) # py3 only except AttributeError : # py2 branch try : p = subprocess . Popen ( noop_invocation , stdout = subprocess . PIPE , stderr = subprocess . PIPE ) except OSError : found = False else : stdout , stderr = p . communicate ( ) found = p . returncode == 0 if not found : logger . error ( "`%s` exited with a non-zero return code (%s)" , noop_invocation , p . returncode ) logger . error ( "command stdout = %s" , stdout ) logger . error ( "command stderr = %s" , stderr ) if not found : raise CommandDoesNotExistException ( exc_msg ) return True | Verify that the provided command exists . Raise CommandDoesNotExistException in case of an error or if the command does not exist . | 202 | 28 |
242,166 | def check_docker_command_works ( ) : try : out = subprocess . check_output ( [ "docker" , "version" ] , stderr = subprocess . STDOUT , universal_newlines = True ) except OSError : logger . info ( "docker binary is not available" ) raise CommandDoesNotExistException ( "docker command doesn't seem to be available on your system. " "Please install and configure docker." ) except subprocess . CalledProcessError as ex : logger . error ( "exception: %s" , ex ) logger . error ( "rc: %s, output: %r" , ex . returncode , ex . output ) raise ConuException ( "`docker version` call failed, it seems that your docker daemon is misconfigured or " "this user can't communicate with dockerd." ) else : logger . info ( "docker environment info: %r" , out ) return True | Verify that dockerd and docker binary works fine . This is performed by calling docker version which also checks server API version . | 201 | 25 |
242,167 | def export_docker_container_to_directory ( client , container , path ) : # we don't do this because of a bug in docker: # https://bugzilla.redhat.com/show_bug.cgi?id=1570828 # stream, _ = client.get_archive(container.get_id(), "/") check_docker_command_works ( ) export_p = subprocess . Popen ( [ "docker" , "export" , container . get_id ( ) ] , stderr = subprocess . PIPE , stdout = subprocess . PIPE ) try : os . mkdir ( path , 0o0700 ) except OSError as ex : if ex . errno == errno . EEXIST : logger . debug ( "mount point %s exists already" , path ) else : logger . error ( "mount point %s can't be created: %s" , path , ex ) raise logger . debug ( "about to untar the image" ) # we can't use tarfile because of --no-same-owner: files in containers are owned # by root and tarfile is trying to `chown 0 file` when running as an unpriv user p = subprocess . Popen ( [ "tar" , "--no-same-owner" , "-C" , path , "-x" ] , stdin = subprocess . PIPE , stderr = subprocess . PIPE , ) while True : data = export_p . stdout . read ( 1048576 ) if not data : break p . stdin . write ( data ) p . stdin . close ( ) p . wait ( ) export_p . wait ( ) if export_p . returncode : logger . error ( export_p . stderr . read ( ) ) raise ConuException ( "Failed to get rootfs of %s from docker." % container ) if p . returncode : logger . error ( p . stderr . read ( ) ) raise ConuException ( "Failed to unpack the archive." ) logger . debug ( "image is unpacked" ) | take selected docker container create an archive out of it and unpack it to a selected location | 459 | 18 |
242,168 | def get_version ( self ) : raw_version = run_cmd ( [ "podman" , "version" ] , return_output = True ) regex = re . compile ( r"Version:\s*(\d+)\.(\d+)\.(\d+)" ) match = regex . findall ( raw_version ) try : return match [ 0 ] except IndexError : logger . error ( "unable to parse version from `podman version`" ) return | return 3 - tuple of version info or None | 101 | 9 |
242,169 | def list_containers ( self ) : containers = [ ] for container in self . _list_podman_containers ( ) : identifier = container [ "ID" ] name = container [ "Names" ] image_name = container [ "Image" ] try : image_name , image_tag = parse_reference ( image_name ) except ( IndexError , TypeError ) : image_name , image_tag = None , None image = PodmanImage ( image_name , tag = image_tag , identifier = None ) container = PodmanContainer ( image , identifier , name = name ) containers . append ( container ) return containers | List all available podman containers . | 135 | 7 |
242,170 | def list_images ( self ) : images = [ ] for image in self . _list_all_podman_images ( ) : try : i_name , tag = parse_reference ( image [ "names" ] [ 0 ] ) except ( IndexError , TypeError ) : i_name , tag = None , None d_im = PodmanImage ( i_name , tag = tag , identifier = image [ "id" ] , pull_policy = PodmanImagePullPolicy . NEVER ) images . append ( d_im ) return images | List all available podman images . | 116 | 7 |
242,171 | def inspect_to_metadata ( metadata_object , inspect_data ) : identifier = graceful_get ( inspect_data , 'Id' ) if identifier : if ":" in identifier : # format of image name from docker inspect: # sha256:8f0e66c924c0c169352de487a3c2463d82da24e9442fc097dddaa5f800df7129 metadata_object . identifier = identifier . split ( ':' ) [ 1 ] else : # container metadata_object . identifier = identifier # format of Environment Variables from docker inspect: # ['DISTTAG=f26container', 'FGC=f26'] raw_env_vars = graceful_get ( inspect_data , "Config" , "Env" ) or [ ] if raw_env_vars : metadata_object . env_variables = { } for env_variable in raw_env_vars : splits = env_variable . split ( "=" , 1 ) name = splits [ 0 ] value = splits [ 1 ] if len ( splits ) > 1 else None if value is not None : metadata_object . env_variables . update ( { name : value } ) raw_exposed_ports = graceful_get ( inspect_data , "Config" , "ExposedPorts" ) if raw_exposed_ports : metadata_object . exposed_ports = list ( raw_exposed_ports . keys ( ) ) # specific to images raw_repo_tags = graceful_get ( inspect_data , 'RepoTags' ) if raw_repo_tags : metadata_object . name = raw_repo_tags [ 0 ] metadata_object . labels = graceful_get ( inspect_data , 'Config' , 'Labels' ) metadata_object . command = graceful_get ( inspect_data , 'Config' , 'Cmd' ) metadata_object . creation_timestamp = inspect_data . get ( 'Created' , None ) # specific to images metadata_object . image_names = inspect_data . get ( 'RepoTags' , None ) # specific to images digests = inspect_data . get ( "RepoDigests" , None ) if digests : metadata_object . repo_digests = digests metadata_object . digest = digests [ 0 ] return metadata_object | process data from docker inspect and update provided metadata object | 508 | 10 |
242,172 | def inspect_to_container_metadata ( c_metadata_object , inspect_data , image_instance ) : inspect_to_metadata ( c_metadata_object , inspect_data ) status = ContainerStatus . get_from_docker ( graceful_get ( inspect_data , "State" , "Status" ) , graceful_get ( inspect_data , "State" , "ExitCode" ) , ) image_id = graceful_get ( inspect_data , "Image" ) if image_id : if ":" in image_id : # format of image name from docker inspect: # sha256:8f0e66c924c0c169352de487a3c2463d82da24e9442fc097dddaa5f800df7129 image_instance . identifier = image_id . split ( ':' ) [ 1 ] else : # container image_instance . identifier = image_id # format of Port mappings from docker inspect: # {'12345/tcp': [ # {'HostIp': '0.0.0.0', 'HostPort': '123'}, # {'HostIp': '0.0.0.0', 'HostPort': '1234'}]} port_mappings = dict ( ) raw_port_mappings = graceful_get ( inspect_data , 'HostConfig' , 'PortBindings' ) or { } for key , value in raw_port_mappings . items ( ) : for item in value : logger . debug ( "parsing ports: key = %s, item = %s" , key , item ) li = port_mappings . get ( key , [ ] ) raw_host_port = item [ 'HostPort' ] if raw_host_port == "" : int_port = None else : try : int_port = int ( raw_host_port ) except ValueError as ex : logger . error ( "could not parse port: %s" , ex ) continue li . append ( int_port ) port_mappings . update ( { key : li } ) c_metadata_object . status = status c_metadata_object . port_mappings = port_mappings c_metadata_object . hostname = graceful_get ( inspect_data , 'Config' , 'Hostname' ) raw_networks = graceful_get ( inspect_data , "NetworkSettings" , "Networks" ) . values ( ) if raw_networks : c_metadata_object . ipv4_addresses = [ graceful_get ( x , "IPAddress" ) for x in raw_networks if graceful_get ( x , "IPAddress" ) ] c_metadata_object . ipv6_addresses = [ graceful_get ( x , "GlobalIPv6Address" ) for x in raw_networks if graceful_get ( x , "GlobalIPv6Address" ) ] c_metadata_object . image = image_instance name = graceful_get ( inspect_data , "Name" ) if name : name = name [ 1 : ] if name . startswith ( "/" ) else name # remove / at the beginning c_metadata_object . name = name return c_metadata_object | process data from docker container inspect and update provided container metadata object | 701 | 12 |
242,173 | def list_pods ( self , namespace = None ) : if namespace : return [ Pod ( name = p . metadata . name , namespace = namespace , spec = p . spec ) for p in self . core_api . list_namespaced_pod ( namespace , watch = False ) . items ] return [ Pod ( name = p . metadata . name , namespace = p . metadata . namespace , spec = p . spec ) for p in self . core_api . list_pod_for_all_namespaces ( watch = False ) . items ] | List all available pods . | 116 | 5 |
242,174 | def list_services ( self , namespace = None ) : if namespace : return [ Service ( name = s . metadata . name , ports = k8s_ports_to_metadata_ports ( s . spec . ports ) , namespace = s . metadata . namespace , labels = s . metadata . labels , selector = s . spec . selector , spec = s . spec ) for s in self . core_api . list_namespaced_service ( namespace , watch = False ) . items ] return [ Service ( name = s . metadata . name , ports = k8s_ports_to_metadata_ports ( s . spec . ports ) , namespace = s . metadata . namespace , labels = s . metadata . labels , selector = s . spec . selector , spec = s . spec ) for s in self . core_api . list_service_for_all_namespaces ( watch = False ) . items ] | List all available services . | 193 | 5 |
242,175 | def list_deployments ( self , namespace = None ) : if namespace : return [ Deployment ( name = d . metadata . name , namespace = d . metadata . namespace , labels = d . metadata . labels , selector = d . spec . selector , image_metadata = ImageMetadata ( name = d . spec . template . spec . containers [ 0 ] . name . split ( "-" , 1 ) [ 0 ] ) ) for d in self . apps_api . list_namespaced_deployment ( namespace , watch = False ) . items ] return [ Deployment ( name = d . metadata . name , namespace = d . metadata . namespace , labels = d . metadata . labels , selector = d . spec . selector , image_metadata = ImageMetadata ( name = d . spec . template . spec . containers [ 0 ] . name . split ( "-" , 1 ) [ 0 ] ) ) for d in self . apps_api . list_deployment_for_all_namespaces ( watch = False ) . items ] | List all available deployments . | 221 | 5 |
242,176 | def get_url ( path , host , port , method = "http" ) : return urlunsplit ( ( method , "%s:%s" % ( host , port ) , path , "" , "" ) ) | make url from path host and port | 47 | 7 |
242,177 | def list_containers ( self ) : result = [ ] for c in self . d . containers ( all = True ) : name = None names = c . get ( "Names" , None ) if names : name = names [ 0 ] i = DockerImage ( None , identifier = c [ "ImageID" ] ) cont = DockerContainer ( i , c [ "Id" ] , name = name ) # TODO: docker_client.containers produces different metadata than inspect inspect_to_container_metadata ( cont . metadata , c , i ) result . append ( cont ) return result | List all available docker containers . | 125 | 6 |
242,178 | def list_images ( self ) : response = [ ] for im in self . d . images ( ) : try : i_name , tag = parse_reference ( im [ "RepoTags" ] [ 0 ] ) except ( IndexError , TypeError ) : i_name , tag = None , None d_im = DockerImage ( i_name , tag = tag , identifier = im [ "Id" ] , pull_policy = DockerImagePullPolicy . NEVER ) inspect_to_metadata ( d_im . metadata , im ) response . append ( d_im ) return response | List all available docker images . | 124 | 6 |
242,179 | def match ( ctx , features , profile , gps_precision ) : access_token = ( ctx . obj and ctx . obj . get ( 'access_token' ) ) or None features = list ( features ) if len ( features ) != 1 : raise click . BadParameter ( "Mapmatching requires a single LineString feature" ) service = mapbox . MapMatcher ( access_token = access_token ) try : res = service . match ( features [ 0 ] , profile = profile , gps_precision = gps_precision ) except mapbox . errors . ValidationError as exc : raise click . BadParameter ( str ( exc ) ) if res . status_code == 200 : stdout = click . open_file ( '-' , 'w' ) click . echo ( res . text , file = stdout ) else : raise MapboxCLIException ( res . text . strip ( ) ) | Mapbox Map Matching API lets you use snap your GPS traces to the OpenStreetMap road and path network . | 199 | 23 |
242,180 | def staticmap ( ctx , mapid , output , features , lat , lon , zoom , size ) : access_token = ( ctx . obj and ctx . obj . get ( 'access_token' ) ) or None if features : features = list ( cligj . normalize_feature_inputs ( None , 'features' , [ features ] ) ) service = mapbox . Static ( access_token = access_token ) try : res = service . image ( mapid , lon = lon , lat = lat , z = zoom , width = size [ 0 ] , height = size [ 1 ] , features = features , sort_keys = True ) except mapbox . errors . ValidationError as exc : raise click . BadParameter ( str ( exc ) ) if res . status_code == 200 : output . write ( res . content ) else : raise MapboxCLIException ( res . text . strip ( ) ) | Generate static map images from existing Mapbox map ids . Optionally overlay with geojson features . | 201 | 22 |
242,181 | def main_group ( ctx , verbose , quiet , access_token , config ) : ctx . obj = { } config = config or os . path . join ( click . get_app_dir ( 'mapbox' ) , 'mapbox.ini' ) cfg = read_config ( config ) if cfg : ctx . obj [ 'config_file' ] = config ctx . obj [ 'cfg' ] = cfg ctx . default_map = cfg verbosity = ( os . environ . get ( 'MAPBOX_VERBOSE' ) or ctx . lookup_default ( 'mapbox.verbosity' ) or 0 ) if verbose or quiet : verbosity = verbose - quiet verbosity = int ( verbosity ) configure_logging ( verbosity ) access_token = ( access_token or os . environ . get ( 'MAPBOX_ACCESS_TOKEN' ) or os . environ . get ( 'MapboxAccessToken' ) or ctx . lookup_default ( 'mapbox.access-token' ) ) ctx . obj [ 'verbosity' ] = verbosity ctx . obj [ 'access_token' ] = access_token | This is the command line interface to Mapbox web services . | 261 | 12 |
242,182 | def config ( ctx ) : ctx . default_map = ctx . obj [ 'cfg' ] click . echo ( "CLI:" ) click . echo ( "access-token = {0}" . format ( ctx . obj [ 'access_token' ] ) ) click . echo ( "verbosity = {0}" . format ( ctx . obj [ 'verbosity' ] ) ) click . echo ( "" ) click . echo ( "Environment:" ) if 'MAPBOX_ACCESS_TOKEN' in os . environ : click . echo ( "MAPBOX_ACCESS_TOKEN = {0}" . format ( os . environ [ 'MAPBOX_ACCESS_TOKEN' ] ) ) if 'MapboxAccessToken' in os . environ : click . echo ( "MapboxAccessToken = {0}" . format ( os . environ [ 'MapboxAccessToken' ] ) ) if 'MAPBOX_VERBOSE' in os . environ : click . echo ( "MAPBOX_VERBOSE = {0}" . format ( os . environ [ 'MAPBOX_VERBOSE' ] ) ) click . echo ( "" ) if 'config_file' in ctx . obj : click . echo ( "Config file {0}:" . format ( ctx . obj [ 'config_file' ] ) ) for key , value in ctx . default_map . items ( ) : click . echo ( "{0} = {1}" . format ( key , value ) ) click . echo ( "" ) | Show access token and other configuration settings . | 333 | 8 |
242,183 | def echo_headers ( headers , file = None ) : for k , v in sorted ( headers . items ( ) ) : click . echo ( "{0}: {1}" . format ( k . title ( ) , v ) , file = file ) click . echo ( file = file ) | Echo headers sorted . | 60 | 5 |
242,184 | def datasets ( ctx ) : access_token = ( ctx . obj and ctx . obj . get ( 'access_token' ) ) or None service = mapbox . Datasets ( access_token = access_token ) ctx . obj [ 'service' ] = service | Read and write GeoJSON from Mapbox - hosted datasets | 61 | 11 |
242,185 | def create ( ctx , name , description ) : service = ctx . obj . get ( 'service' ) res = service . create ( name , description ) if res . status_code == 200 : click . echo ( res . text ) else : raise MapboxCLIException ( res . text . strip ( ) ) | Create a new dataset . | 68 | 5 |
242,186 | def read_dataset ( ctx , dataset , output ) : stdout = click . open_file ( output , 'w' ) service = ctx . obj . get ( 'service' ) res = service . read_dataset ( dataset ) if res . status_code == 200 : click . echo ( res . text , file = stdout ) else : raise MapboxCLIException ( res . text . strip ( ) ) | Read the attributes of a dataset . | 94 | 7 |
242,187 | def list_features ( ctx , dataset , reverse , start , limit , output ) : stdout = click . open_file ( output , 'w' ) service = ctx . obj . get ( 'service' ) res = service . list_features ( dataset , reverse , start , limit ) if res . status_code == 200 : click . echo ( res . text , file = stdout ) else : raise MapboxCLIException ( res . text . strip ( ) ) | Get features of a dataset . | 102 | 6 |
242,188 | def put_feature ( ctx , dataset , fid , feature , input ) : if feature is None : stdin = click . open_file ( input , 'r' ) feature = stdin . read ( ) feature = json . loads ( feature ) service = ctx . obj . get ( 'service' ) res = service . update_feature ( dataset , fid , feature ) if res . status_code == 200 : click . echo ( res . text ) else : raise MapboxCLIException ( res . text . strip ( ) ) | Create or update a dataset feature . | 114 | 7 |
242,189 | def delete_feature ( ctx , dataset , fid ) : service = ctx . obj . get ( 'service' ) res = service . delete_feature ( dataset , fid ) if res . status_code != 204 : raise MapboxCLIException ( res . text . strip ( ) ) | Delete a feature . | 62 | 4 |
242,190 | def create_tileset ( ctx , dataset , tileset , name ) : access_token = ( ctx . obj and ctx . obj . get ( 'access_token' ) ) or None service = mapbox . Uploader ( access_token = access_token ) uri = "mapbox://datasets/{username}/{dataset}" . format ( username = tileset . split ( '.' ) [ 0 ] , dataset = dataset ) res = service . create ( uri , tileset , name ) if res . status_code == 201 : click . echo ( res . text ) else : raise MapboxCLIException ( res . text . strip ( ) ) | Create a vector tileset from a dataset . | 149 | 9 |
242,191 | def directions ( ctx , features , profile , alternatives , geometries , overview , steps , continue_straight , waypoint_snapping , annotations , language , output ) : access_token = ( ctx . obj and ctx . obj . get ( "access_token" ) ) or None service = mapbox . Directions ( access_token = access_token ) # The Directions SDK expects False to be # a bool, not a str. if overview == "False" : overview = False # When using waypoint snapping, the # Directions SDK expects features to be # a list, not a generator. if waypoint_snapping is not None : features = list ( features ) if annotations : annotations = annotations . split ( "," ) stdout = click . open_file ( output , "w" ) try : res = service . directions ( features , profile = profile , alternatives = alternatives , geometries = geometries , overview = overview , steps = steps , continue_straight = continue_straight , waypoint_snapping = waypoint_snapping , annotations = annotations , language = language ) except mapbox . errors . ValidationError as exc : raise click . BadParameter ( str ( exc ) ) if res . status_code == 200 : if geometries == "geojson" : click . echo ( json . dumps ( res . geojson ( ) ) , file = stdout ) else : click . echo ( res . text , file = stdout ) else : raise MapboxCLIException ( res . text . strip ( ) ) | The Mapbox Directions API will show you how to get where you re going . | 329 | 16 |
242,192 | def upload ( ctx , tileset , datasource , name , patch ) : access_token = ( ctx . obj and ctx . obj . get ( 'access_token' ) ) or None service = mapbox . Uploader ( access_token = access_token ) if name is None : name = tileset . split ( "." ) [ - 1 ] if datasource . startswith ( 'https://' ) : # Skip staging. Note this this only works for specific buckets. res = service . create ( datasource , tileset , name = name , patch = patch ) else : sourcefile = click . File ( 'rb' ) ( datasource ) if hasattr ( sourcefile , 'name' ) : filelen = ( 1 if sourcefile . name == '<stdin>' else os . stat ( sourcefile . name ) . st_size ) else : filelen = ( len ( sourcefile . getbuffer ( ) ) if hasattr ( sourcefile , 'getbuffer' ) else 1 ) with click . progressbar ( length = filelen , label = 'Uploading data source' , fill_char = "#" , empty_char = '-' , file = sys . stderr ) as bar : def callback ( num_bytes ) : """Update the progress bar""" bar . update ( num_bytes ) res = service . upload ( sourcefile , tileset , name , patch = patch , callback = callback ) if res . status_code == 201 : click . echo ( res . text ) else : raise MapboxCLIException ( res . text . strip ( ) ) | Upload data to Mapbox accounts . | 340 | 7 |
242,193 | def _save_notebook ( self , os_path , nb ) : with self . atomic_writing ( os_path , encoding = 'utf-8' ) as f : if ftdetect ( os_path ) == 'notebook' : nbformat . write ( nb , f , version = nbformat . NO_CONVERT ) elif ftdetect ( os_path ) == 'markdown' : nbjson = nbformat . writes ( nb , version = nbformat . NO_CONVERT ) markdown = convert ( nbjson , informat = 'notebook' , outformat = 'markdown' , strip_outputs = self . strip_outputs ) f . write ( markdown ) | Save a notebook to an os_path . | 162 | 9 |
242,194 | def ftdetect ( filename ) : _ , extension = os . path . splitext ( filename ) md_exts = [ '.md' , '.markdown' , '.mkd' , '.mdown' , '.mkdn' , '.Rmd' ] nb_exts = [ '.ipynb' ] if extension in md_exts : return 'markdown' elif extension in nb_exts : return 'notebook' else : return None | Determine if filename is markdown or notebook based on the file extension . | 103 | 16 |
242,195 | def strip ( notebook ) : for cell in notebook . cells : if cell . cell_type == 'code' : cell . outputs = [ ] cell . execution_count = None | Remove outputs from a notebook . | 37 | 6 |
242,196 | def get_caption_comments ( content ) : if not content . startswith ( '## fig:' ) : return None , None content = content . splitlines ( ) id = content [ 0 ] . strip ( '## ' ) caption = [ ] for line in content [ 1 : ] : if not line . startswith ( '# ' ) or line . startswith ( '##' ) : break else : caption . append ( line . lstrip ( '# ' ) . rstrip ( ) ) # add " around the caption. TODO: consider doing this upstream # in pandoc-attributes caption = '"' + ' ' . join ( caption ) + '"' return id , caption | Retrieve an id and a caption from a code cell . | 149 | 12 |
242,197 | def new_code_block ( self , * * kwargs ) : proto = { 'content' : '' , 'type' : self . code , 'IO' : '' , 'attributes' : '' } proto . update ( * * kwargs ) return proto | Create a new code block . | 58 | 6 |
242,198 | def new_text_block ( self , * * kwargs ) : proto = { 'content' : '' , 'type' : self . markdown } proto . update ( * * kwargs ) return proto | Create a new text block . | 46 | 6 |
242,199 | def pre_process_code_block ( block ) : if 'indent' in block and block [ 'indent' ] : indent = r'^' + block [ 'indent' ] block [ 'content' ] = re . sub ( indent , '' , block [ 'icontent' ] , flags = re . MULTILINE ) | Preprocess the content of a code block modifying the code block in place . | 74 | 15 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.