idx
int64
0
252k
question
stringlengths
48
5.28k
target
stringlengths
5
1.23k
243,100
def wait_for_port(self, port, timeout=10, **probe_kwargs):
    """Block until `port` starts accepting connections.

    :param port: port number to probe
    :param timeout: seconds to wait before giving up
    :param probe_kwargs: extra keyword arguments forwarded to Probe
    :raises ProbeTimeout: if the timeout is reached
    """
    checker = functools.partial(self.is_port_open, port)
    probe = Probe(timeout=timeout, fnc=checker, **probe_kwargs)
    probe.run()
Block until the specified port starts accepting connections; raises a ProbeTimeout exception if the timeout is reached.
243,101
def mount(self, mount_point=None):
    """Mount the container filesystem and return the mount path.

    NOTE(review): `mount_point` is accepted but ignored by this
    implementation -- confirm against callers.
    """
    identifier = self._id or self.get_id()
    raw = run_cmd(["podman", "mount", identifier], return_output=True)
    return raw.rstrip("\n\r")
mount container filesystem
243,102
def wait(self, timeout=None):
    """Block until the container stops, then return its exit code.

    Mirrors the ``podman wait`` command; `timeout` is forwarded as the
    ``--interval`` option when given.
    """
    interval_opts = ["--interval=%s" % timeout] if timeout else []
    cmdline = ["podman", "wait"] + interval_opts + [self._id or self.get_id()]
    return run_cmd(cmdline, return_output=True)
Block until the container stops then return its exit code . Similar to the podman wait command .
243,103
def read_file(self, file_path):
    """Read the file at `file_path` and return its content.

    :param file_path: path to the file, resolved via self.p()
    :return: str, content of the file
    :raises ConuException: if the file cannot be accessed
    """
    try:
        with open(self.p(file_path)) as fd:
            return fd.read()
    except IOError as ex:
        logger.error("error while accessing file %s: %r", file_path, ex)
        # BUG FIX: previously the format string and its arguments were
        # passed as separate exception args and never interpolated;
        # format the message explicitly instead.
        raise ConuException(
            "There was an error while accessing file %s: %r" % (file_path, ex))
Read the file specified via file_path and return its content; raises a ConuException if there is an issue accessing the file.
243,104
def get_file(self, file_path, mode="r"):
    """Open `file_path` and return the resulting file object.

    :param mode: mode string passed straight through to `open`
    """
    resolved = self.p(file_path)
    return open(resolved, mode=mode)
provide File object specified via file_path
243,105
def file_is_present(self, file_path):
    """Return True if `file_path` exists and is a regular file.

    Returns False when nothing exists at the path.
    :raises IOError: when the path exists but is not a regular file
    """
    target = self.p(file_path)
    if os.path.exists(target):
        if os.path.isfile(target):
            return True
        raise IOError("%s is not a file" % file_path)
    return False
check if file file_path is present raises IOError if file_path is not a file
243,106
def directory_is_present(self, directory_path):
    """Return True if `directory_path` exists and is a directory.

    Returns False when nothing exists at the path.
    :raises IOError: when the path exists but is not a directory
    """
    target = self.p(directory_path)
    if os.path.exists(target):
        if os.path.isdir(target):
            return True
        raise IOError("%s is not a directory" % directory_path)
    return False
Check if directory directory_path is present; raises IOError if it is not a directory.
243,107
def get_selinux_context ( self , file_path ) : p = self . p ( file_path ) if not HAS_XATTR : raise RuntimeError ( "'xattr' python module is not available, hence we cannot " "determine the SELinux context for this file. " "In Fedora this module is available as python3-pyxattr -- " "other distributions may follow similar...
Get SELinux file context of the selected file .
243,108
def _wrapper ( self , q , start ) : try : func_name = self . fnc . __name__ except AttributeError : func_name = str ( self . fnc ) logger . debug ( "Running \"%s\" with parameters: \"%s\":\t%s/%s" % ( func_name , str ( self . kwargs ) , round ( time . time ( ) - start ) , self . timeout ) ) try : result = self . fnc ( ...
_wrapper checks return status of Probe . fnc and provides the result for process managing
243,109
def transport_param ( image ) : transports = { SkopeoTransport . CONTAINERS_STORAGE : "containers-storage:" , SkopeoTransport . DIRECTORY : "dir:" , SkopeoTransport . DOCKER : "docker://" , SkopeoTransport . DOCKER_ARCHIVE : "docker-archive" , SkopeoTransport . DOCKER_DAEMON : "docker-daemon:" , SkopeoTransport . OCI :...
Parse DockerImage info into skopeo parameter
243,110
def is_running(self):
    """Return True when the nspawn container is running, False otherwise."""
    status_cmd = ["machinectl", "--no-pager", "status", self.name]
    try:
        subprocess.check_call(status_cmd)
    except subprocess.CalledProcessError as ex:
        logger.info("nspawn container %s is not running probably: %s",
                    self.name, ex.output)
        return False
    return True
return True when container is running otherwise return False
243,111
def copy_from(self, src, dest):
    """Copy a file or a directory from the container to the host system.

    :param src: path inside the container
    :param dest: destination path on the host
    """
    # BUG FIX: `machinectl copy-from` copies OUT of the container; the
    # original debug message had the direction reversed.
    logger.debug("copying %s from container to host at %s", src, dest)
    cmd = ["machinectl", "--no-pager", "copy-from", self.name, src, dest]
    run_cmd(cmd)
copy a file or a directory from container or image to host system .
243,112
def delete(self, force=False, volumes=False):
    """Delete the underlying image; swallow failures when `force` is set.

    NOTE(review): `volumes` is accepted but unused in this implementation.
    """
    try:
        self.image.rmi()
    except ConuException:
        if not force:
            raise
delete underlying image
243,113
def cleanup ( self , force = False , delete = False ) : try : self . stop ( ) except subprocess . CalledProcessError as stop : logger . debug ( "unable to stop container via stop" , stop ) if not force : raise stop try : self . kill ( ) except subprocess . CalledProcessError as kill : logger . debug ( "unable to stop c...
Stop container and delete image if given param delete
243,114
def run_systemdrun ( self , command , internal_background = False , return_full_dict = False , ** kwargs ) : internalkw = deepcopy ( kwargs ) or { } original_ignore_st = internalkw . get ( "ignore_status" , False ) original_return_st = internalkw . get ( "return_output" , False ) internalkw [ "ignore_status" ] = True i...
execute command via systemd - run inside container
243,115
def _wait_for_machine_booted ( name , suffictinet_texts = None ) : suffictinet_texts = suffictinet_texts or [ "systemd-logind" ] for foo in range ( constants . DEFAULT_RETRYTIMEOUT ) : time . sleep ( constants . DEFAULT_SLEEP ) out = run_cmd ( [ "machinectl" , "--no-pager" , "status" , name ] , ignore_status = True , r...
Internal method that waits until the machine is ready; in the common case this means systemd-logind is running.
243,116
def _internal_reschedule ( callback , retry = 3 , sleep_time = constants . DEFAULT_SLEEP ) : for foo in range ( retry ) : container_process = callback [ 0 ] ( callback [ 1 ] , * callback [ 2 ] , ** callback [ 3 ] ) time . sleep ( sleep_time ) container_process . poll ( ) rcode = container_process . returncode if rcode ...
Workaround method for the internal_run_container method: it sometimes fails because of D-Bus or similar issues, so retry starting it several times.
243,117
def internal_run_container ( name , callback_method , foreground = False ) : if not foreground : logger . info ( "Stating machine (boot nspawn container) {}" . format ( name ) ) nspawn_process = NspawnContainer . _internal_reschedule ( callback_method ) NspawnContainer . _wait_for_machine_booted ( name ) logger . info ...
Internal method what runs container process
243,118
def get_container_output ( backend , image_name , command , image_tag = "latest" , additional_opts = None ) : image = backend . ImageClass ( image_name , tag = image_tag ) c = image . run_via_binary ( DockerRunBuilder ( command = command , additional_opts = additional_opts ) ) try : c . wait ( ) return c . logs_unicode...
Create a throw - away container based on provided image and tag run the supplied command in it and return output . The container is stopped and removed after it exits .
243,119
def pull ( self ) : for json_e in self . d . pull ( repository = self . name , tag = self . tag , stream = True , decode = True ) : logger . debug ( json_e ) status = graceful_get ( json_e , "status" ) if status : logger . info ( status ) else : error = graceful_get ( json_e , "error" ) logger . error ( status ) raise ...
Pull this image from registry . Raises an exception if the image is not found in the registry .
243,120
def using_transport ( self , transport = None , path = None , logs = True ) : if not transport : return self if self . transport == transport and self . path == path : return self path_required = [ SkopeoTransport . DIRECTORY , SkopeoTransport . DOCKER_ARCHIVE , SkopeoTransport . OCI ] if transport in path_required : i...
change used transport
243,121
def save_to(self, image):
    """Save this image into another DockerImage via a skopeo copy.

    :param image: target image; must be the same type as self
    :raises ConuException: when `image` has the wrong type
    """
    if not isinstance(image, self.__class__):
        raise ConuException("Invalid target image type", type(image))
    self.copy(image.name, image.tag,
              target_transport=image.transport,
              target_path=image.path,
              logs=False)
Save this image to another DockerImage
243,122
def load_from(self, image):
    """Load another DockerImage into this one (delegates to its save_to).

    :raises ConuException: when `image` has the wrong type
    """
    if not isinstance(image, self.__class__):
        raise ConuException("Invalid source image type", type(image))
    image.save_to(self)
Load from another DockerImage to this one
243,123
def skopeo_pull(self):
    """Pull this image from a Docker registry into the local Docker daemon
    using skopeo, and return the image bound to the daemon transport."""
    pulled = self.copy(self.name, self.tag,
                       SkopeoTransport.DOCKER, SkopeoTransport.DOCKER_DAEMON)
    return pulled.using_transport(SkopeoTransport.DOCKER_DAEMON)
Pull image from Docker to local Docker daemon using skopeo
243,124
def skopeo_push(self, repository=None, tag=None):
    """Push this image from the local Docker daemon to a Docker registry
    using skopeo, and return the image bound to the registry transport."""
    pushed = self.copy(repository, tag,
                       SkopeoTransport.DOCKER_DAEMON, SkopeoTransport.DOCKER)
    return pushed.using_transport(SkopeoTransport.DOCKER)
Push image from Docker daemon to Docker using skopeo
243,125
def copy ( self , repository = None , tag = None , source_transport = None , target_transport = SkopeoTransport . DOCKER , source_path = None , target_path = None , logs = True ) : if not repository : repository = self . name if not tag : tag = self . tag if self . tag else "latest" if target_transport == SkopeoTranspo...
Copy this image
243,126
def tag_image ( self , repository = None , tag = None ) : if not ( repository or tag ) : raise ValueError ( "You need to specify either repository or tag." ) r = repository or self . name t = "latest" if not tag else tag self . d . tag ( image = self . get_full_name ( ) , repository = r , tag = t ) return DockerImage (...
Apply additional tags to the image or even add a new name
243,127
def inspect(self, refresh=True):
    """Provide metadata about the image.

    :param refresh: when False, reuse previously cached inspect data
    :raises ConuException: when the image has no usable identifier
    """
    if self._inspect_data and not refresh:
        return self._inspect_data
    identifier = self._id or self.get_full_name()
    if not identifier:
        raise ConuException("This image does not have a valid identifier.")
    self._inspect_data = self.d.inspect_image(identifier)
    return self._inspect_data
provide metadata about the image ; flip refresh = True if cached metadata are enough
243,128
def has_pkgs_signed_with ( self , allowed_keys ) : if not allowed_keys or not isinstance ( allowed_keys , list ) : raise ConuException ( "allowed_keys must be a list" ) command = [ 'rpm' , '-qa' , '--qf' , '%{name} %{SIGPGP:pgpsig}\n' ] cont = self . run_via_binary ( command = command ) try : out = cont . logs_unicode ...
Check signature of packages installed in image . Raises exception when
243,129
def build ( cls , path , tag = None , dockerfile = None ) : if not path : raise ConuException ( 'Please specify path to the directory containing the Dockerfile' ) client = get_client ( ) response = [ line for line in client . build ( path , rm = True , tag = tag , dockerfile = dockerfile , quiet = True ) ] if not respo...
Build the image from the provided dockerfile in path
243,130
def layers(self, rev=True):
    """Return a DockerImage for every layer of this image.

    :param rev: keep the order from get_layer_ids() when True (default);
        reverse it otherwise
    """
    result = [
        DockerImage(None, identifier=layer_id,
                    pull_policy=DockerImagePullPolicy.NEVER)
        for layer_id in self.get_layer_ids()
    ]
    if not rev:
        result.reverse()
    return result
Get list of DockerImage for every layer in image
243,131
def extend ( self , source , new_image_name , s2i_args = None ) : s2i_args = s2i_args or [ ] c = self . _s2i_command ( [ "build" ] + s2i_args + [ source , self . get_full_name ( ) ] ) if new_image_name : c . append ( new_image_name ) try : run_cmd ( c ) except subprocess . CalledProcessError as ex : raise ConuException...
extend this s2i - enabled image using provided source raises ConuException if s2i build fails
243,132
def usage ( self ) : c = self . _s2i_command ( [ "usage" , self . get_full_name ( ) ] ) with open ( os . devnull , "w" ) as fd : process = subprocess . Popen ( c , stdout = fd , stderr = subprocess . PIPE ) _ , output = process . communicate ( ) retcode = process . poll ( ) if retcode : raise ConuException ( "`s2i usag...
Provide output of s2i usage
243,133
def http_request(self, path="/", method="GET", host=None, port=None,
                 json=False, data=None):
    """Perform an HTTP request against the container and return the
    response from the underlying HTTP session.

    Defaults to 127.0.0.1:8080 when host/port are not given.
    """
    target_host = host or '127.0.0.1'
    target_port = port or 8080
    url = get_url(host=target_host, port=target_port, path=path)
    return self.http_session.request(method, url, json=json, data=data)
perform a HTTP request
243,134
def system_requirements ( ) : command_exists ( "systemd-nspawn" , [ "systemd-nspawn" , "--version" ] , "Command systemd-nspawn does not seems to be present on your system" "Do you have system with systemd" ) command_exists ( "machinectl" , [ "machinectl" , "--no-pager" , "--help" ] , "Command machinectl does not seems ...
Check if all necessary packages are installed on system
243,135
def _generate_id ( self ) : name = self . name . replace ( self . special_separator , "-" ) . replace ( "." , "-" ) loc = "\/" if self . location : loc = self . location _id = "{PREFIX}{SEP}{NAME}{HASH}{SEP}" . format ( PREFIX = constants . CONU_ARTIFACT_TAG , NAME = name , HASH = hashlib . sha512 ( loc ) . hexdigest (...
create new unique identifier
243,136
def pull ( self ) : if not os . path . exists ( CONU_IMAGES_STORE ) : os . makedirs ( CONU_IMAGES_STORE ) logger . debug ( "Try to pull: {} -> {}" . format ( self . location , self . local_location ) ) if not self . _is_local ( ) : compressed_location = self . local_location + ".xz" run_cmd ( [ "curl" , "-f" , "-L" , "...
Pull this image from URL .
243,137
def run_via_binary ( self , command = None , foreground = False , volumes = None , additional_opts = None , default_options = None , name = None , * args , ** kwargs ) : command = deepcopy ( command ) or [ ] volumes = deepcopy ( volumes ) or [ ] additional_opts = deepcopy ( additional_opts ) or [ ] internalkw = deepcop...
Create a new NspawnContainer instance when not running in the foreground; when running in the foreground, return the process object.
243,138
def process_rpm_ql_line ( line_str , allowed_keys ) : try : name , key_str = line_str . split ( ' ' , 1 ) except ValueError : logger . error ( "Failed to split line '{0}" . format ( repr ( line_str ) ) ) return False if name in no_key_pkgs : return True if key_str == NONE_KEY : logger . error ( "Unsigned package {0}" ....
Checks single line of rpm - ql for correct keys
243,139
def check_signatures(pkg_list, allowed_keys):
    """Check that every package line carries an allowed signature.

    Every line is checked (no short-circuit) so each failure is logged.
    :raises PackageSignatureException: when any package fails the check
    """
    results = [process_rpm_ql_line(entry.strip(), allowed_keys)
               for entry in pkg_list]
    if not all(results):
        raise PackageSignatureException(
            'Error while checking rpm signatures, see logs for more info')
Go through list of packages with signatures and check if all are properly signed
243,140
def get_ports(self):
    """Return the port numbers (as strings) recorded in container metadata."""
    port_map = self.inspect(refresh=True)["NetworkSettings"]["Ports"]
    if not port_map:
        return []
    return [spec.split("/")[0] for spec in port_map]
get ports specified in container metadata
243,141
def _clean_tmp_dirs ( self ) : def onerror ( fnc , path , excinfo ) : self . logger . info ( "we were not able to remove temporary file %s: %s" , path , excinfo [ 1 ] ) shutil . rmtree ( self . tmpdir , onerror = onerror ) self . tmpdir = None global _backend_tmpdir _backend_tmpdir = None
Remove temporary dir associated with this backend instance .
243,142
def _clean ( self ) : if CleanupPolicy . EVERYTHING in self . cleanup : self . cleanup_containers ( ) self . cleanup_volumes ( ) self . cleanup_images ( ) self . _clean_tmp_dirs ( ) else : if CleanupPolicy . CONTAINERS in self . cleanup : self . cleanup_containers ( ) if CleanupPolicy . VOLUMES in self . cleanup : self...
Method for cleaning according to object cleanup policy value
243,143
def list_containers ( self ) : data = run_cmd ( [ "machinectl" , "list" , "--no-legend" , "--no-pager" ] , return_output = True ) output = [ ] reg = re . compile ( r"\s+" ) for line in data . split ( "\n" ) : stripped = line . strip ( ) if stripped : parts = reg . split ( stripped ) name = parts [ 0 ] output . append (...
list all available nspawn containers
243,144
def list_images(self):
    """Return an Image instance for every entry in the local image store."""
    return [
        self.ImageClass(entry, pull_policy=ImagePullPolicy.NEVER)
        for entry in os.listdir(CONU_IMAGES_STORE)
    ]
list all available nspawn images
243,145
def cleanup_containers ( self ) : for cont in self . list_containers ( ) : if CONU_ARTIFACT_TAG in cont . name : try : logger . debug ( "removing container %s created by conu" , cont ) run_cmd ( [ "machinectl" , "terminate" , cont . name ] ) except subprocess . CalledProcessError as e : logger . error ( "unable to remo...
stop all container created by conu
243,146
def check_port ( port , host , timeout = 10 ) : logger . info ( "trying to open connection to %s:%s" , host , port ) sock = socket . socket ( socket . AF_INET , socket . SOCK_STREAM ) try : sock . settimeout ( timeout ) result = sock . connect_ex ( ( host , port ) ) logger . info ( "was connection successful? errno: %s...
connect to port on host and return True on success
243,147
def get_selinux_status():
    """Return the host's SELinux status string as reported by getenforce."""
    getenforce_command_exists()
    status = run_cmd(["getenforce"], return_output=True).strip()
    logger.debug("SELinux is %r", status)
    return status
get SELinux status of host
243,148
def random_str(size=10):
    """Return a random lowercase ASCII string of length `size`."""
    chars = [random.choice(string.ascii_lowercase) for _ in range(size)]
    return "".join(chars)
create random string of selected size
243,149
def run_cmd ( cmd , return_output = False , ignore_status = False , log_output = True , ** kwargs ) : logger . debug ( 'command: "%s"' % ' ' . join ( cmd ) ) process = subprocess . Popen ( cmd , stdout = subprocess . PIPE , stderr = subprocess . STDOUT , universal_newlines = True , ** kwargs ) output = process . commun...
run provided command on host system using the same user as you invoked this code raises subprocess . CalledProcessError if it fails
243,150
def command_exists ( command , noop_invocation , exc_msg ) : try : found = bool ( shutil . which ( command ) ) except AttributeError : try : p = subprocess . Popen ( noop_invocation , stdout = subprocess . PIPE , stderr = subprocess . PIPE ) except OSError : found = False else : stdout , stderr = p . communicate ( ) fo...
Verify that the provided command exists . Raise CommandDoesNotExistException in case of an error or if the command does not exist .
243,151
def check_docker_command_works ( ) : try : out = subprocess . check_output ( [ "docker" , "version" ] , stderr = subprocess . STDOUT , universal_newlines = True ) except OSError : logger . info ( "docker binary is not available" ) raise CommandDoesNotExistException ( "docker command doesn't seem to be available on your...
Verify that dockerd and docker binary works fine . This is performed by calling docker version which also checks server API version .
243,152
def export_docker_container_to_directory ( client , container , path ) : check_docker_command_works ( ) export_p = subprocess . Popen ( [ "docker" , "export" , container . get_id ( ) ] , stderr = subprocess . PIPE , stdout = subprocess . PIPE ) try : os . mkdir ( path , 0o0700 ) except OSError as ex : if ex . errno == ...
take selected docker container create an archive out of it and unpack it to a selected location
243,153
def get_version(self):
    """Return a 3-tuple of version-component strings parsed from
    `podman version`, or None when the output cannot be parsed."""
    raw_version = run_cmd(["podman", "version"], return_output=True)
    pattern = re.compile(r"Version:\s*(\d+)\.(\d+)\.(\d+)")
    matches = pattern.findall(raw_version)
    if matches:
        return matches[0]
    logger.error("unable to parse version from `podman version`")
    return None
return 3 - tuple of version info or None
243,154
def list_containers ( self ) : containers = [ ] for container in self . _list_podman_containers ( ) : identifier = container [ "ID" ] name = container [ "Names" ] image_name = container [ "Image" ] try : image_name , image_tag = parse_reference ( image_name ) except ( IndexError , TypeError ) : image_name , image_tag =...
List all available podman containers .
243,155
def list_images ( self ) : images = [ ] for image in self . _list_all_podman_images ( ) : try : i_name , tag = parse_reference ( image [ "names" ] [ 0 ] ) except ( IndexError , TypeError ) : i_name , tag = None , None d_im = PodmanImage ( i_name , tag = tag , identifier = image [ "id" ] , pull_policy = PodmanImagePullP...
List all available podman images .
243,156
def inspect_to_metadata ( metadata_object , inspect_data ) : identifier = graceful_get ( inspect_data , 'Id' ) if identifier : if ":" in identifier : metadata_object . identifier = identifier . split ( ':' ) [ 1 ] else : metadata_object . identifier = identifier raw_env_vars = graceful_get ( inspect_data , "Config" , "...
process data from docker inspect and update provided metadata object
243,157
def inspect_to_container_metadata ( c_metadata_object , inspect_data , image_instance ) : inspect_to_metadata ( c_metadata_object , inspect_data ) status = ContainerStatus . get_from_docker ( graceful_get ( inspect_data , "State" , "Status" ) , graceful_get ( inspect_data , "State" , "ExitCode" ) , ) image_id = gracefu...
process data from docker container inspect and update provided container metadata object
243,158
def list_pods ( self , namespace = None ) : if namespace : return [ Pod ( name = p . metadata . name , namespace = namespace , spec = p . spec ) for p in self . core_api . list_namespaced_pod ( namespace , watch = False ) . items ] return [ Pod ( name = p . metadata . name , namespace = p . metadata . namespace , spec ...
List all available pods .
243,159
def list_services ( self , namespace = None ) : if namespace : return [ Service ( name = s . metadata . name , ports = k8s_ports_to_metadata_ports ( s . spec . ports ) , namespace = s . metadata . namespace , labels = s . metadata . labels , selector = s . spec . selector , spec = s . spec ) for s in self . core_api . ...
List all available services .
243,160
def list_deployments ( self , namespace = None ) : if namespace : return [ Deployment ( name = d . metadata . name , namespace = d . metadata . namespace , labels = d . metadata . labels , selector = d . spec . selector , image_metadata = ImageMetadata ( name = d . spec . template . spec . containers [ 0 ] . name . spl...
List all available deployments .
243,161
def get_url(path, host, port, method="http"):
    """Build a URL of the form ``<method>://<host>:<port><path>``."""
    netloc = "%s:%s" % (host, port)
    return urlunsplit((method, netloc, path, "", ""))
make url from path host and port
243,162
def list_containers ( self ) : result = [ ] for c in self . d . containers ( all = True ) : name = None names = c . get ( "Names" , None ) if names : name = names [ 0 ] i = DockerImage ( None , identifier = c [ "ImageID" ] ) cont = DockerContainer ( i , c [ "Id" ] , name = name ) inspect_to_container_metadata ( cont . ...
List all available docker containers .
243,163
def list_images ( self ) : response = [ ] for im in self . d . images ( ) : try : i_name , tag = parse_reference ( im [ "RepoTags" ] [ 0 ] ) except ( IndexError , TypeError ) : i_name , tag = None , None d_im = DockerImage ( i_name , tag = tag , identifier = im [ "Id" ] , pull_policy = DockerImagePullPolicy . NEVER ) i...
List all available docker images .
243,164
def match ( ctx , features , profile , gps_precision ) : access_token = ( ctx . obj and ctx . obj . get ( 'access_token' ) ) or None features = list ( features ) if len ( features ) != 1 : raise click . BadParameter ( "Mapmatching requires a single LineString feature" ) service = mapbox . MapMatcher ( access_token = ac...
Mapbox Map Matching API lets you use snap your GPS traces to the OpenStreetMap road and path network .
243,165
def staticmap ( ctx , mapid , output , features , lat , lon , zoom , size ) : access_token = ( ctx . obj and ctx . obj . get ( 'access_token' ) ) or None if features : features = list ( cligj . normalize_feature_inputs ( None , 'features' , [ features ] ) ) service = mapbox . Static ( access_token = access_token ) try ...
Generate static map images from existing Mapbox map ids . Optionally overlay with geojson features .
243,166
def main_group ( ctx , verbose , quiet , access_token , config ) : ctx . obj = { } config = config or os . path . join ( click . get_app_dir ( 'mapbox' ) , 'mapbox.ini' ) cfg = read_config ( config ) if cfg : ctx . obj [ 'config_file' ] = config ctx . obj [ 'cfg' ] = cfg ctx . default_map = cfg verbosity = ( os . envir...
This is the command line interface to Mapbox web services .
243,167
def config ( ctx ) : ctx . default_map = ctx . obj [ 'cfg' ] click . echo ( "CLI:" ) click . echo ( "access-token = {0}" . format ( ctx . obj [ 'access_token' ] ) ) click . echo ( "verbosity = {0}" . format ( ctx . obj [ 'verbosity' ] ) ) click . echo ( "" ) click . echo ( "Environment:" ) if 'MAPBOX_ACCESS_TOKEN' in o...
Show access token and other configuration settings .
243,168
def echo_headers(headers, file=None):
    """Echo headers sorted by name, followed by a trailing blank line."""
    for name, value in sorted(headers.items()):
        click.echo("{0}: {1}".format(name.title(), value), file=file)
    click.echo(file=file)
Echo headers sorted .
243,169
def datasets(ctx):
    """Read and write GeoJSON from Mapbox-hosted datasets."""
    token = (ctx.obj and ctx.obj.get('access_token')) or None
    ctx.obj['service'] = mapbox.Datasets(access_token=token)
Read and write GeoJSON from Mapbox - hosted datasets
243,170
def create(ctx, name, description):
    """Create a new dataset and echo the server response.

    :raises MapboxCLIException: on any non-200 response
    """
    service = ctx.obj.get('service')
    res = service.create(name, description)
    if res.status_code != 200:
        raise MapboxCLIException(res.text.strip())
    click.echo(res.text)
Create a new dataset .
243,171
def read_dataset(ctx, dataset, output):
    """Read the attributes of a dataset and write them to `output`.

    :raises MapboxCLIException: on any non-200 response
    """
    out_stream = click.open_file(output, 'w')
    service = ctx.obj.get('service')
    res = service.read_dataset(dataset)
    if res.status_code != 200:
        raise MapboxCLIException(res.text.strip())
    click.echo(res.text, file=out_stream)
Read the attributes of a dataset .
243,172
def list_features ( ctx , dataset , reverse , start , limit , output ) : stdout = click . open_file ( output , 'w' ) service = ctx . obj . get ( 'service' ) res = service . list_features ( dataset , reverse , start , limit ) if res . status_code == 200 : click . echo ( res . text , file = stdout ) else : raise MapboxCL...
Get features of a dataset .
243,173
def put_feature ( ctx , dataset , fid , feature , input ) : if feature is None : stdin = click . open_file ( input , 'r' ) feature = stdin . read ( ) feature = json . loads ( feature ) service = ctx . obj . get ( 'service' ) res = service . update_feature ( dataset , fid , feature ) if res . status_code == 200 : click ...
Create or update a dataset feature .
243,174
def delete_feature(ctx, dataset, fid):
    """Delete a single feature from a dataset.

    :raises MapboxCLIException: on any response other than 204
    """
    service = ctx.obj.get('service')
    res = service.delete_feature(dataset, fid)
    if res.status_code != 204:
        raise MapboxCLIException(res.text.strip())
Delete a feature .
243,175
def create_tileset ( ctx , dataset , tileset , name ) : access_token = ( ctx . obj and ctx . obj . get ( 'access_token' ) ) or None service = mapbox . Uploader ( access_token = access_token ) uri = "mapbox://datasets/{username}/{dataset}" . format ( username = tileset . split ( '.' ) [ 0 ] , dataset = dataset ) res = s...
Create a vector tileset from a dataset .
243,176
def directions ( ctx , features , profile , alternatives , geometries , overview , steps , continue_straight , waypoint_snapping , annotations , language , output ) : access_token = ( ctx . obj and ctx . obj . get ( "access_token" ) ) or None service = mapbox . Directions ( access_token = access_token ) if overview == ...
The Mapbox Directions API will show you how to get where you re going .
243,177
def upload ( ctx , tileset , datasource , name , patch ) : access_token = ( ctx . obj and ctx . obj . get ( 'access_token' ) ) or None service = mapbox . Uploader ( access_token = access_token ) if name is None : name = tileset . split ( "." ) [ - 1 ] if datasource . startswith ( 'https://' ) : res = service . create (...
Upload data to Mapbox accounts .
243,178
def _save_notebook ( self , os_path , nb ) : with self . atomic_writing ( os_path , encoding = 'utf-8' ) as f : if ftdetect ( os_path ) == 'notebook' : nbformat . write ( nb , f , version = nbformat . NO_CONVERT ) elif ftdetect ( os_path ) == 'markdown' : nbjson = nbformat . writes ( nb , version = nbformat . NO_CONVER...
Save a notebook to an os_path .
243,179
def ftdetect(filename):
    """Classify `filename` as 'markdown', 'notebook', or None by extension."""
    ext = os.path.splitext(filename)[1]
    if ext in ('.md', '.markdown', '.mkd', '.mdown', '.mkdn', '.Rmd'):
        return 'markdown'
    if ext == '.ipynb':
        return 'notebook'
    return None
Determine if filename is markdown or notebook based on the file extension .
243,180
def strip(notebook):
    """Remove outputs and execution counts from all code cells, in place."""
    code_cells = (c for c in notebook.cells if c.cell_type == 'code')
    for cell in code_cells:
        cell.outputs = []
        cell.execution_count = None
Remove outputs from a notebook .
243,181
def get_caption_comments ( content ) : if not content . startswith ( '## fig:' ) : return None , None content = content . splitlines ( ) id = content [ 0 ] . strip ( '## ' ) caption = [ ] for line in content [ 1 : ] : if not line . startswith ( '# ' ) or line . startswith ( '##' ) : break else : caption . append ( line...
Retrieve an id and a caption from a code cell .
243,182
def new_code_block(self, **kwargs):
    """Return a fresh code-block dict, with fields overridden by kwargs."""
    block = {'content': '', 'type': self.code, 'IO': '', 'attributes': ''}
    block.update(**kwargs)
    return block
Create a new code block .
243,183
def new_text_block(self, **kwargs):
    """Return a fresh text-block dict, with fields overridden by kwargs."""
    block = {'content': '', 'type': self.markdown}
    block.update(**kwargs)
    return block
Create a new text block .
243,184
def pre_process_code_block(block):
    """Strip the recorded indent from block['icontent'] into block['content'].

    Modifies the block in place; no-op when there is no truthy 'indent'.
    """
    if block.get('indent'):
        pattern = r'^' + block['indent']
        block['content'] = re.sub(pattern, '', block['icontent'],
                                  flags=re.MULTILINE)
Preprocess the content of a code block modifying the code block in place .
243,185
def process_code_block ( self , block ) : if block [ 'type' ] != self . code : return block attr = PandocAttributes ( block [ 'attributes' ] , 'markdown' ) if self . match == 'all' : pass elif self . match == 'fenced' and block . get ( 'indent' ) : return self . new_text_block ( content = ( '\n' + block [ 'icontent' ] ...
Parse block attributes
243,186
def parse_blocks ( self , text ) : code_matches = [ m for m in self . code_pattern . finditer ( text ) ] text_starts = [ 0 ] + [ m . end ( ) for m in code_matches ] text_stops = [ m . start ( ) for m in code_matches ] + [ len ( text ) ] text_limits = list ( zip ( text_starts , text_stops ) ) code_blocks = [ self . new_...
Extract the code and non - code blocks from given markdown text .
243,187
def create_code_cell ( block ) : code_cell = nbbase . new_code_cell ( source = block [ 'content' ] ) attr = block [ 'attributes' ] if not attr . is_empty : code_cell . metadata = nbbase . NotebookNode ( { 'attributes' : attr . to_dict ( ) } ) execution_count = attr . kvs . get ( 'n' ) if not execution_count : code_cell...
Create a notebook code cell from a block .
243,188
def create_markdown_cell(block):
    """Build a notebook markdown cell from a parsed block."""
    return nbbase.new_markdown_cell(cell_type=block['type'],
                                    source=block['content'])
Create a markdown cell from a block .
243,189
def create_cells ( self , blocks ) : cells = [ ] for block in blocks : if ( block [ 'type' ] == self . code ) and ( block [ 'IO' ] == 'input' ) : code_cell = self . create_code_cell ( block ) cells . append ( code_cell ) elif ( block [ 'type' ] == self . code and block [ 'IO' ] == 'output' and cells [ - 1 ] . cell_type...
Turn the list of blocks into a list of notebook cells .
243,190
def to_notebook ( self , s , ** kwargs ) : all_blocks = self . parse_blocks ( s ) if self . pre_code_block [ 'content' ] : all_blocks . insert ( 0 , self . pre_code_block ) blocks = [ self . process_code_block ( block ) for block in all_blocks ] cells = self . create_cells ( blocks ) nb = nbbase . new_notebook ( cells ...
Convert the markdown string s to an IPython notebook .
243,191
def write_resources ( self , resources ) : for filename , data in list ( resources . get ( 'outputs' , { } ) . items ( ) ) : dest = os . path . join ( self . output_dir , filename ) path = os . path . dirname ( dest ) if path and not os . path . isdir ( path ) : os . makedirs ( path ) with open ( dest , 'wb' ) as f : f...
Write the output data in resources returned by exporter to files .
243,192
def string2json(self, string):
    """Serialize the given object to a stable unicode JSON string.

    NOTE(review): despite the name and docstring in the repo, this dumps
    an object TO json text -- used for writing outputs to markdown.
    """
    dumped = json.dumps(string, cls=BytesEncoder, indent=1,
                        sort_keys=True, separators=(',', ': '))
    return cast_unicode(dumped, 'utf-8')
Convert json into its string representation . Used for writing outputs to markdown .
243,193
def create_attributes ( self , cell , cell_type = None ) : if self . strip_outputs or not hasattr ( cell , 'execution_count' ) : return 'python' attrs = cell . metadata . get ( 'attributes' ) attr = PandocAttributes ( attrs , 'dict' ) if 'python' in attr . classes : attr . classes . remove ( 'python' ) if 'input' in at...
Turn the attribute dict into an attribute string for the code block .
243,194
def dequote(s):
    """Strip one layer of matching single or double quotes from `s`."""
    if len(s) >= 2 and s[0] == s[-1] and s[0] in ('"', "'"):
        return s[1:-1]
    return s
Remove excess quotes from a string .
243,195
def data2uri ( data , data_type ) : MIME_MAP = { 'image/jpeg' : 'jpeg' , 'image/png' : 'png' , 'text/plain' : 'text' , 'text/html' : 'html' , 'text/latex' : 'latex' , 'application/javascript' : 'html' , 'image/svg+xml' : 'svg' , } inverse_map = { v : k for k , v in list ( MIME_MAP . items ( ) ) } mime_type = inverse_ma...
Convert base64 data into a data uri with the given data_type .
243,196
def magic(self, alias):
    """Return the IPython cell magic for a language `alias`.

    Known aliases map to their configured magic; unknown aliases fall
    back to a generic "%%<alias>" cell-magic line.
    """
    # idiom: single dict lookup via .get instead of membership test + index
    return self.aliases.get(alias, "%%{}\n".format(alias))
Returns the appropriate IPython code magic when called with an alias for a language .
243,197
def knit ( self , input_file , opts_chunk = 'eval=FALSE' ) : tmp_in = tempfile . NamedTemporaryFile ( mode = 'w+' ) tmp_out = tempfile . NamedTemporaryFile ( mode = 'w+' ) tmp_in . file . write ( input_file . read ( ) ) tmp_in . file . flush ( ) tmp_in . file . seek ( 0 ) self . _knit ( tmp_in . name , tmp_out . name ,...
Use Knitr to convert the r - markdown input_file into markdown returning a file object .
243,198
def is_path_protected ( path ) : protected = True for exclude_path in TERMS_EXCLUDE_URL_PREFIX_LIST : if path . startswith ( exclude_path ) : protected = False for contains_path in TERMS_EXCLUDE_URL_CONTAINS_LIST : if contains_path in path : protected = False if path in TERMS_EXCLUDE_URL_LIST : protected = False if pat...
returns True if given path is to be protected otherwise False
243,199
def process_request ( self , request ) : LOGGER . debug ( 'termsandconditions.middleware' ) current_path = request . META [ 'PATH_INFO' ] if DJANGO_VERSION <= ( 2 , 0 , 0 ) : user_authenticated = request . user . is_authenticated ( ) else : user_authenticated = request . user . is_authenticated if user_authenticated an...
Process each request to app to ensure terms have been accepted