idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
28,400
def unpack_auth_b64 ( self , docker_registry ) : UnpackedAuth = namedtuple ( 'UnpackedAuth' , [ 'raw_str' , 'username' , 'password' ] ) credentials = self . get_credentials ( docker_registry ) auth_b64 = credentials . get ( 'auth' ) if auth_b64 : raw_str = b64decode ( auth_b64 ) . decode ( 'utf-8' ) unpacked_credentials = raw_str . split ( ':' , 1 ) if len ( unpacked_credentials ) == 2 : return UnpackedAuth ( raw_str , * unpacked_credentials ) else : raise ValueError ( "Failed to parse 'auth' in '%s'" % self . json_secret_path )
Decode and unpack base64 auth credentials from config file .
180
13
28,401
def default ( self ) : return self . v2_list or self . oci_index or self . oci or self . v2 or self . v1
Return the default manifest schema version .
35
7
28,402
def get_log_files ( self , osbs , build_id ) : logs = None output = [ ] # Collect logs from server try : logs = osbs . get_orchestrator_build_logs ( build_id ) except OsbsException as ex : self . log . error ( "unable to get build logs: %r" , ex ) return output except TypeError : # Older osbs-client has no get_orchestrator_build_logs self . log . error ( "OSBS client does not support get_orchestrator_build_logs" ) return output platform_logs = { } for entry in logs : platform = entry . platform if platform not in platform_logs : filename = 'orchestrator' if platform is None else platform platform_logs [ platform ] = NamedTemporaryFile ( prefix = "%s-%s" % ( build_id , filename ) , suffix = ".log" , mode = 'r+b' ) platform_logs [ platform ] . write ( ( entry . line + '\n' ) . encode ( 'utf-8' ) ) for platform , logfile in platform_logs . items ( ) : logfile . flush ( ) filename = 'orchestrator' if platform is None else platform metadata = self . get_log_metadata ( logfile . name , "%s.log" % filename ) output . append ( Output ( file = logfile , metadata = metadata ) ) return output
Build list of log files
322
5
28,403
def update_image_digest ( self , image , platform , digest ) : image_name_tag = self . _key ( image ) image_name = image . to_str ( tag = False ) name_digest = '{}@{}' . format ( image_name , digest ) image_digests = self . _images_digests . setdefault ( image_name_tag , { } ) image_digests [ platform ] = name_digest
Update parent image digest for specific platform
102
7
28,404
def get_image_digests ( self , image ) : image_name_tag = self . _key ( image ) image_digests = self . _images_digests . get ( image_name_tag ) if image_digests is None : raise KeyError ( 'Image {} has no digest records' . format ( image_name_tag ) ) return image_digests
Get platform digests of specified image
82
7
28,405
def get_image_platform_digest ( self , image , platform ) : image_digests = self . get_image_digests ( image ) digest = image_digests . get ( platform ) if digest is None : raise KeyError ( 'Image {} has no digest record for platform {}' . format ( image , platform ) ) return digest
Get digest of specified image and platform
74
7
28,406
def rpm_qf_args ( tags = None , separator = ';' ) : if tags is None : tags = image_component_rpm_tags fmt = separator . join ( [ "%%{%s}" % tag for tag in tags ] ) return r"-qa --qf '{0}\n'" . format ( fmt )
Return the arguments to pass to rpm to list RPMs in the format expected by parse_rpm_output .
74
22
28,407
def parse_rpm_output ( output , tags = None , separator = ';' ) : if tags is None : tags = image_component_rpm_tags def field ( tag ) : """ Get a field value by name """ try : value = fields [ tags . index ( tag ) ] except ValueError : return None if value == '(none)' : return None return value components = [ ] sigmarker = 'Key ID ' for rpm in output : fields = rpm . rstrip ( '\n' ) . split ( separator ) if len ( fields ) < len ( tags ) : continue signature = field ( 'SIGPGP:pgpsig' ) or field ( 'SIGGPG:pgpsig' ) if signature : parts = signature . split ( sigmarker , 1 ) if len ( parts ) > 1 : signature = parts [ 1 ] component_rpm = { 'type' : 'rpm' , 'name' : field ( 'NAME' ) , 'version' : field ( 'VERSION' ) , 'release' : field ( 'RELEASE' ) , 'arch' : field ( 'ARCH' ) , 'sigmd5' : field ( 'SIGMD5' ) , 'signature' : signature , } # Special handling for epoch as it must be an integer or None epoch = field ( 'EPOCH' ) if epoch is not None : epoch = int ( epoch ) component_rpm [ 'epoch' ] = epoch if component_rpm [ 'name' ] != 'gpg-pubkey' : components . append ( component_rpm ) return components
Parse output of the rpm query .
341
8
28,408
def koji_login ( session , proxyuser = None , ssl_certs_dir = None , krb_principal = None , krb_keytab = None ) : kwargs = { } if proxyuser : kwargs [ 'proxyuser' ] = proxyuser if ssl_certs_dir : # Use certificates logger . info ( "Using SSL certificates for Koji authentication" ) kwargs [ 'cert' ] = os . path . join ( ssl_certs_dir , 'cert' ) # serverca is not required in newer versions of koji, but if set # koji will always ensure file exists # NOTE: older versions of koji may require this to be set, in # that case, make sure serverca is passed in serverca_path = os . path . join ( ssl_certs_dir , 'serverca' ) if os . path . exists ( serverca_path ) : kwargs [ 'serverca' ] = serverca_path # Older versions of koji actually require this parameter, even though # it's completely ignored. kwargs [ 'ca' ] = None result = session . ssl_login ( * * kwargs ) else : # Use Kerberos logger . info ( "Using Kerberos for Koji authentication" ) if krb_principal and krb_keytab : kwargs [ 'principal' ] = krb_principal kwargs [ 'keytab' ] = krb_keytab result = session . krb_login ( * * kwargs ) if not result : raise RuntimeError ( 'Unable to perform Koji authentication' ) return result
Choose the correct login method based on the available credentials and call that method on the provided session object .
365
20
28,409
def create_koji_session ( hub_url , auth_info = None ) : session = KojiSessionWrapper ( koji . ClientSession ( hub_url , opts = { 'krb_rdns' : False } ) ) if auth_info is not None : koji_login ( session , * * auth_info ) return session
Creates and returns a Koji session . If auth_info is provided the session will be authenticated .
76
21
28,410
def stream_task_output ( session , task_id , file_name , blocksize = DEFAULT_DOWNLOAD_BLOCK_SIZE ) : logger . debug ( 'Streaming {} from task {}' . format ( file_name , task_id ) ) offset = 0 contents = '[PLACEHOLDER]' while contents : contents = session . downloadTaskOutput ( task_id , file_name , offset , blocksize ) offset += len ( contents ) if contents : yield contents logger . debug ( 'Finished streaming {} from task {}' . format ( file_name , task_id ) )
Generator to download file from task without loading the whole file into memory .
128
15
28,411
def filename ( self ) : urlpath = unquote ( urlsplit ( self . repourl , allow_fragments = False ) . path ) basename = os . path . basename ( urlpath ) if not basename . endswith ( REPO_SUFFIX ) : basename += REPO_SUFFIX if self . add_hash : suffix = '-' + md5 ( self . repourl . encode ( 'utf-8' ) ) . hexdigest ( ) [ : 5 ] else : suffix = '' final_name = suffix . join ( os . path . splitext ( basename ) ) return final_name
Returns the filename to be used for saving the repo file .
142
12
28,412
def filter_components_by_name ( name , components_list , type_ = T_RPM ) : for components in components_list : for component in components : if component [ 'type' ] == type_ and component [ 'name' ] == name : yield component
Generator filters components from components_list by name
59
10
28,413
def get_component_list_from_workers ( self , worker_metadatas ) : comp_list = [ ] for platform in sorted ( worker_metadatas . keys ( ) ) : for instance in worker_metadatas [ platform ] [ 'output' ] : if instance [ 'type' ] == 'docker-image' : if 'components' not in instance or not instance [ 'components' ] : self . log . warn ( "Missing 'components' key in 'output' metadata instance: %s" , instance ) continue comp_list . append ( instance [ 'components' ] ) return comp_list
Find the component lists from each worker build .
139
9
28,414
def get_rpms ( self ) : tags = [ 'NAME' , 'VERSION' , 'RELEASE' , 'ARCH' , 'EPOCH' , 'SIGMD5' , 'SIGPGP:pgpsig' , 'SIGGPG:pgpsig' , ] cmd = "/bin/rpm " + rpm_qf_args ( tags ) try : # py3 ( status , output ) = subprocess . getstatusoutput ( cmd ) except AttributeError : # py2 with open ( '/dev/null' , 'r+' ) as devnull : p = subprocess . Popen ( cmd , shell = True , stdin = devnull , stdout = subprocess . PIPE , stderr = devnull ) ( stdout , stderr ) = p . communicate ( ) status = p . wait ( ) output = stdout . decode ( ) if status != 0 : self . log . debug ( "%s: stderr output: %s" , cmd , stderr ) raise RuntimeError ( "%s: exit code %s" % ( cmd , status ) ) return parse_rpm_output ( output . splitlines ( ) , tags )
Build a list of installed RPMs in the format required for the metadata .
259
15
28,415
def get_output_metadata ( self , path , filename ) : checksums = get_checksums ( path , [ 'md5' ] ) metadata = { 'filename' : filename , 'filesize' : os . path . getsize ( path ) , 'checksum' : checksums [ 'md5sum' ] , 'checksum_type' : 'md5' } if self . metadata_only : metadata [ 'metadata_only' ] = True return metadata
Describe a file by its metadata .
101
8
28,416
def get_builder_image_id ( self ) : try : buildroot_tag = os . environ [ "OPENSHIFT_CUSTOM_BUILD_BASE_IMAGE" ] except KeyError : return '' try : pod = self . osbs . get_pod_for_build ( self . build_id ) all_images = pod . get_container_image_ids ( ) except OsbsException as ex : self . log . error ( "unable to find image id: %r" , ex ) return buildroot_tag try : return all_images [ buildroot_tag ] except KeyError : self . log . error ( "Unable to determine buildroot image ID for %s" , buildroot_tag ) return buildroot_tag
Find out the docker ID of the buildroot image we are in .
166
14
28,417
def get_image_components ( self ) : output = self . workflow . image_components if output is None : self . log . error ( "%s plugin did not run!" , PostBuildRPMqaPlugin . key ) output = [ ] return output
Re - package the output of the rpmqa plugin into the format required for the metadata .
55
18
28,418
def get_digests ( self ) : try : pulp = get_manifests_in_pulp_repository ( self . workflow ) except KeyError : pulp = None digests = { } # repository -> digests for registry in self . workflow . push_conf . docker_registries : for image in self . workflow . tag_conf . images : image_str = image . to_str ( ) if image_str in registry . digests : image_digests = registry . digests [ image_str ] if pulp is None : digest_list = [ image_digests . default ] else : # If Pulp is enabled, only report digests that # were synced into Pulp. This may not be all # of them, depending on whether Pulp has # schema 2 support. digest_list = [ digest for digest in ( image_digests . v1 , image_digests . v2 ) if digest in pulp ] digests [ image . to_str ( registry = False ) ] = digest_list return digests
Returns a map of images to their digests
224
9
28,419
def get_repositories ( self , digests ) : if self . workflow . push_conf . pulp_registries : # If pulp was used, only report pulp images registries = self . workflow . push_conf . pulp_registries else : # Otherwise report all the images we pushed registries = self . workflow . push_conf . all_registries output_images = [ ] for registry in registries : image = self . pullspec_image . copy ( ) image . registry = registry . uri pullspec = image . to_str ( ) output_images . append ( pullspec ) digest_list = digests . get ( image . to_str ( registry = False ) , ( ) ) for digest in digest_list : digest_pullspec = image . to_str ( tag = False ) + "@" + digest output_images . append ( digest_pullspec ) return output_images
Build the repositories metadata
194
4
28,420
def upload_file ( self , session , output , serverdir ) : name = output . metadata [ 'filename' ] self . log . debug ( "uploading %r to %r as %r" , output . file . name , serverdir , name ) kwargs = { } if self . blocksize is not None : kwargs [ 'blocksize' ] = self . blocksize self . log . debug ( "using blocksize %d" , self . blocksize ) upload_logger = KojiUploadLogger ( self . log ) session . uploadWrapper ( output . file . name , serverdir , name = name , callback = upload_logger . callback , * * kwargs ) path = os . path . join ( serverdir , name ) self . log . debug ( "uploaded %r" , path ) return path
Upload a file to koji
182
6
28,421
def get_manifest_list_only_expectation ( self ) : if not self . workflow . postbuild_results . get ( PLUGIN_GROUP_MANIFESTS_KEY ) : self . log . debug ( 'Cannot check if only manifest list digest should be returned ' 'because group manifests plugin did not run' ) return False platforms = get_platforms ( self . workflow ) if not platforms : self . log . debug ( 'Cannot check if only manifest list digest should be returned ' 'because we have no platforms list' ) return False try : platform_to_goarch = get_platform_to_goarch_mapping ( self . workflow ) except KeyError : self . log . debug ( 'Cannot check if only manifest list digest should be returned ' 'because there are no platform descriptors' ) return False for plat in platforms : if platform_to_goarch [ plat ] == 'amd64' : self . log . debug ( 'amd64 was built, all media types available' ) return False self . log . debug ( 'amd64 was not built, only manifest list digest is available' ) return True
Get expectation for manifest list only
244
6
28,422
def run ( self ) : build_json = get_build_json ( ) current_platform = platform . processor ( ) or 'x86_64' self . manifest_list_cache = { } organization = get_registries_organization ( self . workflow ) for nonce , parent in enumerate ( sorted ( self . workflow . builder . parent_images . keys ( ) , key = str ) ) : if base_image_is_custom ( parent . to_str ( ) ) : continue image = parent is_base_image = False # original_base_image is an ImageName, so compare parent as an ImageName also if image == self . workflow . builder . original_base_image : is_base_image = True image = self . _resolve_base_image ( build_json ) image = self . _ensure_image_registry ( image ) if organization : image . enclose ( organization ) parent . enclose ( organization ) if self . check_platforms : # run only at orchestrator self . _validate_platforms_in_image ( image ) self . _collect_image_digests ( image ) # try to stay with digests image_with_digest = self . _get_image_with_digest ( image , current_platform ) if image_with_digest is None : self . log . warning ( "Cannot resolve platform '%s' specific digest for image '%s'" , current_platform , image ) else : self . log . info ( "Replacing image '%s' with '%s'" , image , image_with_digest ) image = image_with_digest if self . check_platforms : new_arch_image = self . _get_image_for_different_arch ( image , current_platform ) if new_arch_image : image = new_arch_image if self . inspect_only : new_image = image else : new_image = self . _pull_and_tag_image ( image , build_json , str ( nonce ) ) self . workflow . builder . recreate_parent_images ( ) self . workflow . builder . parent_images [ parent ] = new_image if is_base_image : if organization : # we want to be sure we have original_base_image enclosed as well self . workflow . builder . original_base_image . enclose ( organization ) self . workflow . builder . set_base_image ( str ( new_image ) , insecure = self . parent_registry_insecure , dockercfg_path = self . parent_registry_dockercfg_path ) self . workflow . builder . parents_pulled = not self . inspect_only self . workflow . builder . base_image_insecure = self . parent_registry_insecure
Pull parent images and retag them uniquely for this build .
608
12
28,423
def _get_image_for_different_arch ( self , image , platform ) : parents_digests = self . workflow . builder . parent_images_digests try : digests = parents_digests . get_image_digests ( image ) except KeyError : return None if not digests : return None platform_digest = digests . get ( platform ) if platform_digest is None : # exact match is not found, get random platform platform_digest = tuple ( digests . values ( ) ) [ 0 ] new_image = ImageName . parse ( platform_digest ) return new_image
Get image from random arch
133
5
28,424
def _resolve_base_image ( self , build_json ) : spec = build_json . get ( "spec" ) try : image_id = spec [ 'triggeredBy' ] [ 0 ] [ 'imageChangeBuild' ] [ 'imageID' ] except ( TypeError , KeyError , IndexError ) : # build not marked for auto-rebuilds; use regular base image base_image = self . workflow . builder . base_image self . log . info ( "using %s as base image." , base_image ) else : # build has auto-rebuilds enabled self . log . info ( "using %s from build spec[triggeredBy] as base image." , image_id ) base_image = ImageName . parse ( image_id ) # any exceptions will propagate return base_image
If this is an auto - rebuild adjust the base image to use the triggering build
179
16
28,425
def _ensure_image_registry ( self , image ) : image_with_registry = image . copy ( ) if self . parent_registry : # if registry specified in Dockerfile image, ensure it's the one allowed by config if image . registry and image . registry != self . parent_registry : error = ( "Registry specified in dockerfile image doesn't match configured one. " "Dockerfile: '%s'; expected registry: '%s'" % ( image , self . parent_registry ) ) self . log . error ( "%s" , error ) raise RuntimeError ( error ) image_with_registry . registry = self . parent_registry return image_with_registry
If plugin configured with a parent registry ensure the image uses it
154
12
28,426
def _pull_and_tag_image ( self , image , build_json , nonce ) : image = image . copy ( ) first_library_exc = None for _ in range ( 20 ) : # retry until pull and tag is successful or definitively fails. # should never require 20 retries but there's a race condition at work. # just in case something goes wildly wrong, limit to 20 so it terminates. try : self . tasker . pull_image ( image , insecure = self . parent_registry_insecure , dockercfg_path = self . parent_registry_dockercfg_path ) self . workflow . pulled_base_images . add ( image . to_str ( ) ) except RetryGeneratorException as exc : # getting here means the pull itself failed. we may want to retry if the # image being pulled lacks a namespace, like e.g. "rhel7". we cannot count # on the registry mapping this into the docker standard "library/rhel7" so # need to retry with that. if first_library_exc : # we already tried and failed; report the first failure. raise first_library_exc if image . namespace : # already namespaced, do not retry with "library/", just fail. raise self . log . info ( "'%s' not found" , image . to_str ( ) ) image . namespace = 'library' self . log . info ( "trying '%s'" , image . to_str ( ) ) first_library_exc = exc # report first failure if retry also fails continue # Attempt to tag it using a unique ID. We might have to retry # if another build with the same parent image is finishing up # and removing images it pulled. # Use the OpenShift build name as the unique ID unique_id = build_json [ 'metadata' ] [ 'name' ] new_image = ImageName ( repo = unique_id , tag = nonce ) try : self . log . info ( "tagging pulled image" ) response = self . tasker . tag_image ( image , new_image ) self . workflow . pulled_base_images . add ( response ) self . log . debug ( "image '%s' is available as '%s'" , image , new_image ) return new_image except docker . errors . NotFound : # If we get here, some other build raced us to remove # the parent image, and that build won. # Retry the pull immediately. self . log . info ( "re-pulling removed image" ) continue # Failed to tag it after 20 tries self . log . error ( "giving up trying to pull image" ) raise RuntimeError ( "too many attempts to pull and tag image" )
Docker pull the image and tag it uniquely for use by this build
593
14
28,427
def _get_manifest_list ( self , image ) : if image in self . manifest_list_cache : return self . manifest_list_cache [ image ] manifest_list = get_manifest_list ( image , image . registry , insecure = self . parent_registry_insecure , dockercfg_path = self . parent_registry_dockercfg_path ) if '@sha256:' in str ( image ) and not manifest_list : # we want to adjust the tag only for manifest list fetching image = image . copy ( ) try : config_blob = get_config_from_registry ( image , image . registry , image . tag , insecure = self . parent_registry_insecure , dockercfg_path = self . parent_registry_dockercfg_path ) except ( HTTPError , RetryError , Timeout ) as ex : self . log . warning ( 'Unable to fetch config for %s, got error %s' , image , ex . response . status_code ) raise RuntimeError ( 'Unable to fetch config for base image' ) release = config_blob [ 'config' ] [ 'Labels' ] [ 'release' ] version = config_blob [ 'config' ] [ 'Labels' ] [ 'version' ] docker_tag = "%s-%s" % ( version , release ) image . tag = docker_tag manifest_list = get_manifest_list ( image , image . registry , insecure = self . parent_registry_insecure , dockercfg_path = self . parent_registry_dockercfg_path ) self . manifest_list_cache [ image ] = manifest_list return self . manifest_list_cache [ image ]
try to figure out manifest list
383
6
28,428
def _validate_platforms_in_image ( self , image ) : expected_platforms = get_platforms ( self . workflow ) if not expected_platforms : self . log . info ( 'Skipping validation of available platforms ' 'because expected platforms are unknown' ) return if len ( expected_platforms ) == 1 : self . log . info ( 'Skipping validation of available platforms for base image ' 'because this is a single platform build' ) return if not image . registry : self . log . info ( 'Cannot validate available platforms for base image ' 'because base image registry is not defined' ) return try : platform_to_arch = get_platform_to_goarch_mapping ( self . workflow ) except KeyError : self . log . info ( 'Cannot validate available platforms for base image ' 'because platform descriptors are not defined' ) return manifest_list = self . _get_manifest_list ( image ) if not manifest_list : raise RuntimeError ( 'Unable to fetch manifest list for base image' ) all_manifests = manifest_list . json ( ) [ 'manifests' ] manifest_list_arches = set ( manifest [ 'platform' ] [ 'architecture' ] for manifest in all_manifests ) expected_arches = set ( platform_to_arch [ platform ] for platform in expected_platforms ) self . log . info ( 'Manifest list arches: %s, expected arches: %s' , manifest_list_arches , expected_arches ) assert manifest_list_arches >= expected_arches , 'Missing arches in manifest list for base image' self . log . info ( 'Base image is a manifest list for all required platforms' )
Ensure that the image provides all platforms expected for the build .
381
13
28,429
def get_dockercfg_credentials ( self , docker_registry ) : if not self . registry_secret_path : return { } dockercfg = Dockercfg ( self . registry_secret_path ) registry_creds = dockercfg . get_credentials ( docker_registry ) if 'username' not in registry_creds : return { } return { 'basic_auth_username' : registry_creds [ 'username' ] , 'basic_auth_password' : registry_creds [ 'password' ] , }
Read the . dockercfg file and return an empty dict or else a dict with keys basic_auth_username and basic_auth_password .
126
30
28,430
def start_compose ( self , source_type , source , packages = None , sigkeys = None , arches = None , flags = None , multilib_arches = None , multilib_method = None , modular_koji_tags = None ) : body = { 'source' : { 'type' : source_type , 'source' : source } } if source_type == "tag" : body [ 'source' ] [ 'packages' ] = packages or [ ] if sigkeys is not None : body [ 'source' ] [ 'sigkeys' ] = sigkeys if flags is not None : body [ 'flags' ] = flags if arches is not None : body [ 'arches' ] = arches if multilib_arches : body [ 'multilib_arches' ] = multilib_arches body [ 'multilib_method' ] = multilib_method or MULTILIB_METHOD_DEFAULT if source_type == "module" and modular_koji_tags : body [ 'modular_koji_tags' ] = modular_koji_tags logger . info ( "Starting compose: %s" , body ) response = self . session . post ( '{}composes/' . format ( self . url ) , json = body ) response . raise_for_status ( ) return response . json ( )
Start a new ODCS compose
297
6
28,431
def renew_compose ( self , compose_id ) : logger . info ( "Renewing compose %d" , compose_id ) response = self . session . patch ( '{}composes/{}' . format ( self . url , compose_id ) ) response . raise_for_status ( ) response_json = response . json ( ) compose_id = response_json [ 'id' ] logger . info ( "Renewed compose is %d" , compose_id ) return response_json
Renew or extend existing compose
111
6
28,432
def wait_for_compose ( self , compose_id , burst_retry = 1 , burst_length = 30 , slow_retry = 10 , timeout = 1800 ) : logger . debug ( "Getting compose information for information for compose_id={}" . format ( compose_id ) ) url = '{}composes/{}' . format ( self . url , compose_id ) start_time = time . time ( ) while True : response = self . session . get ( url ) response . raise_for_status ( ) response_json = response . json ( ) if response_json [ 'state_name' ] == 'failed' : state_reason = response_json . get ( 'state_reason' , 'Unknown' ) logger . error ( dedent ( """\ Compose %s failed: %s Details: %s """ ) , compose_id , state_reason , json . dumps ( response_json , indent = 4 ) ) raise RuntimeError ( 'Failed request for compose_id={}: {}' . format ( compose_id , state_reason ) ) if response_json [ 'state_name' ] not in [ 'wait' , 'generating' ] : logger . debug ( "Retrieved compose information for compose_id={}: {}" . format ( compose_id , json . dumps ( response_json , indent = 4 ) ) ) return response_json elapsed = time . time ( ) - start_time if elapsed > timeout : raise RuntimeError ( "Retrieving %s timed out after %s seconds" % ( url , timeout ) ) else : logger . debug ( "Retrying request compose_id={}, elapsed_time={}" . format ( compose_id , elapsed ) ) if elapsed > burst_length : time . sleep ( slow_retry ) else : time . sleep ( burst_retry )
Wait for compose request to finalize
396
7
28,433
def _find_stages ( self ) : stages = [ ] end = last_user_found = None for part in reversed ( self . dfp . structure ) : if end is None : end = part if part [ 'instruction' ] == 'USER' and not last_user_found : # we will reuse the line verbatim last_user_found = part [ 'content' ] if part [ 'instruction' ] == 'FROM' : stages . insert ( 0 , { 'from_structure' : part , 'end_structure' : end , 'stage_user' : last_user_found } ) end = last_user_found = None return stages
Find limits of each Dockerfile stage
148
7
28,434
def get_digests ( self ) : digests = { } # repository -> digest for registry in self . workflow . push_conf . docker_registries : for image in self . workflow . tag_conf . images : image_str = image . to_str ( ) if image_str in registry . digests : digest = registry . digests [ image_str ] digests [ image . to_str ( registry = False ) ] = digest return digests
Returns a map of repositories to digests
98
8
28,435
def _get_registries ( self ) : if self . workflow . buildstep_result . get ( PLUGIN_BUILD_ORCHESTRATE_KEY ) : registries = self . workflow . push_conf . pulp_registries if not registries : registries = self . workflow . push_conf . all_registries return registries else : return self . workflow . push_conf . all_registries
Return a list of registries that this build updated
91
10
28,436
def get_repositories_and_digests ( self ) : digests = { } # image -> digests typed_digests = { } # media_type -> digests for registry in self . workflow . push_conf . docker_registries : for image in self . workflow . tag_conf . images : image_str = image . to_str ( ) if image_str in registry . digests : image_digests = registry . digests [ image_str ] if self . report_multiple_digests and get_pulp ( self . workflow , None ) : digest_list = [ digest for digest in ( image_digests . v1 , image_digests . v2 ) if digest ] else : digest_list = [ self . select_digest ( image_digests ) ] digests [ image . to_str ( registry = False ) ] = digest_list for digest_version in image_digests . content_type : if digest_version not in image_digests : continue if not get_pulp ( self . workflow , None ) and digest_version == 'v1' : continue digest_type = get_manifest_media_type ( digest_version ) typed_digests [ digest_type ] = image_digests [ digest_version ] if self . workflow . push_conf . pulp_registries : # If pulp was used, only report pulp images registries = self . workflow . push_conf . pulp_registries else : # Otherwise report all the images we pushed registries = self . workflow . push_conf . all_registries repositories = [ ] for registry in registries : image = self . pullspec_image . copy ( ) image . registry = registry . uri pullspec = image . to_str ( ) repositories . append ( pullspec ) digest_list = digests . get ( image . to_str ( registry = False ) , ( ) ) for digest in digest_list : digest_pullspec = image . to_str ( tag = False ) + "@" + digest repositories . append ( digest_pullspec ) return repositories , typed_digests
Returns a map of images to their repositories and a map of media types to each digest
456
17
28,437
def update_buildroot_koji ( self , buildroot , output ) : docker = output [ 1 ] [ 'extra' ] [ 'docker' ] name = '' for tag in docker [ 'tags' ] : for repo in docker [ 'repositories' ] : if tag in repo : iname = ImageName . parse ( repo ) name = iname . to_str ( registry = False ) break buildroot [ 'extra' ] [ 'osbs' ] [ 'koji' ] = { 'build_name' : name , 'builder_image_id' : docker . get ( 'digests' , { } ) }
put the final koji information in the buildroot under extra . osbs
138
15
28,438
def get_metadata ( self ) : try : metadata = get_build_json ( ) [ "metadata" ] self . build_id = metadata [ "name" ] except KeyError : self . log . error ( "No build metadata" ) raise for image in self . workflow . tag_conf . unique_images : self . pullspec_image = image break for image in self . workflow . tag_conf . primary_images : # dash at first/last postition does not count if '-' in image . tag [ 1 : - 1 ] : self . pullspec_image = image break if not self . pullspec_image : raise RuntimeError ( 'Unable to determine pullspec_image' ) metadata_version = 0 buildroot = self . get_buildroot ( build_id = self . build_id ) output_files = self . get_output ( buildroot [ 'id' ] ) output = [ output . metadata for output in output_files ] koji_metadata = { 'metadata_version' : metadata_version , 'buildroots' : [ buildroot ] , 'output' : output , } self . update_buildroot_koji ( buildroot , output ) return koji_metadata , output_files
Build the metadata needed for importing the build
263
8
28,439
def get_worker_digests ( self ) : try : builds = self . workflow . build_result . annotations [ 'worker-builds' ] except ( TypeError , KeyError ) : # This annotation is only set for the orchestrator build. # It's not present, so this is a worker build. return { } worker_digests = { } for plat , annotation in builds . items ( ) : digests = annotation [ 'digests' ] self . log . debug ( "build %s has digests: %s" , plat , digests ) for digest in digests : reg = registry_hostname ( digest [ 'registry' ] ) worker_digests . setdefault ( reg , [ ] ) worker_digests [ reg ] . append ( digest ) return worker_digests
If we are being called from an orchestrator build collect the worker node data and recreate the data locally .
171
21
28,440
def make_remote_image_result ( annotations = None , labels = None ) : return BuildResult ( image_id = BuildResult . REMOTE_IMAGE , annotations = annotations , labels = labels )
Instantiate BuildResult for image not built locally .
43
10
28,441
def base_image_inspect ( self ) : if self . _base_image_inspect is None : if self . base_from_scratch : self . _base_image_inspect = { } elif self . parents_pulled or self . custom_base_image : try : self . _base_image_inspect = self . tasker . inspect_image ( self . base_image ) except docker . errors . NotFound : # If the base image cannot be found throw KeyError - # as this property should behave like a dict raise KeyError ( "Unprocessed base image Dockerfile cannot be inspected" ) else : self . _base_image_inspect = atomic_reactor . util . get_inspect_for_image ( self . base_image , self . base_image . registry , self . base_image_insecure , self . base_image_dockercfg_path ) base_image_str = str ( self . base_image ) if base_image_str not in self . _parent_images_inspect : self . _parent_images_inspect [ base_image_str ] = self . _base_image_inspect return self . _base_image_inspect
inspect base image
268
4
28,442
def parent_image_inspect ( self , image ) : image_name = ImageName . parse ( image ) if image_name not in self . _parent_images_inspect : if self . parents_pulled : self . _parent_images_inspect [ image_name ] = self . tasker . inspect_image ( image ) else : self . _parent_images_inspect [ image_name ] = atomic_reactor . util . get_inspect_for_image ( image_name , image_name . registry , self . base_image_insecure , self . base_image_dockercfg_path ) return self . _parent_images_inspect [ image_name ]
inspect parent image
154
4
28,443
def inspect_built_image ( self ) : logger . info ( "inspecting built image '%s'" , self . image_id ) self . ensure_is_built ( ) # dict with lots of data, see man docker-inspect inspect_data = self . tasker . inspect_image ( self . image_id ) return inspect_data
inspect built image
75
4
28,444
def get_base_image_info ( self ) : if self . base_from_scratch : return logger . info ( "getting information about base image '%s'" , self . base_image ) image_info = self . tasker . get_image_info_by_image_name ( self . base_image ) items_count = len ( image_info ) if items_count == 1 : return image_info [ 0 ] elif items_count <= 0 : logger . error ( "image '%s' not found" , self . base_image ) raise RuntimeError ( "image '%s' not found" % self . base_image ) else : logger . error ( "multiple (%d) images found for image '%s'" , items_count , self . base_image ) raise RuntimeError ( "multiple (%d) images found for image '%s'" % ( items_count , self . base_image ) )
query docker about base image
203
5
28,445
def get_built_image_info ( self ) : logger . info ( "getting information about built image '%s'" , self . image ) image_info = self . tasker . get_image_info_by_image_name ( self . image ) items_count = len ( image_info ) if items_count == 1 : return image_info [ 0 ] elif items_count <= 0 : logger . error ( "image '%s' not found" , self . image ) raise RuntimeError ( "image '%s' not found" % self . image ) else : logger . error ( "multiple (%d) images found for image '%s'" , items_count , self . image ) raise RuntimeError ( "multiple (%d) images found for image '%s'" % ( items_count , self . image ) )
query docker about built image
180
5
28,446
def build_inside ( input_method , input_args = None , substitutions = None ) : def process_keyvals ( keyvals ) : """ ["key=val", "x=y"] -> {"key": "val", "x": "y"} """ keyvals = keyvals or [ ] processed_keyvals = { } for arg in keyvals : key , value = arg . split ( "=" , 1 ) processed_keyvals [ key ] = value return processed_keyvals main = __name__ . split ( '.' , 1 ) [ 0 ] log_encoding = get_logging_encoding ( main ) logger . info ( "log encoding: %s" , log_encoding ) if not input_method : raise RuntimeError ( "No input method specified!" ) logger . debug ( "getting build json from input %s" , input_method ) cleaned_input_args = process_keyvals ( input_args ) cleaned_input_args [ 'substitutions' ] = process_keyvals ( substitutions ) input_runner = InputPluginsRunner ( [ { 'name' : input_method , 'args' : cleaned_input_args } ] ) build_json = input_runner . run ( ) [ input_method ] if isinstance ( build_json , Exception ) : raise RuntimeError ( "Input plugin raised exception: {}" . format ( build_json ) ) logger . debug ( "build json: %s" , build_json ) if not build_json : raise RuntimeError ( "No valid build json!" ) if not isinstance ( build_json , dict ) : raise RuntimeError ( "Input plugin did not return valid build json: {}" . format ( build_json ) ) dbw = DockerBuildWorkflow ( * * build_json ) try : build_result = dbw . build_docker_image ( ) except Exception as e : logger . error ( 'image build failed: %s' , e ) raise else : if not build_result or build_result . is_failed ( ) : raise RuntimeError ( "no image built" ) else : logger . info ( "build has finished successfully \\o/" )
use requested input plugin to load configuration and then initiate build
465
11
28,447
def run ( self ) : while True : # make sure to run at least once before exiting with self . _lock : self . _update ( self . _data ) if self . _done : break time . sleep ( 1 )
Overrides parent method to implement thread s functionality .
48
11
28,448
def get_config ( workflow ) : try : workspace = workflow . plugin_workspace [ ReactorConfigPlugin . key ] return workspace [ WORKSPACE_CONF_KEY ] except KeyError : # The plugin did not run or was not successful: use defaults conf = ReactorConfig ( ) workspace = workflow . plugin_workspace . get ( ReactorConfigPlugin . key , { } ) workspace [ WORKSPACE_CONF_KEY ] = conf workflow . plugin_workspace [ ReactorConfigPlugin . key ] = workspace return conf
Obtain configuration object Does not fail
114
7
28,449
def run ( self ) : if self . reactor_config_map : self . log . info ( "reading config from REACTOR_CONFIG env variable" ) conf = read_yaml ( self . reactor_config_map , 'schemas/config.json' ) else : config_filename = os . path . join ( self . config_path , self . basename ) self . log . info ( "reading config from %s" , config_filename ) conf = read_yaml_from_file_path ( config_filename , 'schemas/config.json' ) reactor_conf = ReactorConfig ( conf ) workspace = self . workflow . plugin_workspace . setdefault ( self . key , { } ) workspace [ WORKSPACE_CONF_KEY ] = reactor_conf self . log . info ( "reading config content %s" , reactor_conf . conf ) # need to stash this on the workflow for access in a place that can't import this module self . workflow . default_image_build_method = get_default_image_build_method ( self . workflow )
Run the plugin
239
3
28,450
def _check_build_input ( self , image , args_path ) : try : with open ( os . path . join ( args_path , BUILD_JSON ) ) as json_args : logger . debug ( "build input: image = '%s', args = '%s'" , image , json_args . read ( ) ) except ( IOError , OSError ) as ex : logger . error ( "unable to open json arguments: %r" , ex ) raise RuntimeError ( "Unable to open json arguments: %r" % ex ) if not self . tasker . image_exists ( image ) : logger . error ( "provided build image doesn't exist: '%s'" , image ) raise RuntimeError ( "Provided build image doesn't exist: '%s'" % image )
Internal method validate provided args .
177
6
28,451
def build_image_from_path ( self , path , image , use_cache = False , remove_im = True ) : logger . info ( "building image '%s' from path '%s'" , image , path ) response = self . d . build ( path = path , tag = image . to_str ( ) , nocache = not use_cache , decode = True , rm = remove_im , forcerm = True , pull = False ) # returns generator return response
build image from provided path and tag it
106
8
28,452
def build_image_from_git ( self , url , image , git_path = None , git_commit = None , copy_dockerfile_to = None , stream = False , use_cache = False ) : logger . info ( "building image '%s' from git repo '%s' specified as URL '%s'" , image , git_path , url ) logger . info ( "will copy Dockerfile to '%s'" , copy_dockerfile_to ) temp_dir = tempfile . mkdtemp ( ) response = None try : clone_git_repo ( url , temp_dir , git_commit ) build_file_path , build_file_dir = figure_out_build_file ( temp_dir , git_path ) if copy_dockerfile_to : # TODO: pre build plugin shutil . copyfile ( build_file_path , copy_dockerfile_to ) response = self . build_image_from_path ( build_file_dir , image , use_cache = use_cache ) finally : try : shutil . rmtree ( temp_dir ) except ( IOError , OSError ) as ex : # no idea why this is happening logger . warning ( "Failed to remove dir '%s': %r" , temp_dir , ex ) logger . info ( "build finished" ) return response
build image from provided url and tag it
297
8
28,453
def run ( self , image , command = None , create_kwargs = None , start_kwargs = None , volume_bindings = None , privileged = None ) : logger . info ( "creating container from image '%s' and running it" , image ) create_kwargs = create_kwargs or { } if 'host_config' not in create_kwargs : conf = { } if volume_bindings is not None : conf [ 'binds' ] = volume_bindings if privileged is not None : conf [ 'privileged' ] = privileged create_kwargs [ 'host_config' ] = self . d . create_host_config ( * * conf ) start_kwargs = start_kwargs or { } logger . debug ( "image = '%s', command = '%s', create_kwargs = '%s', start_kwargs = '%s'" , image , command , create_kwargs , start_kwargs ) if isinstance ( image , ImageName ) : image = image . to_str ( ) container_dict = self . d . create_container ( image , command = command , * * create_kwargs ) container_id = container_dict [ 'Id' ] logger . debug ( "container_id = '%s'" , container_id ) self . d . start ( container_id , * * start_kwargs ) # returns None return container_id
create container from provided image and start it
309
8
28,454
def commit_container ( self , container_id , image = None , message = None ) : logger . info ( "committing container '%s'" , container_id ) logger . debug ( "container_id = '%s', image = '%s', message = '%s'" , container_id , image , message ) tag = None if image : tag = image . tag image = image . to_str ( tag = False ) response = self . d . commit ( container_id , repository = image , tag = tag , message = message ) logger . debug ( "response = '%s'" , response ) try : return response [ 'Id' ] except KeyError : logger . error ( "ID missing from commit response" ) raise RuntimeError ( "ID missing from commit response" )
create image from provided container
169
5
28,455
def pull_image ( self , image , insecure = False , dockercfg_path = None ) : logger . info ( "pulling image '%s' from registry" , image ) logger . debug ( "image = '%s', insecure = '%s'" , image , insecure ) tag = image . tag if dockercfg_path : self . login ( registry = image . registry , docker_secret_path = dockercfg_path ) try : command_result = self . retry_generator ( self . d . pull , image . to_str ( tag = False ) , tag = tag , insecure_registry = insecure , decode = True , stream = True ) except TypeError : # because changing api is fun command_result = self . retry_generator ( self . d . pull , image . to_str ( tag = False ) , tag = tag , decode = True , stream = True ) self . last_logs = command_result . logs return image . to_str ( )
pull provided image from registry
217
5
28,456
def tag_image ( self , image , target_image , force = False ) : logger . info ( "tagging image '%s' as '%s'" , image , target_image ) logger . debug ( "image = '%s', target_image_name = '%s'" , image , target_image ) if not isinstance ( image , ImageName ) : image = ImageName . parse ( image ) if image != target_image : response = self . d . tag ( image . to_str ( ) , target_image . to_str ( tag = False ) , tag = target_image . tag , force = force ) # returns True/False if not response : logger . error ( "failed to tag image" ) raise RuntimeError ( "Failed to tag image '%s': target_image = '%s'" % image . to_str ( ) , target_image ) else : logger . debug ( 'image already tagged correctly, nothing to do' ) return target_image . to_str ( )
tag provided image with specified image_name registry and tag
220
11
28,457
def login ( self , registry , docker_secret_path ) : logger . info ( "logging in: registry '%s', secret path '%s'" , registry , docker_secret_path ) # Docker-py needs username dockercfg = Dockercfg ( docker_secret_path ) credentials = dockercfg . get_credentials ( registry ) unpacked_auth = dockercfg . unpack_auth_b64 ( registry ) username = credentials . get ( 'username' ) if unpacked_auth : username = unpacked_auth . username if not username : raise RuntimeError ( "Failed to extract a username from '%s'" % dockercfg ) logger . info ( "found username %s for registry %s" , username , registry ) response = self . d . login ( registry = registry , username = username , dockercfg_path = dockercfg . json_secret_path ) if not response : raise RuntimeError ( "Failed to login to '%s' with config '%s'" % ( registry , dockercfg ) ) if u'Status' in response and response [ u'Status' ] == u'Login Succeeded' : logger . info ( "login succeeded" ) else : if not ( isinstance ( response , dict ) and 'password' in response . keys ( ) ) : # for some reason docker-py returns the contents of the dockercfg - we shouldn't # be displaying that logger . debug ( "response: %r" , response )
login to docker registry
322
4
28,458
def push_image ( self , image , insecure = False ) : logger . info ( "pushing image '%s'" , image ) logger . debug ( "image: '%s', insecure: '%s'" , image , insecure ) try : # push returns string composed of newline separated jsons; exactly what 'docker push' # outputs command_result = self . retry_generator ( self . d . push , image . to_str ( tag = False ) , tag = image . tag , insecure_registry = insecure , decode = True , stream = True ) except TypeError : # because changing api is fun command_result = self . retry_generator ( self . d . push , image . to_str ( tag = False ) , tag = image . tag , decode = True , stream = True ) self . last_logs = command_result . logs return command_result . parsed_logs
push provided image to registry
197
5
28,459
def tag_and_push_image ( self , image , target_image , insecure = False , force = False , dockercfg = None ) : logger . info ( "tagging and pushing image '%s' as '%s'" , image , target_image ) logger . debug ( "image = '%s', target_image = '%s'" , image , target_image ) self . tag_image ( image , target_image , force = force ) if dockercfg : self . login ( registry = target_image . registry , docker_secret_path = dockercfg ) return self . push_image ( target_image , insecure = insecure )
tag provided image and push it to registry
142
8
28,460
def remove_image ( self , image_id , force = False , noprune = False ) : logger . info ( "removing image '%s' from filesystem" , image_id ) logger . debug ( "image_id = '%s'" , image_id ) if isinstance ( image_id , ImageName ) : image_id = image_id . to_str ( ) self . d . remove_image ( image_id , force = force , noprune = noprune )
remove provided image from filesystem
112
5
28,461
def remove_container ( self , container_id , force = False ) : logger . info ( "removing container '%s' from filesystem" , container_id ) logger . debug ( "container_id = '%s'" , container_id ) self . d . remove_container ( container_id , force = force )
remove provided container from filesystem
70
5
28,462
def image_exists ( self , image_id ) : logger . info ( "checking whether image '%s' exists" , image_id ) logger . debug ( "image_id = '%s'" , image_id ) try : response = self . d . inspect_image ( image_id ) except APIError as ex : logger . warning ( repr ( ex ) ) response = False else : response = response is not None logger . debug ( "image exists: %s" , response ) return response
does provided image exists?
108
5
28,463
def get_volumes_for_container ( self , container_id , skip_empty_source = True ) : logger . info ( "listing volumes for container '%s'" , container_id ) inspect_output = self . d . inspect_container ( container_id ) volumes = inspect_output [ 'Mounts' ] or [ ] volume_names = [ x [ 'Name' ] for x in volumes ] if skip_empty_source : # Don't show volumes which are not on the filesystem volume_names = [ x [ 'Name' ] for x in volumes if x [ 'Source' ] != "" ] logger . debug ( "volumes = %s" , volume_names ) return volume_names
get a list of volumes mounter in a container
153
10
28,464
def remove_volume ( self , volume_name ) : logger . info ( "removing volume '%s'" , volume_name ) try : self . d . remove_volume ( volume_name ) except APIError as ex : if ex . response . status_code == requests . codes . CONFLICT : logger . debug ( "ignoring a conflict when removing volume %s" , volume_name ) else : raise ex
remove a volume by its name
90
6
28,465
def run ( self ) : env_name = self . env_name or BUILD_JSON_ENV try : build_cfg_json = os . environ [ env_name ] except KeyError : self . log . error ( "build config not found in env variable '%s'" , env_name ) return None else : try : return self . substitute_configuration ( json . loads ( build_cfg_json ) ) except ValueError : self . log . error ( "couldn't load build config: invalid json" ) return None
get json with build config from environment variable
116
8
28,466
def get_default_image_build_conf ( self ) : target = self . koji_target vcs_info = self . workflow . source . get_vcs_info ( ) ksurl = '{}#{}' . format ( vcs_info . vcs_url , vcs_info . vcs_ref ) base_urls = [ ] for repo in self . repos : for url in self . extract_base_url ( repo ) : # Imagefactory only supports $arch variable. url = url . replace ( '$basearch' , '$arch' ) base_urls . append ( url ) install_tree = base_urls [ 0 ] if base_urls else '' repo = ',' . join ( base_urls ) kwargs = { 'target' : target , 'ksurl' : ksurl , 'install_tree' : install_tree , 'repo' : repo , } config_fp = StringIO ( self . DEFAULT_IMAGE_BUILD_CONF . format ( * * kwargs ) ) config = ConfigParser ( ) config . readfp ( config_fp ) self . update_config_from_dockerfile ( config ) return config
Create a default image build config
268
6
28,467
def update_config_from_dockerfile ( self , config ) : labels = Labels ( df_parser ( self . workflow . builder . df_path ) . labels ) for config_key , label in ( ( 'name' , Labels . LABEL_TYPE_COMPONENT ) , ( 'version' , Labels . LABEL_TYPE_VERSION ) , ) : try : _ , value = labels . get_name_and_value ( label ) except KeyError : pass else : config . set ( 'image-build' , config_key , value )
Updates build config with values from the Dockerfile
125
10
28,468
def load_plugins ( self , plugin_class_name ) : # imp.findmodule('atomic_reactor') doesn't work plugins_dir = os . path . join ( os . path . dirname ( __file__ ) , 'plugins' ) logger . debug ( "loading plugins from dir '%s'" , plugins_dir ) files = [ os . path . join ( plugins_dir , f ) for f in os . listdir ( plugins_dir ) if f . endswith ( ".py" ) ] if self . plugin_files : logger . debug ( "loading additional plugins from files '%s'" , self . plugin_files ) files += self . plugin_files plugin_class = globals ( ) [ plugin_class_name ] plugin_classes = { } for f in files : module_name = os . path . basename ( f ) . rsplit ( '.' , 1 ) [ 0 ] # Do not reload plugins if module_name in sys . modules : f_module = sys . modules [ module_name ] else : try : logger . debug ( "load file '%s'" , f ) f_module = imp . load_source ( module_name , f ) except ( IOError , OSError , ImportError , SyntaxError ) as ex : logger . warning ( "can't load module '%s': %r" , f , ex ) continue for name in dir ( f_module ) : binding = getattr ( f_module , name , None ) try : # if you try to compare binding and PostBuildPlugin, python won't match them # if you call this script directly b/c: # ! <class 'plugins.plugin_rpmqa.PostBuildRPMqaPlugin'> <= <class # '__main__.PostBuildPlugin'> # but # <class 'plugins.plugin_rpmqa.PostBuildRPMqaPlugin'> <= <class # 'atomic_reactor.plugin.PostBuildPlugin'> is_sub = issubclass ( binding , plugin_class ) except TypeError : is_sub = False if binding and is_sub and plugin_class . __name__ != binding . __name__ : plugin_classes [ binding . key ] = binding return plugin_classes
load all available plugins
478
4
28,469
def get_available_plugins ( self ) : available_plugins = [ ] PluginData = namedtuple ( 'PluginData' , 'name, plugin_class, conf, is_allowed_to_fail' ) for plugin_request in self . plugins_conf : plugin_name = plugin_request [ 'name' ] try : plugin_class = self . plugin_classes [ plugin_name ] except KeyError : if plugin_request . get ( 'required' , True ) : msg = ( "no such plugin: '%s', did you set " "the correct plugin type?" ) % plugin_name exc = PluginFailedException ( msg ) self . on_plugin_failed ( plugin_name , exc ) logger . error ( msg ) raise exc else : # This plugin is marked as not being required logger . warning ( "plugin '%s' requested but not available" , plugin_name ) continue plugin_is_allowed_to_fail = plugin_request . get ( 'is_allowed_to_fail' , getattr ( plugin_class , "is_allowed_to_fail" , True ) ) plugin_conf = plugin_request . get ( "args" , { } ) plugin = PluginData ( plugin_name , plugin_class , plugin_conf , plugin_is_allowed_to_fail ) available_plugins . append ( plugin ) return available_plugins
check requested plugins availability and handle missing plugins
295
8
28,470
def run ( self , keep_going = False , buildstep_phase = False ) : failed_msgs = [ ] plugin_successful = False plugin_response = None available_plugins = self . available_plugins for plugin in available_plugins : plugin_successful = False logger . debug ( "running plugin '%s'" , plugin . name ) start_time = datetime . datetime . now ( ) plugin_response = None skip_response = False try : plugin_instance = self . create_instance_from_plugin ( plugin . plugin_class , plugin . conf ) self . save_plugin_timestamp ( plugin . plugin_class . key , start_time ) plugin_response = plugin_instance . run ( ) plugin_successful = True if buildstep_phase : assert isinstance ( plugin_response , BuildResult ) if plugin_response . is_failed ( ) : logger . error ( "Build step plugin %s failed: %s" , plugin . plugin_class . key , plugin_response . fail_reason ) self . on_plugin_failed ( plugin . plugin_class . key , plugin_response . fail_reason ) plugin_successful = False self . plugins_results [ plugin . plugin_class . key ] = plugin_response break except AutoRebuildCanceledException as ex : # if auto rebuild is canceled, then just reraise # NOTE: We need to catch and reraise explicitly, so that the below except clause # doesn't catch this and make PluginFailedException out of it in the end # (calling methods would then need to parse exception message to see if # AutoRebuildCanceledException was raised here) raise except InappropriateBuildStepError : logger . debug ( 'Build step %s is not appropriate' , plugin . plugin_class . key ) # don't put None, in results for InappropriateBuildStepError skip_response = True if not buildstep_phase : raise except Exception as ex : msg = "plugin '%s' raised an exception: %r" % ( plugin . plugin_class . key , ex ) logger . debug ( traceback . format_exc ( ) ) if not plugin . is_allowed_to_fail : self . on_plugin_failed ( plugin . plugin_class . key , ex ) if plugin . is_allowed_to_fail or keep_going : logger . warning ( msg ) logger . info ( "error is not fatal, continuing..." ) if not plugin . is_allowed_to_fail : failed_msgs . append ( msg ) else : logger . error ( msg ) raise PluginFailedException ( msg ) plugin_response = ex try : if start_time : finish_time = datetime . datetime . now ( ) duration = finish_time - start_time seconds = duration . total_seconds ( ) logger . debug ( "plugin '%s' finished in %ds" , plugin . name , seconds ) self . save_plugin_duration ( plugin . plugin_class . key , seconds ) except Exception : logger . exception ( "failed to save plugin duration" ) if not skip_response : self . plugins_results [ plugin . plugin_class . key ] = plugin_response if plugin_successful and buildstep_phase : logger . debug ( 'stopping further execution of plugins ' 'after first successful plugin' ) break if len ( failed_msgs ) == 1 : raise PluginFailedException ( failed_msgs [ 0 ] ) elif len ( failed_msgs ) > 1 : raise PluginFailedException ( "Multiple plugins raised an exception: " + str ( failed_msgs ) ) if not plugin_successful and buildstep_phase and not plugin_response : self . on_plugin_failed ( "BuildStepPlugin" , "No appropriate build step" ) raise PluginFailedException ( "No appropriate build step" ) return self . plugins_results
run all requested plugins
821
4
28,471
def _should_send ( self , rebuild , success , auto_canceled , manual_canceled ) : should_send = False should_send_mapping = { self . MANUAL_SUCCESS : not rebuild and success , self . MANUAL_FAIL : not rebuild and not success , self . MANUAL_CANCELED : not rebuild and manual_canceled , self . AUTO_SUCCESS : rebuild and success , self . AUTO_FAIL : rebuild and not success , self . AUTO_CANCELED : rebuild and auto_canceled } for state in self . send_on : should_send |= should_send_mapping [ state ] return should_send
Return True if any state in self . send_on meets given conditions thus meaning that a notification mail should be sent .
156
24
28,472
def _render_mail ( self , rebuild , success , auto_canceled , manual_canceled ) : subject_template = '%(endstate)s building image %(image_name)s' body_template = '\n' . join ( [ 'Image Name: %(image_name)s' , 'Repositories: %(repositories)s' , 'Status: %(endstate)s' , 'Submitted by: %(user)s' , ] ) # Failed autorebuilds include logs as attachments. # Koji integration stores logs in successful Koji Builds. # Don't include logs in these cases. if self . session and not rebuild : body_template += '\nLogs: %(logs)s' endstate = None if auto_canceled or manual_canceled : endstate = 'Canceled' else : endstate = 'Succeeded' if success else 'Failed' url = self . _get_logs_url ( ) image_name , repos = self . _get_image_name_and_repos ( ) repositories = '' for repo in repos : repositories += '\n ' + repo formatting_dict = { 'repositories' : repositories , 'image_name' : image_name , 'endstate' : endstate , 'user' : '<autorebuild>' if rebuild else self . submitter , 'logs' : url } vcs = self . workflow . source . get_vcs_info ( ) if vcs : body_template = '\n' . join ( [ body_template , 'Source url: %(vcs-url)s' , 'Source ref: %(vcs-ref)s' , ] ) formatting_dict [ 'vcs-url' ] = vcs . vcs_url formatting_dict [ 'vcs-ref' ] = vcs . vcs_ref log_files = None if rebuild and endstate == 'Failed' : log_files = self . _fetch_log_files ( ) return ( subject_template % formatting_dict , body_template % formatting_dict , log_files )
Render and return subject and body of the mail to send .
479
12
28,473
def _send_mail ( self , receivers_list , subject , body , log_files = None ) : if not receivers_list : self . log . info ( 'no valid addresses in requested addresses. Doing nothing' ) return self . log . info ( 'sending notification to %s ...' , receivers_list ) if log_files : msg = MIMEMultipart ( ) msg . attach ( MIMEText ( body ) ) for entry in log_files : log_mime = MIMEBase ( 'application' , "octet-stream" ) log_file = entry [ 0 ] # Output.file log_file . seek ( 0 ) log_mime . set_payload ( log_file . read ( ) ) encoders . encode_base64 ( log_mime ) log_mime . add_header ( 'Content-Disposition' , 'attachment; filename="{}"' . format ( entry [ 1 ] [ 'filename' ] ) ) msg . attach ( log_mime ) else : msg = MIMEText ( body ) msg [ 'Subject' ] = subject msg [ 'From' ] = self . from_address msg [ 'To' ] = ', ' . join ( [ x . strip ( ) for x in receivers_list ] ) s = None try : s = get_smtp_session ( self . workflow , self . smtp_fallback ) s . sendmail ( self . from_address , receivers_list , msg . as_string ( ) ) except ( socket . gaierror , smtplib . SMTPException ) : self . log . error ( 'Error communicating with SMTP server' ) raise finally : if s is not None : s . quit ( )
Actually sends the mail with subject and body and optionanl log_file attachements to all members of receivers_list .
375
26
28,474
def get_platform_metadata ( self , platform , build_annotations ) : # retrieve all the workspace data build_info = get_worker_build_info ( self . workflow , platform ) osbs = build_info . osbs kind = "configmap/" cmlen = len ( kind ) cm_key_tmp = build_annotations [ 'metadata_fragment' ] cm_frag_key = build_annotations [ 'metadata_fragment_key' ] if not cm_key_tmp or not cm_frag_key or cm_key_tmp [ : cmlen ] != kind : msg = "Bad ConfigMap annotations for platform {}" . format ( platform ) self . log . warning ( msg ) raise BadConfigMapError ( msg ) # use the key to get the configmap data and then use the # fragment_key to get the build metadata inside the configmap data # save the worker_build metadata cm_key = cm_key_tmp [ cmlen : ] try : cm_data = osbs . get_config_map ( cm_key ) except Exception : self . log . error ( "Failed to get ConfigMap for platform %s" , platform ) raise metadata = cm_data . get_data_by_key ( cm_frag_key ) defer_removal ( self . workflow , cm_key , osbs ) return metadata
Return the metadata for the given platform .
300
8
28,475
def get_output ( self , worker_metadatas ) : outputs = [ ] has_pulp_pull = PLUGIN_PULP_PULL_KEY in self . workflow . exit_results try : pulp_sync_results = self . workflow . postbuild_results [ PLUGIN_PULP_SYNC_KEY ] crane_registry = pulp_sync_results [ 0 ] except ( KeyError , IndexError ) : crane_registry = None for platform in worker_metadatas : for instance in worker_metadatas [ platform ] [ 'output' ] : instance [ 'buildroot_id' ] = '{}-{}' . format ( platform , instance [ 'buildroot_id' ] ) if instance [ 'type' ] == 'docker-image' : # update image ID with pulp_pull results; # necessary when using Pulp < 2.14. Only do this # when building for a single architecture -- if # building for many, we know Pulp has schema 2 # support. if len ( worker_metadatas ) == 1 and has_pulp_pull : if self . workflow . builder . image_id is not None : instance [ 'extra' ] [ 'docker' ] [ 'id' ] = self . workflow . builder . image_id # update repositories to point to Crane if crane_registry : pulp_pullspecs = [ ] docker = instance [ 'extra' ] [ 'docker' ] for pullspec in docker [ 'repositories' ] : image = ImageName . parse ( pullspec ) image . registry = crane_registry . registry pulp_pullspecs . append ( image . to_str ( ) ) docker [ 'repositories' ] = pulp_pullspecs outputs . append ( instance ) return outputs
Build the output entry of the metadata .
391
8
28,476
def handle_401 ( self , response , repo , * * kwargs ) : if response . status_code != requests . codes . unauthorized : return response auth_info = response . headers . get ( 'www-authenticate' , '' ) if 'bearer' not in auth_info . lower ( ) : return response self . _token_cache [ repo ] = self . _get_token ( auth_info , repo ) # Consume content and release the original connection # to allow our new request to reuse the same one. # This pattern was inspired by the source code of requests.auth.HTTPDigestAuth response . content response . close ( ) retry_request = response . request . copy ( ) extract_cookies_to_jar ( retry_request . _cookies , response . request , response . raw ) retry_request . prepare_cookies ( retry_request . _cookies ) self . _set_header ( retry_request , repo ) retry_response = response . connection . send ( retry_request , * * kwargs ) retry_response . history . append ( response ) retry_response . request = retry_request return retry_response
Fetch Bearer token and retry .
261
9
28,477
def remove_plugins_without_parameters ( self ) : # Compatibility code for dockerfile_content plugin self . remove_plugin ( 'prebuild_plugins' , PLUGIN_DOCKERFILE_CONTENT_KEY , 'dockerfile_content is deprecated, please remove from config' ) if not self . reactor_env : return self . remove_koji_plugins ( ) self . remove_pulp_plugins ( ) if not self . get_value ( 'odcs' ) : self . remove_plugin ( 'prebuild_plugins' , PLUGIN_RESOLVE_COMPOSES_KEY , 'no odcs available' ) if not self . get_value ( 'smtp' ) : self . remove_plugin ( 'exit_plugins' , PLUGIN_SENDMAIL_KEY , 'no mailhost available' ) if not self . get_value ( 'sources_command' ) : self . remove_plugin ( 'prebuild_plugins' , PLUGIN_DISTGIT_FETCH_KEY , 'no sources command' )
This used to be handled in BuildRequest but with REACTOR_CONFIG osbs - client doesn t have enough information .
234
26
28,478
def adjust_for_autorebuild ( self ) : if not is_rebuild ( self . workflow ) : return if self . signing_intent : self . log . info ( 'Autorebuild detected: Ignoring signing_intent plugin parameter' ) self . signing_intent = None if self . compose_ids : self . log . info ( 'Autorebuild detected: Ignoring compose_ids plugin parameter' ) self . compose_ids = tuple ( ) self . all_compose_ids = [ ]
Ignore pre - filled signing_intent and compose_ids for autorebuids
108
17
28,479
def resolve_signing_intent ( self ) : all_signing_intents = [ self . odcs_config . get_signing_intent_by_keys ( compose_info . get ( 'sigkeys' , [ ] ) ) for compose_info in self . composes_info ] # Because composes_info may contain composes that were passed as # plugin parameters, add the parent signing intent to avoid the # overall signing intent from surpassing parent's. if self . _parent_signing_intent : all_signing_intents . append ( self . _parent_signing_intent ) # Calculate the least restrictive signing intent signing_intent = min ( all_signing_intents , key = lambda x : x [ 'restrictiveness' ] ) self . log . info ( 'Signing intent for build is %s' , signing_intent [ 'name' ] ) self . compose_config . set_signing_intent ( signing_intent [ 'name' ] )
Determine the correct signing intent
217
7
28,480
def validate_for_request ( self ) : if not self . use_packages and not self . modules and not self . pulp : raise ValueError ( "Nothing to compose (no packages, modules, or enabled pulp repos)" ) if self . packages and not self . koji_tag : raise ValueError ( 'koji_tag is required when packages are used' )
Verify enough information is available for requesting compose .
79
10
28,481
def run ( self ) : path = self . path or CONTAINER_BUILD_JSON_PATH try : with open ( path , 'r' ) as build_cfg_fd : build_cfg_json = json . load ( build_cfg_fd ) except ValueError : self . log . error ( "couldn't decode json from file '%s'" , path ) return None except IOError : self . log . error ( "couldn't read json from file '%s'" , path ) return None else : return self . substitute_configuration ( build_cfg_json )
get json with build config from path
126
7
28,482
def has_operator_manifest ( self ) : dockerfile = df_parser ( self . workflow . builder . df_path , workflow = self . workflow ) labels = Labels ( dockerfile . labels ) try : _ , operator_label = labels . get_name_and_value ( Labels . LABEL_TYPE_OPERATOR_MANIFESTS ) except KeyError : operator_label = 'false' return operator_label . lower ( ) == 'true'
Check if Dockerfile sets the operator manifest label
102
9
28,483
def should_run ( self ) : if self . is_orchestrator ( ) : self . log . warning ( "%s plugin set to run on orchestrator. Skipping" , self . key ) return False if self . operator_manifests_extract_platform != self . platform : self . log . info ( "Only platform [%s] will upload operators metadata. Skipping" , self . operator_manifests_extract_platform ) return False if is_scratch_build ( ) : self . log . info ( "Scratch build. Skipping" ) return False if not self . has_operator_manifest ( ) : self . log . info ( "Operator manifests label not set in Dockerfile. Skipping" ) return False return True
Check if the plugin should run or skip execution .
166
10
28,484
def _read ( self ) : data = b'' for chunk in self . _event_source : for line in chunk . splitlines ( True ) : data += line if data . endswith ( ( b'\r\r' , b'\n\n' , b'\r\n\r\n' ) ) : yield data data = b'' if data : yield data
Read the incoming event source stream and yield event chunks .
84
11
28,485
def check ( wants , has ) : if wants & has == 0 : return False if wants & has < wants : return False return True
Check if a desired scope wants is part of an available scope has .
28
14
28,486
def to_int ( * names , * * kwargs ) : return reduce ( lambda prev , next : ( prev | SCOPE_NAME_DICT . get ( next , 0 ) ) , names , kwargs . pop ( 'default' , 0 ) )
Turns a list of scope names into an integer value .
57
12
28,487
def get_data ( self , request , key = 'params' ) : return request . session . get ( '%s:%s' % ( constants . SESSION_KEY , key ) )
Return stored data from the session store .
42
8
28,488
def cache_data ( self , request , data , key = 'params' ) : request . session [ '%s:%s' % ( constants . SESSION_KEY , key ) ] = data
Cache data in the session store .
43
7
28,489
def clear_data ( self , request ) : for key in request . session . keys ( ) : if key . startswith ( constants . SESSION_KEY ) : del request . session [ key ]
Clear all OAuth related data from the session store .
43
11
28,490
def error_response ( self , request , error , * * kwargs ) : ctx = { } ctx . update ( error ) # If we got a malicious redirect_uri or client_id, remove all the # cached data and tell the resource owner. We will *not* redirect back # to the URL. if error [ 'error' ] in [ 'redirect_uri' , 'unauthorized_client' ] : ctx . update ( next = '/' ) return self . render_to_response ( ctx , * * kwargs ) ctx . update ( next = self . get_redirect_url ( request ) ) return self . render_to_response ( ctx , * * kwargs )
Return an error to be displayed to the resource owner if anything goes awry . Errors can include invalid clients authorization denials and other edge cases such as a wrong redirect_uri in the authorization request .
158
40
28,491
def get_handler ( self , grant_type ) : if grant_type == 'authorization_code' : return self . authorization_code elif grant_type == 'refresh_token' : return self . refresh_token elif grant_type == 'password' : return self . password return None
Return a function or method that is capable handling the grant_type requested by the client or return None to indicate that this type of grant type is not supported resulting in an error response .
65
37
28,492
def get_expire_delta ( self , reference = None ) : if reference is None : reference = now ( ) expiration = self . expires if timezone : if timezone . is_aware ( reference ) and timezone . is_naive ( expiration ) : # MySQL doesn't support timezone for datetime fields # so we assume that the date was stored in the UTC timezone expiration = timezone . make_aware ( expiration , timezone . utc ) elif timezone . is_naive ( reference ) and timezone . is_aware ( expiration ) : reference = timezone . make_aware ( reference , timezone . utc ) timedelta = expiration - reference return timedelta . days * 86400 + timedelta . seconds
Return the number of seconds until this token expires .
159
10
28,493
def _clean_fields ( self ) : try : super ( OAuthForm , self ) . _clean_fields ( ) except OAuthValidationError , e : self . _errors . update ( e . args [ 0 ] )
Overriding the default cleaning behaviour to exit early on errors instead of validating each field .
49
19
28,494
def rfclink ( name , rawtext , text , lineno , inliner , options = { } , content = [ ] ) : node = nodes . reference ( rawtext , "Section " + text , refuri = "%s#section-%s" % ( base_url , text ) ) return [ node ] , [ ]
Link to the OAuth2 draft .
72
8
28,495
def validate ( self , value ) : if self . required and not value : raise OAuthValidationError ( { 'error' : 'invalid_request' } ) # Validate that each value in the value list is in self.choices. for val in value : if not self . valid_value ( val ) : raise OAuthValidationError ( { 'error' : 'invalid_request' , 'error_description' : _ ( "'%s' is not a valid scope." ) % val } )
Validates that the input is a list or tuple .
111
11
28,496
def clean_scope ( self ) : default = SCOPES [ 0 ] [ 0 ] flags = self . cleaned_data . get ( 'scope' , [ ] ) return scope . to_int ( default = default , * flags )
The scope is assembled by combining all the set flags into a single integer value which we can later check again for set bits .
50
25
28,497
def clean ( self ) : data = self . cleaned_data want_scope = data . get ( 'scope' ) or 0 refresh_token = data . get ( 'refresh_token' ) access_token = getattr ( refresh_token , 'access_token' , None ) if refresh_token else None has_scope = access_token . scope if access_token else 0 # Only check if we've actually got a scope in the data # (read: All fields have been cleaned) if want_scope is not 0 and not scope . check ( want_scope , has_scope ) : raise OAuthValidationError ( { 'error' : 'invalid_scope' } ) return data
Make sure that the scope is less or equal to the previous scope!
149
14
28,498
def clean ( self ) : data = self . cleaned_data want_scope = data . get ( 'scope' ) or 0 grant = data . get ( 'grant' ) has_scope = grant . scope if grant else 0 # Only check if we've actually got a scope in the data # (read: All fields have been cleaned) if want_scope is not 0 and not scope . check ( want_scope , has_scope ) : raise OAuthValidationError ( { 'error' : 'invalid_scope' } ) return data
Make sure that the scope is less or equal to the scope allowed on the grant!
116
17
28,499
def short_token ( ) : hash = hashlib . sha1 ( shortuuid . uuid ( ) ) hash . update ( settings . SECRET_KEY ) return hash . hexdigest ( ) [ : : 2 ]
Generate a hash that can be used as an application identifier
49
12