idx int64 0 251k | question stringlengths 53 3.53k | target stringlengths 5 1.23k | len_question int64 20 893 | len_target int64 3 238 |
|---|---|---|---|---|
27,900 | def verify_link_in_task_graph ( chain , decision_link , task_link ) : log . info ( "Verifying the {} {} task definition is part of the {} {} task graph..." . format ( task_link . name , task_link . task_id , decision_link . name , decision_link . task_id ) ) if task_link . task_id in decision_link . task_graph : graph_defn = deepcopy ( decision_link . task_graph [ task_link . task_id ] ) verify_task_in_task_graph ( task_link , graph_defn ) log . info ( "Found {} in the graph; it's a match" . format ( task_link . task_id ) ) return raise_on_errors ( [ "Can't find task {} {} in {} {} task-graph.json!" . format ( task_link . name , task_link . task_id , decision_link . name , decision_link . task_id ) ] ) | Compare the runtime task definition against the decision task graph . | 221 | 11 |
27,901 | async def get_pushlog_info ( decision_link ) : source_env_prefix = decision_link . context . config [ 'source_env_prefix' ] repo = get_repo ( decision_link . task , source_env_prefix ) rev = get_revision ( decision_link . task , source_env_prefix ) context = decision_link . context pushlog_url = context . config [ 'pushlog_url' ] . format ( repo = repo , revision = rev ) log . info ( "Pushlog url {}" . format ( pushlog_url ) ) file_path = os . path . join ( context . config [ "work_dir" ] , "{}_push_log.json" . format ( decision_link . name ) ) pushlog_info = await load_json_or_yaml_from_url ( context , pushlog_url , file_path , overwrite = False ) if len ( pushlog_info [ 'pushes' ] ) != 1 : log . warning ( "Pushlog error: expected a single push at {} but got {}!" . format ( pushlog_url , pushlog_info [ 'pushes' ] ) ) return pushlog_info | Get pushlog info for a decision LinkOfTrust . | 263 | 11 |
27,902 | async def get_scm_level ( context , project ) : await context . populate_projects ( ) level = context . projects [ project ] [ 'access' ] . replace ( "scm_level_" , "" ) return level | Get the scm level for a project from projects . yml . | 51 | 14 |
27,903 | async def populate_jsone_context ( chain , parent_link , decision_link , tasks_for ) : task_ids = { "default" : parent_link . task_id , "decision" : decision_link . task_id , } source_url = get_source_url ( decision_link ) project = get_and_check_project ( chain . context . config [ 'valid_vcs_rules' ] , source_url ) log . debug ( "task_ids: {}" . format ( task_ids ) ) jsone_context = { 'now' : parent_link . task [ 'created' ] , 'as_slugid' : lambda x : task_ids . get ( x , task_ids [ 'default' ] ) , 'tasks_for' : tasks_for , 'repository' : { 'url' : get_repo ( decision_link . task , decision_link . context . config [ 'source_env_prefix' ] ) , 'project' : project , } , 'ownTaskId' : parent_link . task_id , 'taskId' : None } if chain . context . config [ 'cot_product' ] in ( 'mobile' , 'application-services' ) : if tasks_for == 'github-release' : jsone_context . update ( await _get_additional_github_releases_jsone_context ( decision_link ) ) elif tasks_for == 'cron' : jsone_context . update ( _get_additional_git_cron_jsone_context ( decision_link ) ) elif tasks_for == 'github-pull-request' : jsone_context . update ( await _get_additional_github_pull_request_jsone_context ( decision_link ) ) elif tasks_for == 'github-push' : jsone_context . update ( await _get_additional_github_push_jsone_context ( decision_link ) ) else : raise CoTError ( 'Unknown tasks_for "{}" for cot_product "mobile"!' . format ( tasks_for ) ) else : jsone_context [ 'repository' ] [ 'level' ] = await get_scm_level ( chain . context , project ) if tasks_for == 'action' : jsone_context . update ( await _get_additional_hg_action_jsone_context ( parent_link , decision_link ) ) elif tasks_for == 'hg-push' : jsone_context . update ( await _get_additional_hg_push_jsone_context ( parent_link , decision_link ) ) elif tasks_for == 'cron' : jsone_context . update ( await _get_additional_hg_cron_jsone_context ( parent_link , decision_link ) ) else : raise CoTError ( "Unknown tasks_for {}!" . 
format ( tasks_for ) ) log . debug ( "{} json-e context:" . format ( parent_link . name ) ) # format_json() breaks on lambda values; use pprint.pformat here. log . debug ( pprint . pformat ( jsone_context ) ) return jsone_context | Populate the json - e context to rebuild parent_link s task definition . | 717 | 16 |
27,904 | async def get_in_tree_template ( link ) : context = link . context source_url = get_source_url ( link ) if not source_url . endswith ( ( '.yml' , '.yaml' ) ) : raise CoTError ( "{} source url {} doesn't end in .yml or .yaml!" . format ( link . name , source_url ) ) tmpl = await load_json_or_yaml_from_url ( context , source_url , os . path . join ( context . config [ "work_dir" ] , "{}_taskcluster.yml" . format ( link . name ) ) ) return tmpl | Get the in - tree json - e template for a given link . | 152 | 14 |
27,905 | async def get_action_context_and_template ( chain , parent_link , decision_link ) : actions_path = decision_link . get_artifact_full_path ( 'public/actions.json' ) all_actions = load_json_or_yaml ( actions_path , is_path = True ) [ 'actions' ] action_name = get_action_callback_name ( parent_link . task ) action_defn = _get_action_from_actions_json ( all_actions , action_name ) jsone_context = await populate_jsone_context ( chain , parent_link , decision_link , "action" ) if 'task' in action_defn and chain . context . config [ 'min_cot_version' ] <= 2 : tmpl = { 'tasks' : [ action_defn [ 'task' ] ] } elif action_defn . get ( 'kind' ) == 'hook' : # action-hook. in_tree_tmpl = await get_in_tree_template ( decision_link ) action_perm = _get_action_perm ( action_defn ) tmpl = _wrap_action_hook_with_let ( in_tree_tmpl , action_perm ) # define the JSON-e context with which the hook's task template was # rendered, defined at # https://docs.taskcluster.net/docs/reference/core/taskcluster-hooks/docs/firing-hooks#triggerhook # This is created by working backward from the json-e context the # .taskcluster.yml expects jsone_context = { 'payload' : _render_action_hook_payload ( action_defn , jsone_context , parent_link ) , 'taskId' : parent_link . task_id , 'now' : jsone_context [ 'now' ] , 'as_slugid' : jsone_context [ 'as_slugid' ] , 'clientId' : jsone_context . get ( 'clientId' ) , } elif action_defn . get ( 'kind' ) == 'task' : # XXX Get rid of this block when all actions are hooks tmpl = await get_in_tree_template ( decision_link ) for k in ( 'action' , 'push' , 'repository' ) : jsone_context [ k ] = deepcopy ( action_defn [ 'hookPayload' ] [ 'decision' ] . get ( k , { } ) ) jsone_context [ 'action' ] [ 'repo_scope' ] = get_repo_scope ( parent_link . task , parent_link . name ) else : raise CoTError ( 'Unknown action kind `{kind}` for action `{name}`.' . format ( kind = action_defn . get ( 'kind' , '<MISSING>' ) , name = action_defn . 
get ( 'name' , '<MISSING>' ) , ) ) return jsone_context , tmpl | Get the appropriate json - e context and template for an action task . | 683 | 14 |
27,906 | async def get_jsone_context_and_template ( chain , parent_link , decision_link , tasks_for ) : if tasks_for == 'action' : jsone_context , tmpl = await get_action_context_and_template ( chain , parent_link , decision_link ) else : tmpl = await get_in_tree_template ( decision_link ) jsone_context = await populate_jsone_context ( chain , parent_link , decision_link , tasks_for ) return jsone_context , tmpl | Get the appropriate json - e context and template for any parent task . | 123 | 14 |
27,907 | def check_and_update_action_task_group_id ( parent_link , decision_link , rebuilt_definitions ) : rebuilt_gid = rebuilt_definitions [ 'tasks' ] [ 0 ] [ 'payload' ] [ 'env' ] [ 'ACTION_TASK_GROUP_ID' ] runtime_gid = parent_link . task [ 'payload' ] [ 'env' ] [ 'ACTION_TASK_GROUP_ID' ] acceptable_gids = { parent_link . task_id , decision_link . task_id } if rebuilt_gid not in acceptable_gids : raise CoTError ( "{} ACTION_TASK_GROUP_ID {} not in {}!" . format ( parent_link . name , rebuilt_gid , acceptable_gids ) ) if runtime_gid != rebuilt_gid : log . debug ( "runtime gid {} rebuilt gid {}" . format ( runtime_gid , rebuilt_gid ) ) rebuilt_definitions [ 'tasks' ] [ 0 ] [ 'payload' ] [ 'env' ] [ 'ACTION_TASK_GROUP_ID' ] = runtime_gid | Update the ACTION_TASK_GROUP_ID of an action after verifying . | 261 | 17 |
27,908 | def compare_jsone_task_definition ( parent_link , rebuilt_definitions ) : diffs = [ ] for compare_definition in rebuilt_definitions [ 'tasks' ] : # Rebuilt decision tasks have an extra `taskId`; remove if 'taskId' in compare_definition : del ( compare_definition [ 'taskId' ] ) # remove key/value pairs where the value is empty, since json-e drops # them instead of keeping them with a None/{}/[] value. compare_definition = remove_empty_keys ( compare_definition ) runtime_definition = remove_empty_keys ( parent_link . task ) diff = list ( dictdiffer . diff ( compare_definition , runtime_definition ) ) if diff : diffs . append ( pprint . pformat ( diff ) ) continue log . info ( "{}: Good." . format ( parent_link . name ) ) break else : error_msg = "{} {}: the runtime task doesn't match any rebuilt definition!\n{}" . format ( parent_link . name , parent_link . task_id , pprint . pformat ( diffs ) ) log . critical ( error_msg ) raise CoTError ( error_msg ) | Compare the json - e rebuilt task definition vs the runtime definition . | 264 | 13 |
27,909 | async def verify_parent_task ( chain , link ) : worker_type = get_worker_type ( link . task ) if worker_type not in chain . context . config [ 'valid_decision_worker_types' ] : raise CoTError ( "{} is not a valid decision workerType!" . format ( worker_type ) ) if chain is not link : # make sure all tasks generated from this parent task match the published # task-graph.json. Not applicable if this link is the ChainOfTrust object, # since this task won't have generated a task-graph.json yet. path = link . get_artifact_full_path ( 'public/task-graph.json' ) if not os . path . exists ( path ) : raise CoTError ( "{} {}: {} doesn't exist!" . format ( link . name , link . task_id , path ) ) link . task_graph = load_json_or_yaml ( path , is_path = True , exception = CoTError , message = "Can't load {}! %(exc)s" . format ( path ) ) # This check may want to move to a per-task check? for target_link in chain . get_all_links_in_chain ( ) : # Verify the target's task is in the parent task's task graph, unless # it's this task or a parent task. # (Decision tasks will not exist in a parent task's task-graph.json; # action tasks, which are generated later, will also be missing.) # https://github.com/mozilla-releng/scriptworker/issues/77 if target_link . parent_task_id == link . task_id and target_link . task_id != link . task_id and target_link . task_type not in PARENT_TASK_TYPES : verify_link_in_task_graph ( chain , link , target_link ) try : await verify_parent_task_definition ( chain , link ) except ( BaseDownloadError , KeyError ) as e : raise CoTError ( e ) | Verify the parent task Link . | 456 | 7 |
27,910 | async def verify_docker_image_task ( chain , link ) : errors = [ ] # workerType worker_type = get_worker_type ( link . task ) if worker_type not in chain . context . config [ 'valid_docker_image_worker_types' ] : errors . append ( "{} is not a valid docker-image workerType!" . format ( worker_type ) ) raise_on_errors ( errors ) | Verify the docker image Link . | 94 | 7 |
27,911 | def check_num_tasks ( chain , task_count ) : errors = [ ] # hardcode for now. If we need a different set of constraints, either # go by cot_product settings or by task_count['docker-image'] + 1 min_decision_tasks = 1 if task_count [ 'decision' ] < min_decision_tasks : errors . append ( "{} decision tasks; we must have at least {}!" . format ( task_count [ 'decision' ] , min_decision_tasks ) ) raise_on_errors ( errors ) | Make sure there are a specific number of specific task types . | 129 | 12 |
27,912 | async def verify_docker_worker_task ( chain , link ) : if chain != link : # These two checks will die on `link.cot` if `link` is a ChainOfTrust # object (e.g., the task we're running `verify_cot` against is a # docker-worker task). So only run these tests if they are not the chain # object. check_interactive_docker_worker ( link ) verify_docker_image_sha ( chain , link ) | Docker - worker specific checks . | 106 | 7 |
27,913 | async def verify_scriptworker_task ( chain , obj ) : errors = [ ] if obj . worker_impl != "scriptworker" : errors . append ( "{} {} must be run from scriptworker!" . format ( obj . name , obj . task_id ) ) raise_on_errors ( errors ) | Verify the signing trust object . | 67 | 7 |
27,914 | def verify_repo_matches_url ( repo , url ) : repo_parts = urlparse ( repo ) url_parts = urlparse ( url ) errors = [ ] repo_path_parts = repo_parts . path . split ( '/' ) url_path_parts = url_parts . path . split ( '/' ) if repo_parts . hostname != url_parts . hostname : errors . append ( "verify_repo_matches_url: Hostnames don't match! {} {}" . format ( repo_parts . hostname , url_parts . hostname ) ) if not url_parts . path . startswith ( repo_parts . path ) or url_path_parts [ : len ( repo_path_parts ) ] != repo_path_parts : errors . append ( "verify_repo_matches_url: Paths don't match! {} {}" . format ( repo_parts . path , url_parts . path ) ) if errors : log . warning ( "\n" . join ( errors ) ) return False return True | Verify url is a part of repo . | 233 | 9 |
27,915 | def get_source_url ( obj ) : source_env_prefix = obj . context . config [ 'source_env_prefix' ] task = obj . task log . debug ( "Getting source url for {} {}..." . format ( obj . name , obj . task_id ) ) repo = get_repo ( obj . task , source_env_prefix = source_env_prefix ) source = task [ 'metadata' ] [ 'source' ] if repo and not verify_repo_matches_url ( repo , source ) : raise CoTError ( "{name} {task_id}: {source_env_prefix} {repo} doesn't match source {source}!" . format ( name = obj . name , task_id = obj . task_id , source_env_prefix = source_env_prefix , repo = repo , source = source ) ) log . info ( "{} {}: found {}" . format ( obj . name , obj . task_id , source ) ) return source | Get the source url for a Trust object . | 217 | 9 |
27,916 | async def trace_back_to_tree ( chain ) : errors = [ ] repos = { } restricted_privs = None rules = { } for my_key , config_key in { 'scopes' : 'cot_restricted_scopes' , 'trees' : 'cot_restricted_trees' } . items ( ) : rules [ my_key ] = chain . context . config [ config_key ] # a repo_path of None means we have no restricted privs. # a string repo_path may mean we have higher privs for obj in [ chain ] + chain . links : source_url = get_source_url ( obj ) repo_path = match_url_regex ( chain . context . config [ 'valid_vcs_rules' ] , source_url , match_url_path_callback ) repos [ obj ] = repo_path # check for restricted scopes. my_repo = repos [ chain ] for scope in chain . task [ 'scopes' ] : if scope in rules [ 'scopes' ] : log . info ( "Found privileged scope {}" . format ( scope ) ) restricted_privs = True level = rules [ 'scopes' ] [ scope ] if my_repo not in rules [ 'trees' ] [ level ] : errors . append ( "{} {}: repo {} not allowlisted for scope {}!" . format ( chain . name , chain . task_id , my_repo , scope ) ) # verify all tasks w/ same decision_task_id have the same source repo. if len ( set ( repos . values ( ) ) ) > 1 : for obj , repo in repos . items ( ) : if obj . decision_task_id == chain . decision_task_id : if repo != my_repo : errors . append ( "{} {} repo {} doesn't match my repo {}!" . format ( obj . name , obj . task_id , repo , my_repo ) ) # if we have restricted privs, the non-sibling tasks must at least be in # a known repo. # (Not currently requiring that all tasks have the same privilege level, # in case a docker-image build is run on mozilla-central and that image # is used for a release-priv task, for example.) elif restricted_privs and repo is None : errors . append ( "{} {} has no privileged repo on an restricted privilege scope!" . format ( obj . name , obj . task_id ) ) # Disallow restricted privs on is_try_or_pull_request. This may be a redundant check. if restricted_privs and await chain . 
is_try_or_pull_request ( ) : errors . append ( "{} {} has restricted privilege scope, and is_try_or_pull_request()!" . format ( chain . name , chain . task_id ) ) raise_on_errors ( errors ) | Trace the chain back to the tree . | 633 | 9 |
27,917 | async def verify_chain_of_trust ( chain ) : log_path = os . path . join ( chain . context . config [ "task_log_dir" ] , "chain_of_trust.log" ) scriptworker_log = logging . getLogger ( 'scriptworker' ) with contextual_log_handler ( chain . context , path = log_path , log_obj = scriptworker_log , formatter = AuditLogFormatter ( fmt = chain . context . config [ 'log_fmt' ] , datefmt = chain . context . config [ 'log_datefmt' ] , ) ) : try : # build LinkOfTrust objects await build_task_dependencies ( chain , chain . task , chain . name , chain . task_id ) # download the signed chain of trust artifacts await download_cot ( chain ) # verify the signatures and populate the ``link.cot``s verify_cot_signatures ( chain ) # download all other artifacts needed to verify chain of trust await download_cot_artifacts ( chain ) # verify the task types, e.g. decision task_count = await verify_task_types ( chain ) check_num_tasks ( chain , task_count ) # verify the worker_impls, e.g. docker-worker await verify_worker_impls ( chain ) await trace_back_to_tree ( chain ) except ( BaseDownloadError , KeyError , AttributeError ) as exc : log . critical ( "Chain of Trust verification error!" , exc_info = True ) if isinstance ( exc , CoTError ) : raise else : raise CoTError ( str ( exc ) ) log . info ( "Good." ) | Build and verify the chain of trust . | 365 | 8 |
27,918 | async def is_try_or_pull_request ( self ) : tasks = [ asyncio . ensure_future ( link . is_try_or_pull_request ( ) ) for link in self . links ] tasks . insert ( 0 , asyncio . ensure_future ( is_try_or_pull_request ( self . context , self . task ) ) ) conditions = await raise_future_exceptions ( tasks ) return any ( conditions ) | Determine if any task in the chain is a try task . | 97 | 14 |
27,919 | def get_link ( self , task_id ) : links = [ x for x in self . links if x . task_id == task_id ] if len ( links ) != 1 : raise CoTError ( "No single Link matches task_id {}!\n{}" . format ( task_id , self . dependent_task_ids ( ) ) ) return links [ 0 ] | Get a LinkOfTrust by task id . | 83 | 9 |
27,920 | def get_all_links_in_chain ( self ) : if self . is_decision ( ) and self . get_link ( self . task_id ) : return self . links return [ self ] + self . links | Return all links in the chain of trust including the target task . | 49 | 13 |
27,921 | def format ( self , record ) : if record . levelno == logging . DEBUG : record . msg = ' {}' . format ( record . msg ) return super ( AuditLogFormatter , self ) . format ( record ) | Space debug messages for more legibility . | 47 | 8 |
27,922 | def get_version_string ( version ) : version_len = len ( version ) if version_len == 3 : version_string = '%d.%d.%d' % version elif version_len == 4 : version_string = '%d.%d.%d-%s' % version else : raise Exception ( 'Version tuple is non-semver-compliant {} length!' . format ( version_len ) ) return version_string | Translate a version tuple into a string . | 99 | 9 |
27,923 | def get_unfrozen_copy ( values ) : if isinstance ( values , ( frozendict , dict ) ) : return { key : get_unfrozen_copy ( value ) for key , value in values . items ( ) } elif isinstance ( values , ( list , tuple ) ) : return [ get_unfrozen_copy ( value ) for value in values ] # Nothing to unfreeze. return values | Recursively convert value s tuple values into lists and frozendicts into dicts . | 93 | 19 |
27,924 | def read_worker_creds ( key = "credentials" ) : for path in CREDS_FILES : if not os . path . exists ( path ) : continue contents = load_json_or_yaml ( path , is_path = True , exception = None ) if contents . get ( key ) : return contents [ key ] else : if key == "credentials" and os . environ . get ( "TASKCLUSTER_ACCESS_TOKEN" ) and os . environ . get ( "TASKCLUSTER_CLIENT_ID" ) : credentials = { "accessToken" : os . environ [ "TASKCLUSTER_ACCESS_TOKEN" ] , "clientId" : os . environ [ "TASKCLUSTER_CLIENT_ID" ] , } if os . environ . get ( "TASKCLUSTER_CERTIFICATE" ) : credentials [ 'certificate' ] = os . environ [ 'TASKCLUSTER_CERTIFICATE' ] return credentials | Get credentials from CREDS_FILES or the environment . | 239 | 13 |
27,925 | def check_config ( config , path ) : messages = [ ] config_copy = get_frozen_copy ( config ) missing_keys = set ( DEFAULT_CONFIG . keys ( ) ) - set ( config_copy . keys ( ) ) if missing_keys : messages . append ( "Missing config keys {}!" . format ( missing_keys ) ) for key , value in config_copy . items ( ) : if key not in DEFAULT_CONFIG : messages . append ( "Unknown key {} in {}!" . format ( key , path ) ) continue if value is None : messages . append ( _VALUE_UNDEFINED_MESSAGE . format ( path = path , key = key ) ) else : value_type = type ( value ) if isinstance ( DEFAULT_CONFIG [ key ] , Mapping ) and 'by-cot-product' in DEFAULT_CONFIG [ key ] : default_type = type ( DEFAULT_CONFIG [ key ] [ 'by-cot-product' ] [ config [ 'cot_product' ] ] ) else : default_type = type ( DEFAULT_CONFIG [ key ] ) if value_type is not default_type : messages . append ( "{} {}: type {} is not {}!" . format ( path , key , value_type , default_type ) ) if value in ( "..." , b"..." ) : messages . append ( _VALUE_UNDEFINED_MESSAGE . format ( path = path , key = key ) ) if key in ( "provisioner_id" , "worker_group" , "worker_type" , "worker_id" ) and not _is_id_valid ( value ) : messages . append ( '{} doesn\'t match "{}" (required by Taskcluster)' . format ( key , _GENERIC_ID_REGEX . pattern ) ) return messages | Validate the config against DEFAULT_CONFIG . | 410 | 11 |
27,926 | def apply_product_config ( config ) : cot_product = config [ 'cot_product' ] for key in config : if isinstance ( config [ key ] , Mapping ) and 'by-cot-product' in config [ key ] : try : config [ key ] = config [ key ] [ 'by-cot-product' ] [ cot_product ] except KeyError : raise ConfigError ( "Product {} not specified for key {}" . format ( cot_product , key ) ) return config | Apply config values that are keyed by cot_product . | 110 | 13 |
27,927 | def create_config ( config_path = "scriptworker.yaml" ) : if not os . path . exists ( config_path ) : print ( "{} doesn't exist! Exiting..." . format ( config_path ) , file = sys . stderr ) sys . exit ( 1 ) with open ( config_path , "r" , encoding = "utf-8" ) as fh : secrets = safe_load ( fh ) config = dict ( deepcopy ( DEFAULT_CONFIG ) ) if not secrets . get ( "credentials" ) : secrets [ 'credentials' ] = read_worker_creds ( ) config . update ( secrets ) apply_product_config ( config ) messages = check_config ( config , config_path ) if messages : print ( '\n' . join ( messages ) , file = sys . stderr ) print ( "Exiting..." , file = sys . stderr ) sys . exit ( 1 ) credentials = get_frozen_copy ( secrets [ 'credentials' ] ) del ( config [ 'credentials' ] ) config = get_frozen_copy ( config ) return config , credentials | Create a config from DEFAULT_CONFIG arguments and config file . | 255 | 14 |
27,928 | def get_context_from_cmdln ( args , desc = "Run scriptworker" ) : context = Context ( ) parser = argparse . ArgumentParser ( description = desc ) parser . add_argument ( "config_path" , type = str , nargs = "?" , default = "scriptworker.yaml" , help = "the path to the config file" ) parsed_args = parser . parse_args ( args ) context . config , credentials = create_config ( config_path = parsed_args . config_path ) update_logging_config ( context ) return context , credentials | Create a Context object from args . | 128 | 7 |
27,929 | def get_cot_artifacts ( context ) : artifacts = { } filepaths = filepaths_in_dir ( context . config [ 'artifact_dir' ] ) hash_alg = context . config [ 'chain_of_trust_hash_algorithm' ] for filepath in sorted ( filepaths ) : path = os . path . join ( context . config [ 'artifact_dir' ] , filepath ) sha = get_hash ( path , hash_alg = hash_alg ) artifacts [ filepath ] = { hash_alg : sha } return artifacts | Generate the artifact relative paths and shas for the chain of trust . | 126 | 15 |
27,930 | def generate_cot_body ( context ) : try : cot = { 'artifacts' : get_cot_artifacts ( context ) , 'chainOfTrustVersion' : 1 , 'runId' : context . claim_task [ 'runId' ] , 'task' : context . task , 'taskId' : context . claim_task [ 'status' ] [ 'taskId' ] , 'workerGroup' : context . claim_task [ 'workerGroup' ] , 'workerId' : context . config [ 'worker_id' ] , 'workerType' : context . config [ 'worker_type' ] , 'environment' : get_cot_environment ( context ) , } except ( KeyError , ) as exc : raise ScriptWorkerException ( "Can't generate chain of trust! {}" . format ( str ( exc ) ) ) return cot | Generate the chain of trust dictionary . | 185 | 8 |
27,931 | def generate_cot ( context , parent_path = None ) : body = generate_cot_body ( context ) schema = load_json_or_yaml ( context . config [ 'cot_schema_path' ] , is_path = True , exception = ScriptWorkerException , message = "Can't read schema file {}: %(exc)s" . format ( context . config [ 'cot_schema_path' ] ) ) validate_json_schema ( body , schema , name = "chain of trust" ) body = format_json ( body ) parent_path = parent_path or os . path . join ( context . config [ 'artifact_dir' ] , 'public' ) unsigned_path = os . path . join ( parent_path , 'chain-of-trust.json' ) write_to_file ( unsigned_path , body ) if context . config [ 'sign_chain_of_trust' ] : ed25519_signature_path = '{}.sig' . format ( unsigned_path ) ed25519_private_key = ed25519_private_key_from_file ( context . config [ 'ed25519_private_key_path' ] ) ed25519_signature = ed25519_private_key . sign ( body . encode ( 'utf-8' ) ) write_to_file ( ed25519_signature_path , ed25519_signature , file_type = 'binary' ) return body | Format and sign the cot body and write to disk . | 324 | 12 |
27,932 | def is_github_repo_owner_the_official_one ( context , repo_owner ) : official_repo_owner = context . config [ 'official_github_repos_owner' ] if not official_repo_owner : raise ConfigError ( 'This worker does not have a defined owner for official GitHub repositories. ' 'Given "official_github_repos_owner": {}' . format ( official_repo_owner ) ) return official_repo_owner == repo_owner | Given a repo_owner check if it matches the one configured to be the official one . | 109 | 18 |
27,933 | def get_tag_hash ( self , tag_name ) : tag_object = get_single_item_from_sequence ( sequence = self . _github_repository . tags ( ) , condition = lambda tag : tag . name == tag_name , no_item_error_message = 'No tag "{}" exist' . format ( tag_name ) , too_many_item_error_message = 'Too many tags "{}" found' . format ( tag_name ) , ) return tag_object . commit . sha | Fetch the commit hash that was tagged with tag_name . | 115 | 13 |
27,934 | async def has_commit_landed_on_repository ( self , context , revision ) : # Revision may be a tag name. `branch_commits` doesn't work on tags if not _is_git_full_hash ( revision ) : revision = self . get_tag_hash ( tag_name = revision ) repo = self . _github_repository . html_url url = '/' . join ( [ repo . rstrip ( '/' ) , 'branch_commits' , revision ] ) html_data = await retry_request ( context , url ) html_text = html_data . strip ( ) # https://github.com/{repo_owner}/{repo_name}/branch_commits/{revision} just returns some \n # when the commit hasn't landed on the origin repo. Otherwise, some HTML data is returned - it # represents the branches on which the given revision is present. return html_text != '' | Tell if a commit was landed on the repository or if it just comes from a pull request . | 215 | 19 |
27,935 | def update_logging_config ( context , log_name = None , file_name = 'worker.log' ) : log_name = log_name or __name__ . split ( '.' ) [ 0 ] top_level_logger = logging . getLogger ( log_name ) datefmt = context . config [ 'log_datefmt' ] fmt = context . config [ 'log_fmt' ] formatter = logging . Formatter ( fmt = fmt , datefmt = datefmt ) if context . config . get ( "verbose" ) : top_level_logger . setLevel ( logging . DEBUG ) if len ( top_level_logger . handlers ) == 0 : handler = logging . StreamHandler ( ) handler . setFormatter ( formatter ) top_level_logger . addHandler ( handler ) else : top_level_logger . setLevel ( logging . INFO ) # Rotating log file makedirs ( context . config [ 'log_dir' ] ) path = os . path . join ( context . config [ 'log_dir' ] , file_name ) if context . config [ "watch_log_file" ] : # If we rotate the log file via logrotate.d, let's watch the file # so we can automatically close/reopen on move. handler = logging . handlers . WatchedFileHandler ( path ) else : # Avoid using WatchedFileHandler during scriptworker unittests handler = logging . FileHandler ( path ) handler . setFormatter ( formatter ) top_level_logger . addHandler ( handler ) top_level_logger . addHandler ( logging . NullHandler ( ) ) | Update python logging settings from config . | 363 | 7 |
27,936 | async def pipe_to_log ( pipe , filehandles = ( ) , level = logging . INFO ) : while True : line = await pipe . readline ( ) if line : line = to_unicode ( line ) log . log ( level , line . rstrip ( ) ) for filehandle in filehandles : print ( line , file = filehandle , end = "" ) else : break | Log from a subprocess PIPE . | 86 | 9 |
27,937 | def get_log_filehandle ( context ) : log_file_name = get_log_filename ( context ) makedirs ( context . config [ 'task_log_dir' ] ) with open ( log_file_name , "w" , encoding = "utf-8" ) as filehandle : yield filehandle | Open the log and error filehandles . | 70 | 9 |
27,938 | def contextual_log_handler ( context , path , log_obj = None , level = logging . DEBUG , formatter = None ) : log_obj = log_obj or log formatter = formatter or logging . Formatter ( fmt = context . config [ 'log_fmt' ] , datefmt = context . config [ 'log_datefmt' ] , ) parent_path = os . path . dirname ( path ) makedirs ( parent_path ) contextual_handler = logging . FileHandler ( path , encoding = 'utf-8' ) contextual_handler . setLevel ( level ) contextual_handler . setFormatter ( formatter ) log_obj . addHandler ( contextual_handler ) yield contextual_handler . close ( ) log_obj . removeHandler ( contextual_handler ) | Add a short - lived log with a contextmanager for cleanup . | 171 | 13 |
27,939 | async def upload_artifacts ( context , files ) : def to_upload_future ( target_path ) : path = os . path . join ( context . config [ 'artifact_dir' ] , target_path ) content_type , content_encoding = compress_artifact_if_supported ( path ) return asyncio . ensure_future ( retry_create_artifact ( context , path , target_path = target_path , content_type = content_type , content_encoding = content_encoding , ) ) tasks = list ( map ( to_upload_future , files ) ) await raise_future_exceptions ( tasks ) | Compress and upload the requested files from artifact_dir preserving relative paths . | 141 | 15 |
27,940 | def compress_artifact_if_supported ( artifact_path ) : content_type , encoding = guess_content_type_and_encoding ( artifact_path ) log . debug ( '"{}" is encoded with "{}" and has mime/type "{}"' . format ( artifact_path , encoding , content_type ) ) if encoding is None and content_type in _GZIP_SUPPORTED_CONTENT_TYPE : log . info ( '"{}" can be gzip\'d. Compressing...' . format ( artifact_path ) ) with open ( artifact_path , 'rb' ) as f_in : text_content = f_in . read ( ) with gzip . open ( artifact_path , 'wb' ) as f_out : f_out . write ( text_content ) encoding = 'gzip' log . info ( '"{}" compressed' . format ( artifact_path ) ) else : log . debug ( '"{}" is not supported for compression.' . format ( artifact_path ) ) return content_type , encoding | Compress artifacts with GZip if they re known to be supported . | 232 | 14 |
27,941 | def guess_content_type_and_encoding ( path ) : for ext , content_type in _EXTENSION_TO_MIME_TYPE . items ( ) : if path . endswith ( ext ) : return content_type content_type , encoding = mimetypes . guess_type ( path ) content_type = content_type or "application/binary" return content_type , encoding | Guess the content type of a path using mimetypes . | 87 | 13 |
27,942 | async def create_artifact ( context , path , target_path , content_type , content_encoding , storage_type = 's3' , expires = None ) : payload = { "storageType" : storage_type , "expires" : expires or get_expiration_arrow ( context ) . isoformat ( ) , "contentType" : content_type , } args = [ get_task_id ( context . claim_task ) , get_run_id ( context . claim_task ) , target_path , payload ] tc_response = await context . temp_queue . createArtifact ( * args ) skip_auto_headers = [ aiohttp . hdrs . CONTENT_TYPE ] loggable_url = get_loggable_url ( tc_response [ 'putUrl' ] ) log . info ( "uploading {path} to {url}..." . format ( path = path , url = loggable_url ) ) with open ( path , "rb" ) as fh : async with async_timeout . timeout ( context . config [ 'artifact_upload_timeout' ] ) : async with context . session . put ( tc_response [ 'putUrl' ] , data = fh , headers = _craft_artifact_put_headers ( content_type , content_encoding ) , skip_auto_headers = skip_auto_headers , compress = False ) as resp : log . info ( "create_artifact {}: {}" . format ( path , resp . status ) ) response_text = await resp . text ( ) log . info ( response_text ) if resp . status not in ( 200 , 204 ) : raise ScriptWorkerRetryException ( "Bad status {}" . format ( resp . status ) , ) | Create an artifact and upload it . | 386 | 7 |
27,943 | def get_artifact_url ( context , task_id , path ) : if path . startswith ( "public/" ) : url = context . queue . buildUrl ( 'getLatestArtifact' , task_id , path ) else : url = context . queue . buildSignedUrl ( 'getLatestArtifact' , task_id , path , # XXX Can set expiration kwarg in (int) seconds from now; # defaults to 15min. ) return url | Get a TaskCluster artifact url . | 102 | 8 |
27,944 | async def download_artifacts ( context , file_urls , parent_dir = None , session = None , download_func = download_file , valid_artifact_task_ids = None ) : parent_dir = parent_dir or context . config [ 'work_dir' ] session = session or context . session tasks = [ ] files = [ ] valid_artifact_rules = context . config [ 'valid_artifact_rules' ] # XXX when chain of trust is on everywhere, hardcode the chain of trust task list valid_artifact_task_ids = valid_artifact_task_ids or list ( context . task [ 'dependencies' ] + [ get_decision_task_id ( context . task ) ] ) for file_url in file_urls : rel_path = validate_artifact_url ( valid_artifact_rules , valid_artifact_task_ids , file_url ) abs_file_path = os . path . join ( parent_dir , rel_path ) files . append ( abs_file_path ) tasks . append ( asyncio . ensure_future ( retry_async ( download_func , args = ( context , file_url , abs_file_path ) , retry_exceptions = ( DownloadError , aiohttp . ClientError , asyncio . TimeoutError ) , kwargs = { 'session' : session } , ) ) ) await raise_future_exceptions ( tasks ) return files | Download artifacts in parallel after validating their URLs . | 321 | 10 |
27,945 | def get_upstream_artifacts_full_paths_per_task_id ( context ) : upstream_artifacts = context . task [ 'payload' ] [ 'upstreamArtifacts' ] task_ids_and_relative_paths = [ ( artifact_definition [ 'taskId' ] , artifact_definition [ 'paths' ] ) for artifact_definition in upstream_artifacts ] optional_artifacts_per_task_id = get_optional_artifacts_per_task_id ( upstream_artifacts ) upstream_artifacts_full_paths_per_task_id = { } failed_paths_per_task_id = { } for task_id , paths in task_ids_and_relative_paths : for path in paths : try : path_to_add = get_and_check_single_upstream_artifact_full_path ( context , task_id , path ) add_enumerable_item_to_dict ( dict_ = upstream_artifacts_full_paths_per_task_id , key = task_id , item = path_to_add ) except ScriptWorkerTaskException : if path in optional_artifacts_per_task_id . get ( task_id , [ ] ) : log . warning ( 'Optional artifact "{}" of task "{}" not found' . format ( path , task_id ) ) add_enumerable_item_to_dict ( dict_ = failed_paths_per_task_id , key = task_id , item = path ) else : raise return upstream_artifacts_full_paths_per_task_id , failed_paths_per_task_id | List the downloaded upstream artifacts . | 361 | 6 |
27,946 | def get_and_check_single_upstream_artifact_full_path ( context , task_id , path ) : abs_path = get_single_upstream_artifact_full_path ( context , task_id , path ) if not os . path . exists ( abs_path ) : raise ScriptWorkerTaskException ( 'upstream artifact with path: {}, does not exist' . format ( abs_path ) ) return abs_path | Return the full path where an upstream artifact is located on disk . | 99 | 13 |
27,947 | def get_single_upstream_artifact_full_path ( context , task_id , path ) : return os . path . abspath ( os . path . join ( context . config [ 'work_dir' ] , 'cot' , task_id , path ) ) | Return the full path where an upstream artifact should be located . | 60 | 12 |
27,948 | def get_optional_artifacts_per_task_id ( upstream_artifacts ) : # A given taskId might be defined many times in upstreamArtifacts. Thus, we can't # use a dict comprehension optional_artifacts_per_task_id = { } for artifact_definition in upstream_artifacts : if artifact_definition . get ( 'optional' , False ) is True : task_id = artifact_definition [ 'taskId' ] artifacts_paths = artifact_definition [ 'paths' ] add_enumerable_item_to_dict ( dict_ = optional_artifacts_per_task_id , key = task_id , item = artifacts_paths ) return optional_artifacts_per_task_id | Return every optional artifact defined in upstream_artifacts ordered by taskId . | 154 | 14 |
27,949 | def set_chat_description ( chat_id , description , * * kwargs ) : if len ( description ) > 255 : raise ValueError ( "Chat description must be less than 255 characters." ) # required args params = dict ( chat_id = chat_id , description = description ) return TelegramBotRPCRequest ( 'setChatTitle' , params = params , on_result = lambda result : result , * * kwargs ) | Use this method to change the description of a supergroup or a channel . The bot must be an administrator in the chat for this to work and must have the appropriate admin rights . Returns True on success . | 94 | 41 |
27,950 | def send_audio ( chat_id , audio , caption = None , duration = None , performer = None , title = None , reply_to_message_id = None , reply_markup = None , disable_notification = False , parse_mode = None , * * kwargs ) : files = None if isinstance ( audio , InputFile ) : files = [ audio ] audio = None elif not isinstance ( audio , str ) : raise Exception ( 'audio must be instance of InputFile or str' ) # required args params = dict ( chat_id = chat_id , audio = audio ) # optional args params . update ( _clean_params ( caption = caption , duration = duration , performer = performer , title = title , reply_to_message_id = reply_to_message_id , reply_markup = reply_markup , disable_notification = disable_notification , parse_mode = parse_mode , ) ) return TelegramBotRPCRequest ( 'sendAudio' , params = params , files = files , on_result = Message . from_result , * * kwargs ) | Use this method to send audio files if you want Telegram clients to display them in the music player . | 240 | 20 |
27,951 | def unban_chat_member ( chat_id , user_id , * * kwargs ) : # required args params = dict ( chat_id = chat_id , user_id = user_id , ) return TelegramBotRPCRequest ( 'unbanChatMember' , params = params , on_result = lambda result : result , * * kwargs ) | Use this method to unban a previously kicked user in a supergroup . The user will not return to the group automatically but will be able to join via link etc . The bot must be an administrator in the group for this to work | 80 | 47 |
27,952 | def get_chat_member ( chat_id , user_id , * * kwargs ) : # required args params = dict ( chat_id = chat_id , user_id = user_id , ) return TelegramBotRPCRequest ( 'getChatMember' , params = params , on_result = lambda result : ChatMember . from_result ( result ) , * * kwargs ) | Use this method to get information about a member of a chat | 86 | 12 |
27,953 | def get_file ( file_id , * * kwargs ) : # required args params = dict ( file_id = file_id ) return TelegramBotRPCRequest ( 'getFile' , params = params , on_result = File . from_result , * * kwargs ) | Use this method to get basic info about a file and prepare it for downloading . | 63 | 16 |
27,954 | def get_updates ( offset = None , limit = None , timeout = None , allowed_updates = None , * * kwargs ) : # optional parameters params = _clean_params ( offset = offset , limit = limit , timeout = timeout , allowed_updates = allowed_updates , ) return TelegramBotRPCRequest ( 'getUpdates' , params = params , on_result = Update . from_result , * * kwargs ) | Use this method to receive incoming updates using long polling . | 98 | 11 |
27,955 | async def connect ( self , cluster_id , client_id , nats = None , connect_timeout = DEFAULT_CONNECT_TIMEOUT , max_pub_acks_inflight = DEFAULT_MAX_PUB_ACKS_INFLIGHT , loop = None , ) : self . _cluster_id = cluster_id self . _client_id = client_id self . _loop = loop self . _connect_timeout = connect_timeout if nats is not None : self . _nc = nats # NATS Streaming client should use same event loop # as the borrowed NATS connection. self . _loop = self . _nc . _loop # Subjects self . _discover_subject = DEFAULT_DISCOVER_SUBJECT % self . _cluster_id self . _hb_inbox = new_guid ( ) self . _ack_subject = DEFAULT_ACKS_SUBJECT % new_guid ( ) # Pending pub acks inflight self . _pending_pub_acks_queue = asyncio . Queue ( maxsize = max_pub_acks_inflight , loop = self . _loop ) # Heartbeats subscription self . _hb_inbox_sid = await self . _nc . subscribe ( self . _hb_inbox , cb = self . _process_heartbeats , ) # Acks processing subscription self . _ack_subject_sid = await self . _nc . subscribe ( self . _ack_subject , cb = self . _process_ack , ) await self . _nc . flush ( ) # Start NATS Streaming session by sending ConnectRequest creq = protocol . ConnectRequest ( ) creq . clientID = self . _client_id creq . heartbeatInbox = self . _hb_inbox payload = creq . SerializeToString ( ) msg = None try : msg = await self . _nc . request ( self . _discover_subject , payload , timeout = self . _connect_timeout , ) except : await self . _close ( ) raise ErrConnectReqTimeout ( "stan: failed connecting to '{}'" . format ( cluster_id ) ) # We should get the NATS Streaming subject from the # response from the ConnectRequest. resp = protocol . ConnectResponse ( ) resp . ParseFromString ( msg . data ) if resp . error != "" : try : await self . _close ( ) except : pass raise StanError ( resp . error ) self . _pub_prefix = resp . pubPrefix self . _sub_req_subject = resp . subRequests self . _unsub_req_subject = resp . unsubRequests self . _close_req_subject = resp . 
closeRequests self . _sub_close_req_subject = resp . subCloseRequests | Starts a session with a NATS Streaming cluster . | 612 | 11 |
27,956 | async def _process_ack ( self , msg ) : pub_ack = protocol . PubAck ( ) pub_ack . ParseFromString ( msg . data ) # Unblock pending acks queue if required. if not self . _pending_pub_acks_queue . empty ( ) : await self . _pending_pub_acks_queue . get ( ) try : cb = self . _pub_ack_map [ pub_ack . guid ] await cb ( pub_ack ) del self . _pub_ack_map [ pub_ack . guid ] except KeyError : # Just skip the pub ack return except : # TODO: Check for protocol error return | Receives acks from the publishes via the _STAN . acks subscription . | 148 | 18 |
27,957 | async def _process_msg ( self , sub ) : while True : try : raw_msg = await sub . _msgs_queue . get ( ) msg = Msg ( ) msg_proto = protocol . MsgProto ( ) msg_proto . ParseFromString ( raw_msg . data ) msg . proto = msg_proto msg . sub = sub # Yield the message to the subscription callback. await sub . cb ( msg ) if not sub . manual_acks : # Process auto-ack if not done manually in the callback, # by publishing into the ack inbox from the subscription. msg_ack = protocol . Ack ( ) msg_ack . subject = msg . proto . subject msg_ack . sequence = msg . proto . sequence await self . _nc . publish ( sub . ack_inbox , msg_ack . SerializeToString ( ) ) except asyncio . CancelledError : break except Exception as ex : if sub . error_cb : try : await sub . error_cb ( ex ) except : logger . exception ( "Exception in error callback for subscription to '%s'" , sub . subject ) continue | Receives the msgs from the STAN subscriptions and replies . By default it will reply back with an ack unless manual acking was specified in one of the subscription options . | 247 | 37 |
27,958 | async def ack ( self , msg ) : ack_proto = protocol . Ack ( ) ack_proto . subject = msg . proto . subject ack_proto . sequence = msg . proto . sequence await self . _nc . publish ( msg . sub . ack_inbox , ack_proto . SerializeToString ( ) ) | Used to manually acks a message . | 79 | 8 |
27,959 | async def publish ( self , subject , payload , ack_handler = None , ack_wait = DEFAULT_ACK_WAIT , ) : stan_subject = '' . join ( [ self . _pub_prefix , '.' , subject ] ) guid = new_guid ( ) pe = protocol . PubMsg ( ) pe . clientID = self . _client_id pe . guid = guid pe . subject = subject pe . data = payload # Control max inflight pubs for the client with a buffered queue. await self . _pending_pub_acks_queue . put ( None ) # Process asynchronously if a handler is given. if ack_handler is not None : self . _pub_ack_map [ guid ] = ack_handler try : await self . _nc . publish_request ( stan_subject , self . _ack_subject , pe . SerializeToString ( ) , ) return except Exception as e : del self . _pub_ack_map [ guid ] raise e else : # Synchronous wait for ack handling. future = asyncio . Future ( loop = self . _loop ) async def cb ( pub_ack ) : nonlocal future future . set_result ( pub_ack ) self . _pub_ack_map [ guid ] = cb try : await self . _nc . publish_request ( stan_subject , self . _ack_subject , pe . SerializeToString ( ) , ) await asyncio . wait_for ( future , ack_wait , loop = self . _loop ) return future . result ( ) except Exception as e : # Remove pending future before raising error. future . cancel ( ) del self . _pub_ack_map [ guid ] raise e | Publishes a payload onto a subject . By default it will block until the message which has been published has been acked back . An optional async handler can be publi | 377 | 35 |
27,960 | async def _close ( self ) : # Remove the core NATS Streaming subscriptions. try : if self . _hb_inbox_sid is not None : await self . _nc . unsubscribe ( self . _hb_inbox_sid ) self . _hb_inbox = None self . _hb_inbox_sid = None if self . _ack_subject_sid is not None : await self . _nc . unsubscribe ( self . _ack_subject_sid ) self . _ack_subject = None self . _ack_subject_sid = None except : # FIXME: async error in case these fail? pass # Remove all the related subscriptions for _ , sub in self . _sub_map . items ( ) : if sub . _msgs_task is not None : sub . _msgs_task . cancel ( ) try : await self . _nc . unsubscribe ( sub . sid ) except : continue self . _sub_map = { } | Removes any present internal state from the client . | 212 | 10 |
27,961 | async def close ( self ) : # Remove the core NATS Streaming subscriptions. await self . _close ( ) req = protocol . CloseRequest ( ) req . clientID = self . _client_id msg = await self . _nc . request ( self . _close_req_subject , req . SerializeToString ( ) , self . _connect_timeout , ) resp = protocol . CloseResponse ( ) resp . ParseFromString ( msg . data ) if resp . error != "" : raise StanError ( resp . error ) | Close terminates a session with NATS Streaming . | 113 | 10 |
27,962 | async def unsubscribe ( self ) : await self . _nc . unsubscribe ( self . sid ) try : # Stop the processing task for the subscription. sub = self . _sc . _sub_map [ self . inbox ] sub . _msgs_task . cancel ( ) del self . _sc . _sub_map [ self . inbox ] except KeyError : pass req = protocol . UnsubscribeRequest ( ) req . clientID = self . _sc . _client_id req . subject = self . subject req . inbox = self . ack_inbox if self . durable_name is not None : req . durableName = self . durable_name msg = await self . _nc . request ( self . _sc . _unsub_req_subject , req . SerializeToString ( ) , self . _sc . _connect_timeout , ) resp = protocol . SubscriptionResponse ( ) resp . ParseFromString ( msg . data ) if resp . error != "" : raise StanError ( resp . error ) | Remove subscription on a topic in this client . | 220 | 9 |
27,963 | def datetime_from_ldap ( value ) : if not value : return None match = LDAP_DATETIME_RE . match ( value ) if not match : return None groups = match . groupdict ( ) if groups [ 'microsecond' ] : groups [ 'microsecond' ] = groups [ 'microsecond' ] . ljust ( 6 , '0' ) [ : 6 ] tzinfo = groups . pop ( 'tzinfo' ) if tzinfo == 'Z' : tzinfo = timezone . utc else : offset_mins = int ( tzinfo [ - 2 : ] ) if len ( tzinfo ) == 5 else 0 offset = 60 * int ( tzinfo [ 1 : 3 ] ) + offset_mins if tzinfo [ 0 ] == '-' : offset = - offset tzinfo = timezone . get_fixed_timezone ( offset ) kwargs = { k : int ( v ) for k , v in groups . items ( ) if v is not None } kwargs [ 'tzinfo' ] = tzinfo return datetime . datetime ( * * kwargs ) | Convert a LDAP - style datetime to a Python aware object . | 248 | 15 |
27,964 | def get_db_prep_value ( self , value , connection , prepared = False ) : if prepared : return value if value is None : return [ ] values = value if self . multi_valued_field else [ value ] prepared_values = [ self . get_prep_value ( v ) for v in values ] # Remove duplicates. # https://tools.ietf.org/html/rfc4511#section-4.1.7 : # "The set of attribute values is unordered." # We keep those values sorted in natural order to avoid useless # updates to the LDAP server. return list ( sorted ( set ( v for v in prepared_values if v ) ) ) | Prepare a value for DB interaction . | 148 | 8 |
27,965 | def build_rdn ( self ) : bits = [ ] for field in self . _meta . fields : if field . db_column and field . primary_key : bits . append ( "%s=%s" % ( field . db_column , getattr ( self , field . name ) ) ) if not len ( bits ) : raise Exception ( "Could not build Distinguished Name" ) return '+' . join ( bits ) | Build the Relative Distinguished Name for this entry . | 93 | 10 |
27,966 | def delete ( self , using = None ) : using = using or router . db_for_write ( self . __class__ , instance = self ) connection = connections [ using ] logger . debug ( "Deleting LDAP entry %s" % self . dn ) connection . delete_s ( self . dn ) signals . post_delete . send ( sender = self . __class__ , instance = self ) | Delete this entry . | 89 | 4 |
27,967 | def _save_table ( self , raw = False , cls = None , force_insert = None , force_update = None , using = None , update_fields = None ) : # Connection aliasing connection = connections [ using ] create = bool ( force_insert or not self . dn ) # Prepare fields if update_fields : target_fields = [ self . _meta . get_field ( name ) for name in update_fields ] else : target_fields = [ field for field in cls . _meta . get_fields ( include_hidden = True ) if field . concrete and not field . primary_key ] def get_field_value ( field , instance ) : python_value = getattr ( instance , field . attname ) return field . get_db_prep_save ( python_value , connection = connection ) if create : old = None else : old = cls . objects . using ( using ) . get ( dn = self . _saved_dn ) changes = { field . db_column : ( None if old is None else get_field_value ( field , old ) , get_field_value ( field , self ) , ) for field in target_fields } # Actual saving old_dn = self . dn new_dn = self . build_dn ( ) updated = False # Insertion if create : # FIXME(rbarrois): This should be handled through a hidden field. hidden_values = [ ( 'objectClass' , [ obj_class . encode ( 'utf-8' ) for obj_class in self . object_classes ] ) ] new_values = hidden_values + [ ( colname , change [ 1 ] ) for colname , change in sorted ( changes . items ( ) ) if change [ 1 ] != [ ] ] new_dn = self . build_dn ( ) logger . debug ( "Creating new LDAP entry %s" , new_dn ) connection . add_s ( new_dn , new_values ) # Update else : modlist = [ ] for colname , change in sorted ( changes . items ( ) ) : old_value , new_value = change if old_value == new_value : continue modlist . append ( ( ldap . MOD_DELETE if new_value == [ ] else ldap . MOD_REPLACE , colname , new_value , ) ) if new_dn != old_dn : logger . debug ( "renaming ldap entry %s to %s" , old_dn , new_dn ) connection . rename_s ( old_dn , self . build_rdn ( ) ) if modlist : logger . debug ( "Modifying existing LDAP entry %s" , new_dn ) connection . 
modify_s ( new_dn , modlist ) updated = True self . dn = new_dn # Finishing self . _saved_dn = self . dn return updated | Saves the current instance . | 631 | 6 |
27,968 | def scoped ( base_class , base_dn ) : class Meta : proxy = True verbose_name = base_class . _meta . verbose_name verbose_name_plural = base_class . _meta . verbose_name_plural import re suffix = re . sub ( '[=,]' , '_' , base_dn ) name = "%s_%s" % ( base_class . __name__ , str ( suffix ) ) new_class = type ( str ( name ) , ( base_class , ) , { 'base_dn' : base_dn , '__module__' : base_class . __module__ , 'Meta' : Meta } ) return new_class | Returns a copy of the current class with a different base_dn . | 155 | 14 |
27,969 | def get_connection_params ( self ) : return { 'uri' : self . settings_dict [ 'NAME' ] , 'tls' : self . settings_dict . get ( 'TLS' , False ) , 'bind_dn' : self . settings_dict [ 'USER' ] , 'bind_pw' : self . settings_dict [ 'PASSWORD' ] , 'retry_max' : self . settings_dict . get ( 'RETRY_MAX' , 1 ) , 'retry_delay' : self . settings_dict . get ( 'RETRY_DELAY' , 60.0 ) , 'options' : { k if isinstance ( k , int ) else k . lower ( ) : v for k , v in self . settings_dict . get ( 'CONNECTION_OPTIONS' , { } ) . items ( ) } , } | Compute appropriate parameters for establishing a new connection . | 194 | 10 |
27,970 | def get_new_connection ( self , conn_params ) : connection = ldap . ldapobject . ReconnectLDAPObject ( uri = conn_params [ 'uri' ] , retry_max = conn_params [ 'retry_max' ] , retry_delay = conn_params [ 'retry_delay' ] , bytes_mode = False ) options = conn_params [ 'options' ] for opt , value in options . items ( ) : if opt == 'query_timeout' : connection . timeout = int ( value ) elif opt == 'page_size' : self . page_size = int ( value ) else : connection . set_option ( opt , value ) if conn_params [ 'tls' ] : connection . start_tls_s ( ) connection . simple_bind_s ( conn_params [ 'bind_dn' ] , conn_params [ 'bind_pw' ] , ) return connection | Build a connection from its parameters . | 208 | 7 |
27,971 | def query_as_ldap ( query , compiler , connection ) : if query . is_empty ( ) : return if query . model . _meta . model_name == 'migration' and not hasattr ( query . model , 'object_classes' ) : # FIXME(rbarrois): Support migrations return # FIXME(rbarrois): this could be an extra Where clause filterstr = '' . join ( [ '(objectClass=%s)' % cls for cls in query . model . object_classes ] ) # FIXME(rbarrois): Remove this code as part of #101 if ( len ( query . where . children ) == 1 and not isinstance ( query . where . children [ 0 ] , WhereNode ) and query . where . children [ 0 ] . lhs . target . column == 'dn' ) : lookup = query . where . children [ 0 ] if lookup . lookup_name != 'exact' : raise LdapDBError ( "Unsupported dn lookup: %s" % lookup . lookup_name ) return LdapLookup ( base = lookup . rhs , scope = ldap . SCOPE_BASE , filterstr = '(&%s)' % filterstr , ) sql , params = compiler . compile ( query . where ) if sql : filterstr += '(%s)' % ( sql % tuple ( escape_ldap_filter ( param ) for param in params ) ) return LdapLookup ( base = query . model . base_dn , scope = query . model . search_scope , filterstr = '(&%s)' % filterstr , ) | Convert a django . db . models . sql . query . Query to a LdapLookup . | 353 | 23 |
27,972 | def where_node_as_ldap ( where , compiler , connection ) : bits , params = [ ] , [ ] for item in where . children : if isinstance ( item , WhereNode ) : clause , clause_params = compiler . compile ( item ) else : clause , clause_params = item . as_sql ( compiler , connection ) bits . append ( clause ) params . extend ( clause_params ) if not bits : return '' , [ ] # FIXME(rbarrois): shouldn't we flatten recursive AND / OR? if len ( bits ) == 1 : clause = bits [ 0 ] elif where . connector == AND : clause = '&' + '' . join ( '(%s)' % bit for bit in bits ) elif where . connector == OR : clause = '|' + '' . join ( '(%s)' % bit for bit in bits ) else : raise LdapDBError ( "Unhandled WHERE connector: %s" % where . connector ) if where . negated : clause = ( '!(%s)' % clause ) return clause , params | Parse a django . db . models . sql . where . WhereNode . | 232 | 17 |
27,973 | def compile ( self , node , * args , * * kwargs ) : if isinstance ( node , WhereNode ) : return where_node_as_ldap ( node , self , self . connection ) return super ( SQLCompiler , self ) . compile ( node , * args , * * kwargs ) | Parse a WhereNode to a LDAP filter string . | 69 | 12 |
27,974 | def to_spans ( self ) : s = set ( ) self . _convert_to_spans ( self . tree , 1 , s ) return s | Convert the tree to a set of nonterms and spans . | 35 | 13 |
27,975 | def increment ( self , gold_set , test_set ) : self . gold += len ( gold_set ) self . test += len ( test_set ) self . correct += len ( gold_set & test_set ) | Add examples from sets . | 48 | 5 |
27,976 | def output_row ( self , name ) : print ( "%10s %4d %0.3f %0.3f %0.3f" % ( name , self . gold , self . precision ( ) , self . recall ( ) , self . fscore ( ) ) ) | Output a scoring row . | 62 | 5 |
27,977 | def output ( self ) : FScore . output_header ( ) nts = list ( self . nt_score . keys ( ) ) nts . sort ( ) for nt in nts : self . nt_score [ nt ] . output_row ( nt ) print ( ) self . total_score . output_row ( "total" ) | Print out the f - score table . | 78 | 8 |
27,978 | def invoke ( ctx , data_file ) : click . echo ( 'invoking' ) response = ctx . invoke ( data_file . read ( ) ) log_data = base64 . b64decode ( response [ 'LogResult' ] ) click . echo ( log_data ) click . echo ( 'Response:' ) click . echo ( response [ 'Payload' ] . read ( ) ) click . echo ( 'done' ) | Invoke the command synchronously | 95 | 6 |
27,979 | def tail ( ctx ) : click . echo ( 'tailing logs' ) for e in ctx . tail ( ) [ - 10 : ] : ts = datetime . utcfromtimestamp ( e [ 'timestamp' ] // 1000 ) . isoformat ( ) click . echo ( "{}: {}" . format ( ts , e [ 'message' ] ) ) click . echo ( 'done' ) | Show the last 10 lines of the log file | 87 | 9 |
27,980 | def status ( ctx ) : status = ctx . status ( ) click . echo ( click . style ( 'Policy' , bold = True ) ) if status [ 'policy' ] : line = ' {} ({})' . format ( status [ 'policy' ] [ 'PolicyName' ] , status [ 'policy' ] [ 'Arn' ] ) click . echo ( click . style ( line , fg = 'green' ) ) click . echo ( click . style ( 'Role' , bold = True ) ) if status [ 'role' ] : line = ' {} ({})' . format ( status [ 'role' ] [ 'RoleName' ] , status [ 'role' ] [ 'Arn' ] ) click . echo ( click . style ( line , fg = 'green' ) ) click . echo ( click . style ( 'Function' , bold = True ) ) if status [ 'function' ] : line = ' {} ({})' . format ( status [ 'function' ] [ 'Configuration' ] [ 'FunctionName' ] , status [ 'function' ] [ 'Configuration' ] [ 'FunctionArn' ] ) click . echo ( click . style ( line , fg = 'green' ) ) else : click . echo ( click . style ( ' None' , fg = 'green' ) ) click . echo ( click . style ( 'Event Sources' , bold = True ) ) if status [ 'event_sources' ] : for event_source in status [ 'event_sources' ] : if event_source : arn = event_source . get ( 'EventSourceArn' ) state = event_source . get ( 'State' , 'Enabled' ) line = ' {}: {}' . format ( arn , state ) click . echo ( click . style ( line , fg = 'green' ) ) else : click . echo ( click . style ( ' None' , fg = 'green' ) ) | Print a status of this Lambda function | 421 | 8 |
27,981 | def event_sources ( ctx , command ) : if command == 'list' : click . echo ( 'listing event sources' ) event_sources = ctx . list_event_sources ( ) for es in event_sources : click . echo ( 'arn: {}' . format ( es [ 'arn' ] ) ) click . echo ( 'starting position: {}' . format ( es [ 'starting_position' ] ) ) click . echo ( 'batch size: {}' . format ( es [ 'batch_size' ] ) ) click . echo ( 'enabled: {}' . format ( es [ 'enabled' ] ) ) click . echo ( 'done' ) elif command == 'enable' : click . echo ( 'enabling event sources' ) ctx . enable_event_sources ( ) click . echo ( 'done' ) elif command == 'disable' : click . echo ( 'disabling event sources' ) ctx . disable_event_sources ( ) click . echo ( 'done' ) | List enable and disable event sources specified in the config file | 224 | 11 |
27,982 | def check_interface_is_subset ( circuit1 , circuit2 ) : circuit1_port_names = circuit1 . interface . ports . keys ( ) for name in circuit1_port_names : if name not in circuit2 . interface . ports : raise ValueError ( f"{circuit2} (circuit2) does not have port {name}" ) circuit1_kind = type ( type ( getattr ( circuit1 , name ) ) ) circuit2_kind = type ( type ( getattr ( circuit2 , name ) ) ) circuit1_sub_circuit2 = issubclass ( circuit1_kind , circuit2_kind ) circuit2_sub_circuit1 = issubclass ( circuit2_kind , circuit1_kind ) # Check that the type of one could be converted to the other if not ( circuit1_sub_circuit2 or circuit2_sub_circuit1 ) : raise ValueError ( f"Port {name} types don't match:" f" Type0={circuit1_kind}," f" Type1={circuit2_kind}" ) | Checks that the interface of circuit1 is a subset of circuit2 | 236 | 14 |
27,983 | def CoerceValue ( value , value_type ) : if isinstance ( value , tuple ) : # In case of a tuple, we run the same function on the value itself and # add the formatted value. if ( len ( value ) not in [ 2 , 3 ] or ( len ( value ) == 3 and not isinstance ( value [ 2 ] , dict ) ) ) : raise DataTableException ( "Wrong format for value and formatting - %s." % str ( value ) ) if not isinstance ( value [ 1 ] , six . string_types + ( type ( None ) , ) ) : raise DataTableException ( "Formatted value is not string, given %s." % type ( value [ 1 ] ) ) js_value = DataTable . CoerceValue ( value [ 0 ] , value_type ) return ( js_value , ) + value [ 1 : ] t_value = type ( value ) if value is None : return value if value_type == "boolean" : return bool ( value ) elif value_type == "number" : if isinstance ( value , six . integer_types + ( float , ) ) : return value raise DataTableException ( "Wrong type %s when expected number" % t_value ) elif value_type == "string" : if isinstance ( value , six . text_type ) : return value if isinstance ( value , bytes ) : return six . text_type ( value , encoding = "utf-8" ) else : return six . text_type ( value ) elif value_type == "date" : if isinstance ( value , datetime . datetime ) : return datetime . date ( value . year , value . month , value . day ) elif isinstance ( value , datetime . date ) : return value else : raise DataTableException ( "Wrong type %s when expected date" % t_value ) elif value_type == "timeofday" : if isinstance ( value , datetime . datetime ) : return datetime . time ( value . hour , value . minute , value . second ) elif isinstance ( value , datetime . time ) : return value else : raise DataTableException ( "Wrong type %s when expected time" % t_value ) elif value_type == "datetime" : if isinstance ( value , datetime . 
datetime ) : return value else : raise DataTableException ( "Wrong type %s when expected datetime" % t_value ) # If we got here, it means the given value_type was not one of the # supported types. raise DataTableException ( "Unsupported type %s" % value_type ) | Coerces a single value into the type expected for its column . | 577 | 14 |
27,984 | def ColumnTypeParser ( description ) : if not description : raise DataTableException ( "Description error: empty description given" ) if not isinstance ( description , ( six . string_types , tuple ) ) : raise DataTableException ( "Description error: expected either string or " "tuple, got %s." % type ( description ) ) if isinstance ( description , six . string_types ) : description = ( description , ) # According to the tuple's length, we fill the keys # We verify everything is of type string for elem in description [ : 3 ] : if not isinstance ( elem , six . string_types ) : raise DataTableException ( "Description error: expected tuple of " "strings, current element of type %s." % type ( elem ) ) desc_dict = { "id" : description [ 0 ] , "label" : description [ 0 ] , "type" : "string" , "custom_properties" : { } } if len ( description ) > 1 : desc_dict [ "type" ] = description [ 1 ] . lower ( ) if len ( description ) > 2 : desc_dict [ "label" ] = description [ 2 ] if len ( description ) > 3 : if not isinstance ( description [ 3 ] , dict ) : raise DataTableException ( "Description error: expected custom " "properties of type dict, current element " "of type %s." % type ( description [ 3 ] ) ) desc_dict [ "custom_properties" ] = description [ 3 ] if len ( description ) > 4 : raise DataTableException ( "Description error: tuple of length > 4" ) if desc_dict [ "type" ] not in [ "string" , "number" , "boolean" , "date" , "datetime" , "timeofday" ] : raise DataTableException ( "Description error: unsupported type '%s'" % desc_dict [ "type" ] ) return desc_dict | Parses a single column description . Internal helper method . | 418 | 12 |
27,985 | def TableDescriptionParser ( table_description , depth = 0 ) : # For the recursion step, we check for a scalar object (string or tuple) if isinstance ( table_description , ( six . string_types , tuple ) ) : parsed_col = DataTable . ColumnTypeParser ( table_description ) parsed_col [ "depth" ] = depth parsed_col [ "container" ] = "scalar" return [ parsed_col ] # Since it is not scalar, table_description must be iterable. if not hasattr ( table_description , "__iter__" ) : raise DataTableException ( "Expected an iterable object, got %s" % type ( table_description ) ) if not isinstance ( table_description , dict ) : # We expect a non-dictionary iterable item. columns = [ ] for desc in table_description : parsed_col = DataTable . ColumnTypeParser ( desc ) parsed_col [ "depth" ] = depth parsed_col [ "container" ] = "iter" columns . append ( parsed_col ) if not columns : raise DataTableException ( "Description iterable objects should not" " be empty." ) return columns # The other case is a dictionary if not table_description : raise DataTableException ( "Empty dictionaries are not allowed inside" " description" ) # To differentiate between the two cases of more levels below or this is # the most inner dictionary, we consider the number of keys (more than one # key is an indication of the most inner dictionary) and the type of the key and # value in case of only 1 key (if the type of key is string and the type of # the value is a tuple of 0-3 items, we assume this is the most inner # dictionary). # NOTE: this way of differentiating might create ambiguity. See docs. if ( len ( table_description ) != 1 or ( isinstance ( next ( six . iterkeys ( table_description ) ) , six . string_types ) and isinstance ( next ( six . itervalues ( table_description ) ) , tuple ) and len ( next ( six . itervalues ( table_description ) ) ) < 4 ) ) : # This is the most inner dictionary. Parsing types. 
columns = [ ] # We sort the items, equivalent to sort the keys since they are unique for key , value in sorted ( table_description . items ( ) ) : # We parse the column type as (key, type) or (key, type, label) using # ColumnTypeParser. if isinstance ( value , tuple ) : parsed_col = DataTable . ColumnTypeParser ( ( key , ) + value ) else : parsed_col = DataTable . ColumnTypeParser ( ( key , value ) ) parsed_col [ "depth" ] = depth parsed_col [ "container" ] = "dict" columns . append ( parsed_col ) return columns # This is an outer dictionary, must have at most one key. parsed_col = DataTable . ColumnTypeParser ( sorted ( table_description . keys ( ) ) [ 0 ] ) parsed_col [ "depth" ] = depth parsed_col [ "container" ] = "dict" return ( [ parsed_col ] + DataTable . TableDescriptionParser ( sorted ( table_description . values ( ) ) [ 0 ] , depth = depth + 1 ) ) | Parses the table_description object for internal use . | 716 | 12 |
27,986 | def LoadData ( self , data , custom_properties = None ) : self . __data = [ ] self . AppendData ( data , custom_properties ) | Loads new rows to the data table clearing existing rows . | 34 | 12 |
27,987 | def AppendData ( self , data , custom_properties = None ) : # If the maximal depth is 0, we simply iterate over the data table # lines and insert them using _InnerAppendData. Otherwise, we simply # let the _InnerAppendData handle all the levels. if not self . __columns [ - 1 ] [ "depth" ] : for row in data : self . _InnerAppendData ( ( { } , custom_properties ) , row , 0 ) else : self . _InnerAppendData ( ( { } , custom_properties ) , data , 0 ) | Appends new data to the table . | 131 | 8 |
27,988 | def _InnerAppendData ( self , prev_col_values , data , col_index ) : # We first check that col_index has not exceeded the columns size if col_index >= len ( self . __columns ) : raise DataTableException ( "The data does not match description, too deep" ) # Dealing with the scalar case, the data is the last value. if self . __columns [ col_index ] [ "container" ] == "scalar" : prev_col_values [ 0 ] [ self . __columns [ col_index ] [ "id" ] ] = data self . __data . append ( prev_col_values ) return if self . __columns [ col_index ] [ "container" ] == "iter" : if not hasattr ( data , "__iter__" ) or isinstance ( data , dict ) : raise DataTableException ( "Expected iterable object, got %s" % type ( data ) ) # We only need to insert the rest of the columns # If there are less items than expected, we only add what there is. for value in data : if col_index >= len ( self . __columns ) : raise DataTableException ( "Too many elements given in data" ) prev_col_values [ 0 ] [ self . __columns [ col_index ] [ "id" ] ] = value col_index += 1 self . __data . append ( prev_col_values ) return # We know the current level is a dictionary, we verify the type. if not isinstance ( data , dict ) : raise DataTableException ( "Expected dictionary at current level, got %s" % type ( data ) ) # We check if this is the last level if self . __columns [ col_index ] [ "depth" ] == self . __columns [ - 1 ] [ "depth" ] : # We need to add the keys in the dictionary as they are for col in self . __columns [ col_index : ] : if col [ "id" ] in data : prev_col_values [ 0 ] [ col [ "id" ] ] = data [ col [ "id" ] ] self . __data . append ( prev_col_values ) return # We have a dictionary in an inner depth level. if not data . keys ( ) : # In case this is an empty dictionary, we add a record with the columns # filled only until this point. self . __data . append ( prev_col_values ) else : for key in sorted ( data ) : col_values = dict ( prev_col_values [ 0 ] ) col_values [ self . 
__columns [ col_index ] [ "id" ] ] = key self . _InnerAppendData ( ( col_values , prev_col_values [ 1 ] ) , data [ key ] , col_index + 1 ) | Inner function to assist LoadData . | 623 | 8 |
27,989 | def _PreparedData ( self , order_by = ( ) ) : if not order_by : return self . __data sorted_data = self . __data [ : ] if isinstance ( order_by , six . string_types ) or ( isinstance ( order_by , tuple ) and len ( order_by ) == 2 and order_by [ 1 ] . lower ( ) in [ "asc" , "desc" ] ) : order_by = ( order_by , ) for key in reversed ( order_by ) : if isinstance ( key , six . string_types ) : sorted_data . sort ( key = lambda x : x [ 0 ] . get ( key ) ) elif ( isinstance ( key , ( list , tuple ) ) and len ( key ) == 2 and key [ 1 ] . lower ( ) in ( "asc" , "desc" ) ) : key_func = lambda x : x [ 0 ] . get ( key [ 0 ] ) sorted_data . sort ( key = key_func , reverse = key [ 1 ] . lower ( ) != "asc" ) else : raise DataTableException ( "Expected tuple with second value: " "'asc' or 'desc'" ) return sorted_data | Prepares the data for enumeration - sorting it by order_by . | 265 | 15 |
27,990 | def ToJSCode ( self , name , columns_order = None , order_by = ( ) ) : encoder = DataTableJSONEncoder ( ) if columns_order is None : columns_order = [ col [ "id" ] for col in self . __columns ] col_dict = dict ( [ ( col [ "id" ] , col ) for col in self . __columns ] ) # We first create the table with the given name jscode = "var %s = new google.visualization.DataTable();\n" % name if self . custom_properties : jscode += "%s.setTableProperties(%s);\n" % ( name , encoder . encode ( self . custom_properties ) ) # We add the columns to the table for i , col in enumerate ( columns_order ) : jscode += "%s.addColumn(%s, %s, %s);\n" % ( name , encoder . encode ( col_dict [ col ] [ "type" ] ) , encoder . encode ( col_dict [ col ] [ "label" ] ) , encoder . encode ( col_dict [ col ] [ "id" ] ) ) if col_dict [ col ] [ "custom_properties" ] : jscode += "%s.setColumnProperties(%d, %s);\n" % ( name , i , encoder . encode ( col_dict [ col ] [ "custom_properties" ] ) ) jscode += "%s.addRows(%d);\n" % ( name , len ( self . __data ) ) # We now go over the data and add each row for ( i , ( row , cp ) ) in enumerate ( self . _PreparedData ( order_by ) ) : # We add all the elements of this row by their order for ( j , col ) in enumerate ( columns_order ) : if col not in row or row [ col ] is None : continue value = self . CoerceValue ( row [ col ] , col_dict [ col ] [ "type" ] ) if isinstance ( value , tuple ) : cell_cp = "" if len ( value ) == 3 : cell_cp = ", %s" % encoder . encode ( row [ col ] [ 2 ] ) # We have a formatted value or custom property as well jscode += ( "%s.setCell(%d, %d, %s, %s%s);\n" % ( name , i , j , self . EscapeForJSCode ( encoder , value [ 0 ] ) , self . EscapeForJSCode ( encoder , value [ 1 ] ) , cell_cp ) ) else : jscode += "%s.setCell(%d, %d, %s);\n" % ( name , i , j , self . EscapeForJSCode ( encoder , value ) ) if cp : jscode += "%s.setRowProperties(%d, %s);\n" % ( name , i , encoder . 
encode ( cp ) ) return jscode | Writes the data table as a JS code string . | 677 | 11 |
27,991 | def ToHtml ( self , columns_order = None , order_by = ( ) ) : table_template = "<html><body><table border=\"1\">%s</table></body></html>" columns_template = "<thead><tr>%s</tr></thead>" rows_template = "<tbody>%s</tbody>" row_template = "<tr>%s</tr>" header_cell_template = "<th>%s</th>" cell_template = "<td>%s</td>" if columns_order is None : columns_order = [ col [ "id" ] for col in self . __columns ] col_dict = dict ( [ ( col [ "id" ] , col ) for col in self . __columns ] ) columns_list = [ ] for col in columns_order : columns_list . append ( header_cell_template % html . escape ( col_dict [ col ] [ "label" ] ) ) columns_html = columns_template % "" . join ( columns_list ) rows_list = [ ] # We now go over the data and add each row for row , unused_cp in self . _PreparedData ( order_by ) : cells_list = [ ] # We add all the elements of this row by their order for col in columns_order : # For empty string we want empty quotes (""). value = "" if col in row and row [ col ] is not None : value = self . CoerceValue ( row [ col ] , col_dict [ col ] [ "type" ] ) if isinstance ( value , tuple ) : # We have a formatted value and we're going to use it cells_list . append ( cell_template % html . escape ( self . ToString ( value [ 1 ] ) ) ) else : cells_list . append ( cell_template % html . escape ( self . ToString ( value ) ) ) rows_list . append ( row_template % "" . join ( cells_list ) ) rows_html = rows_template % "" . join ( rows_list ) return table_template % ( columns_html + rows_html ) | Writes the data table as an HTML table code string . | 461 | 12 |
27,992 | def ToCsv ( self , columns_order = None , order_by = ( ) , separator = "," ) : csv_buffer = six . StringIO ( ) writer = csv . writer ( csv_buffer , delimiter = separator ) if columns_order is None : columns_order = [ col [ "id" ] for col in self . __columns ] col_dict = dict ( [ ( col [ "id" ] , col ) for col in self . __columns ] ) def ensure_str ( s ) : "Compatibility function. Ensures using of str rather than unicode." if isinstance ( s , str ) : return s return s . encode ( "utf-8" ) writer . writerow ( [ ensure_str ( col_dict [ col ] [ "label" ] ) for col in columns_order ] ) # We now go over the data and add each row for row , unused_cp in self . _PreparedData ( order_by ) : cells_list = [ ] # We add all the elements of this row by their order for col in columns_order : value = "" if col in row and row [ col ] is not None : value = self . CoerceValue ( row [ col ] , col_dict [ col ] [ "type" ] ) if isinstance ( value , tuple ) : # We have a formatted value. Using it only for date/time types. if col_dict [ col ] [ "type" ] in [ "date" , "datetime" , "timeofday" ] : cells_list . append ( ensure_str ( self . ToString ( value [ 1 ] ) ) ) else : cells_list . append ( ensure_str ( self . ToString ( value [ 0 ] ) ) ) else : cells_list . append ( ensure_str ( self . ToString ( value ) ) ) writer . writerow ( cells_list ) return csv_buffer . getvalue ( ) | Writes the data table as a CSV string . | 423 | 10 |
27,993 | def ToTsvExcel ( self , columns_order = None , order_by = ( ) ) : csv_result = self . ToCsv ( columns_order , order_by , separator = "\t" ) if not isinstance ( csv_result , six . text_type ) : csv_result = csv_result . decode ( "utf-8" ) return csv_result . encode ( "UTF-16LE" ) | Returns a file in tab - separated - format readable by MS Excel . | 99 | 14 |
27,994 | def _ToJSonObj ( self , columns_order = None , order_by = ( ) ) : if columns_order is None : columns_order = [ col [ "id" ] for col in self . __columns ] col_dict = dict ( [ ( col [ "id" ] , col ) for col in self . __columns ] ) # Creating the column JSON objects col_objs = [ ] for col_id in columns_order : col_obj = { "id" : col_dict [ col_id ] [ "id" ] , "label" : col_dict [ col_id ] [ "label" ] , "type" : col_dict [ col_id ] [ "type" ] } if col_dict [ col_id ] [ "custom_properties" ] : col_obj [ "p" ] = col_dict [ col_id ] [ "custom_properties" ] col_objs . append ( col_obj ) # Creating the rows jsons row_objs = [ ] for row , cp in self . _PreparedData ( order_by ) : cell_objs = [ ] for col in columns_order : value = self . CoerceValue ( row . get ( col , None ) , col_dict [ col ] [ "type" ] ) if value is None : cell_obj = None elif isinstance ( value , tuple ) : cell_obj = { "v" : value [ 0 ] } if len ( value ) > 1 and value [ 1 ] is not None : cell_obj [ "f" ] = value [ 1 ] if len ( value ) == 3 : cell_obj [ "p" ] = value [ 2 ] else : cell_obj = { "v" : value } cell_objs . append ( cell_obj ) row_obj = { "c" : cell_objs } if cp : row_obj [ "p" ] = cp row_objs . append ( row_obj ) json_obj = { "cols" : col_objs , "rows" : row_objs } if self . custom_properties : json_obj [ "p" ] = self . custom_properties return json_obj | Returns an object suitable to be converted to JSON . | 479 | 10 |
27,995 | def ToJSon ( self , columns_order = None , order_by = ( ) ) : encoded_response_str = DataTableJSONEncoder ( ) . encode ( self . _ToJSonObj ( columns_order , order_by ) ) if not isinstance ( encoded_response_str , str ) : return encoded_response_str . encode ( "utf-8" ) return encoded_response_str | Returns a string that can be used in a JS DataTable constructor . | 89 | 14 |
27,996 | def ToJSonResponse ( self , columns_order = None , order_by = ( ) , req_id = 0 , response_handler = "google.visualization.Query.setResponse" ) : response_obj = { "version" : "0.6" , "reqId" : str ( req_id ) , "table" : self . _ToJSonObj ( columns_order , order_by ) , "status" : "ok" } encoded_response_str = DataTableJSONEncoder ( ) . encode ( response_obj ) if not isinstance ( encoded_response_str , str ) : encoded_response_str = encoded_response_str . encode ( "utf-8" ) return "%s(%s);" % ( response_handler , encoded_response_str ) | Writes a table as a JSON response that can be returned as - is to a client . | 174 | 19 |
27,997 | def ToResponse ( self , columns_order = None , order_by = ( ) , tqx = "" ) : tqx_dict = { } if tqx : tqx_dict = dict ( opt . split ( ":" ) for opt in tqx . split ( ";" ) ) if tqx_dict . get ( "version" , "0.6" ) != "0.6" : raise DataTableException ( "Version (%s) passed by request is not supported." % tqx_dict [ "version" ] ) if tqx_dict . get ( "out" , "json" ) == "json" : response_handler = tqx_dict . get ( "responseHandler" , "google.visualization.Query.setResponse" ) return self . ToJSonResponse ( columns_order , order_by , req_id = tqx_dict . get ( "reqId" , 0 ) , response_handler = response_handler ) elif tqx_dict [ "out" ] == "html" : return self . ToHtml ( columns_order , order_by ) elif tqx_dict [ "out" ] == "csv" : return self . ToCsv ( columns_order , order_by ) elif tqx_dict [ "out" ] == "tsv-excel" : return self . ToTsvExcel ( columns_order , order_by ) else : raise DataTableException ( "'out' parameter: '%s' is not supported" % tqx_dict [ "out" ] ) | Writes the right response according to the request string passed in tqx . | 351 | 16 |
27,998 | def copy_fields ( self , model ) : fields = { '__module__' : model . __module__ } for field in model . _meta . fields : if not field . name in self . _exclude : field = copy . deepcopy ( field ) if isinstance ( field , models . AutoField ) : #we replace the AutoField of the original model #with an IntegerField because a model can #have only one autofield. field . __class__ = models . IntegerField if field . primary_key : field . serialize = True #OneToOne fields should really be tracked #as ForeignKey fields if isinstance ( field , models . OneToOneField ) : field . __class__ = models . ForeignKey if field . primary_key or field . unique : #unique fields of the original model #can not be guaranteed to be unique #in the audit log entry but they #should still be indexed for faster lookups. field . primary_key = False field . _unique = False field . db_index = True if field . remote_field and field . remote_field . related_name : field . remote_field . related_name = '_auditlog_{}_{}' . format ( model . _meta . model_name , field . remote_field . related_name ) elif field . remote_field : try : if field . remote_field . get_accessor_name ( ) : field . remote_field . related_name = '_auditlog_{}_{}' . format ( model . _meta . model_name , field . remote_field . get_accessor_name ( ) ) except e : pass fields [ field . name ] = field return fields | Creates copies of the fields we are keeping track of for the provided model returning a dictionary mapping field name to a copied field object . | 366 | 27 |
27,999 | def get_logging_fields ( self , model ) : rel_name = '_%s_audit_log_entry' % model . _meta . object_name . lower ( ) def entry_instance_to_unicode ( log_entry ) : try : result = '%s: %s %s at %s' % ( model . _meta . object_name , log_entry . object_state , log_entry . get_action_type_display ( ) . lower ( ) , log_entry . action_date , ) except AttributeError : result = '%s %s at %s' % ( model . _meta . object_name , log_entry . get_action_type_display ( ) . lower ( ) , log_entry . action_date ) return result action_user_field = LastUserField ( related_name = rel_name , editable = False ) #check if the manager has been attached to auth user model if [ model . _meta . app_label , model . __name__ ] == getattr ( settings , 'AUTH_USER_MODEL' , 'auth.User' ) . split ( "." ) : action_user_field = LastUserField ( related_name = rel_name , editable = False , to = 'self' ) return { 'action_id' : models . AutoField ( primary_key = True ) , 'action_date' : models . DateTimeField ( default = datetime_now , editable = False , blank = False ) , 'action_user' : action_user_field , 'action_type' : models . CharField ( max_length = 1 , editable = False , choices = ( ( 'I' , _ ( 'Created' ) ) , ( 'U' , _ ( 'Changed' ) ) , ( 'D' , _ ( 'Deleted' ) ) , ) ) , 'object_state' : LogEntryObjectDescriptor ( model ) , '__unicode__' : entry_instance_to_unicode , } | Returns a dictionary mapping of the fields that are used for keeping the actual audit log entries . | 446 | 20 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.