repo stringlengths 7 55 | path stringlengths 4 127 | func_name stringlengths 1 88 | original_string stringlengths 75 19.8k | language stringclasses 1
value | code stringlengths 75 19.8k | code_tokens listlengths 20 707 | docstring stringlengths 3 17.3k | docstring_tokens listlengths 3 222 | sha stringlengths 40 40 | url stringlengths 87 242 | partition stringclasses 1
value | idx int64 0 252k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
broadinstitute/fiss | firecloud/fiss.py | space_lock | def space_lock(args):
""" Lock a workspace """
r = fapi.lock_workspace(args.project, args.workspace)
fapi._check_response_code(r, 204)
if fcconfig.verbosity:
eprint('Locked workspace {0}/{1}'.format(args.project, args.workspace))
return 0 | python | def space_lock(args):
""" Lock a workspace """
r = fapi.lock_workspace(args.project, args.workspace)
fapi._check_response_code(r, 204)
if fcconfig.verbosity:
eprint('Locked workspace {0}/{1}'.format(args.project, args.workspace))
return 0 | [
"def",
"space_lock",
"(",
"args",
")",
":",
"r",
"=",
"fapi",
".",
"lock_workspace",
"(",
"args",
".",
"project",
",",
"args",
".",
"workspace",
")",
"fapi",
".",
"_check_response_code",
"(",
"r",
",",
"204",
")",
"if",
"fcconfig",
".",
"verbosity",
":... | Lock a workspace | [
"Lock",
"a",
"workspace"
] | dddf91547479506dbbafb69ec84d44dcc4a94ab4 | https://github.com/broadinstitute/fiss/blob/dddf91547479506dbbafb69ec84d44dcc4a94ab4/firecloud/fiss.py#L85-L91 | train | 38,900 |
broadinstitute/fiss | firecloud/fiss.py | space_unlock | def space_unlock(args):
""" Unlock a workspace """
r = fapi.unlock_workspace(args.project, args.workspace)
fapi._check_response_code(r, 204)
if fcconfig.verbosity:
eprint('Unlocked workspace {0}/{1}'.format(args.project,args.workspace))
return 0 | python | def space_unlock(args):
""" Unlock a workspace """
r = fapi.unlock_workspace(args.project, args.workspace)
fapi._check_response_code(r, 204)
if fcconfig.verbosity:
eprint('Unlocked workspace {0}/{1}'.format(args.project,args.workspace))
return 0 | [
"def",
"space_unlock",
"(",
"args",
")",
":",
"r",
"=",
"fapi",
".",
"unlock_workspace",
"(",
"args",
".",
"project",
",",
"args",
".",
"workspace",
")",
"fapi",
".",
"_check_response_code",
"(",
"r",
",",
"204",
")",
"if",
"fcconfig",
".",
"verbosity",
... | Unlock a workspace | [
"Unlock",
"a",
"workspace"
] | dddf91547479506dbbafb69ec84d44dcc4a94ab4 | https://github.com/broadinstitute/fiss/blob/dddf91547479506dbbafb69ec84d44dcc4a94ab4/firecloud/fiss.py#L94-L100 | train | 38,901 |
broadinstitute/fiss | firecloud/fiss.py | space_new | def space_new(args):
""" Create a new workspace. """
r = fapi.create_workspace(args.project, args.workspace,
args.authdomain, dict())
fapi._check_response_code(r, 201)
if fcconfig.verbosity:
eprint(r.content)
return 0 | python | def space_new(args):
""" Create a new workspace. """
r = fapi.create_workspace(args.project, args.workspace,
args.authdomain, dict())
fapi._check_response_code(r, 201)
if fcconfig.verbosity:
eprint(r.content)
return 0 | [
"def",
"space_new",
"(",
"args",
")",
":",
"r",
"=",
"fapi",
".",
"create_workspace",
"(",
"args",
".",
"project",
",",
"args",
".",
"workspace",
",",
"args",
".",
"authdomain",
",",
"dict",
"(",
")",
")",
"fapi",
".",
"_check_response_code",
"(",
"r",... | Create a new workspace. | [
"Create",
"a",
"new",
"workspace",
"."
] | dddf91547479506dbbafb69ec84d44dcc4a94ab4 | https://github.com/broadinstitute/fiss/blob/dddf91547479506dbbafb69ec84d44dcc4a94ab4/firecloud/fiss.py#L103-L110 | train | 38,902 |
broadinstitute/fiss | firecloud/fiss.py | space_info | def space_info(args):
""" Get metadata for a workspace. """
r = fapi.get_workspace(args.project, args.workspace)
fapi._check_response_code(r, 200)
return r.text | python | def space_info(args):
""" Get metadata for a workspace. """
r = fapi.get_workspace(args.project, args.workspace)
fapi._check_response_code(r, 200)
return r.text | [
"def",
"space_info",
"(",
"args",
")",
":",
"r",
"=",
"fapi",
".",
"get_workspace",
"(",
"args",
".",
"project",
",",
"args",
".",
"workspace",
")",
"fapi",
".",
"_check_response_code",
"(",
"r",
",",
"200",
")",
"return",
"r",
".",
"text"
] | Get metadata for a workspace. | [
"Get",
"metadata",
"for",
"a",
"workspace",
"."
] | dddf91547479506dbbafb69ec84d44dcc4a94ab4 | https://github.com/broadinstitute/fiss/blob/dddf91547479506dbbafb69ec84d44dcc4a94ab4/firecloud/fiss.py#L113-L117 | train | 38,903 |
broadinstitute/fiss | firecloud/fiss.py | space_delete | def space_delete(args):
""" Delete a workspace. """
message = "WARNING: this will delete workspace: \n\t{0}/{1}".format(
args.project, args.workspace)
if not args.yes and not _confirm_prompt(message):
return 0
r = fapi.delete_workspace(args.project, args.workspace)
fapi._check_response_code(r, [200, 202, 204, 404])
if fcconfig.verbosity:
print('Deleted workspace {0}/{1}'.format(args.project, args.workspace))
return 0 | python | def space_delete(args):
""" Delete a workspace. """
message = "WARNING: this will delete workspace: \n\t{0}/{1}".format(
args.project, args.workspace)
if not args.yes and not _confirm_prompt(message):
return 0
r = fapi.delete_workspace(args.project, args.workspace)
fapi._check_response_code(r, [200, 202, 204, 404])
if fcconfig.verbosity:
print('Deleted workspace {0}/{1}'.format(args.project, args.workspace))
return 0 | [
"def",
"space_delete",
"(",
"args",
")",
":",
"message",
"=",
"\"WARNING: this will delete workspace: \\n\\t{0}/{1}\"",
".",
"format",
"(",
"args",
".",
"project",
",",
"args",
".",
"workspace",
")",
"if",
"not",
"args",
".",
"yes",
"and",
"not",
"_confirm_promp... | Delete a workspace. | [
"Delete",
"a",
"workspace",
"."
] | dddf91547479506dbbafb69ec84d44dcc4a94ab4 | https://github.com/broadinstitute/fiss/blob/dddf91547479506dbbafb69ec84d44dcc4a94ab4/firecloud/fiss.py#L120-L131 | train | 38,904 |
broadinstitute/fiss | firecloud/fiss.py | space_clone | def space_clone(args):
""" Replicate a workspace """
# FIXME: add --deep copy option (shallow by default)
# add aliasing capability, then make space_copy alias
if not args.to_workspace:
args.to_workspace = args.workspace
if not args.to_project:
args.to_project = args.project
if (args.project == args.to_project
and args.workspace == args.to_workspace):
eprint("Error: destination project and namespace must differ from"
" cloned workspace")
return 1
r = fapi.clone_workspace(args.project, args.workspace, args.to_project,
args.to_workspace)
fapi._check_response_code(r, 201)
if fcconfig.verbosity:
msg = "{}/{} successfully cloned to {}/{}".format(
args.project, args.workspace,
args.to_project, args.to_workspace)
print(msg)
return 0 | python | def space_clone(args):
""" Replicate a workspace """
# FIXME: add --deep copy option (shallow by default)
# add aliasing capability, then make space_copy alias
if not args.to_workspace:
args.to_workspace = args.workspace
if not args.to_project:
args.to_project = args.project
if (args.project == args.to_project
and args.workspace == args.to_workspace):
eprint("Error: destination project and namespace must differ from"
" cloned workspace")
return 1
r = fapi.clone_workspace(args.project, args.workspace, args.to_project,
args.to_workspace)
fapi._check_response_code(r, 201)
if fcconfig.verbosity:
msg = "{}/{} successfully cloned to {}/{}".format(
args.project, args.workspace,
args.to_project, args.to_workspace)
print(msg)
return 0 | [
"def",
"space_clone",
"(",
"args",
")",
":",
"# FIXME: add --deep copy option (shallow by default)",
"# add aliasing capability, then make space_copy alias",
"if",
"not",
"args",
".",
"to_workspace",
":",
"args",
".",
"to_workspace",
"=",
"args",
".",
"workspace",
"i... | Replicate a workspace | [
"Replicate",
"a",
"workspace"
] | dddf91547479506dbbafb69ec84d44dcc4a94ab4 | https://github.com/broadinstitute/fiss/blob/dddf91547479506dbbafb69ec84d44dcc4a94ab4/firecloud/fiss.py#L134-L158 | train | 38,905 |
broadinstitute/fiss | firecloud/fiss.py | space_acl | def space_acl(args):
''' Retrieve access control list for a workspace'''
r = fapi.get_workspace_acl(args.project, args.workspace)
fapi._check_response_code(r, 200)
result = dict()
for user, info in sorted(r.json()['acl'].items()):
result[user] = info['accessLevel']
return result | python | def space_acl(args):
''' Retrieve access control list for a workspace'''
r = fapi.get_workspace_acl(args.project, args.workspace)
fapi._check_response_code(r, 200)
result = dict()
for user, info in sorted(r.json()['acl'].items()):
result[user] = info['accessLevel']
return result | [
"def",
"space_acl",
"(",
"args",
")",
":",
"r",
"=",
"fapi",
".",
"get_workspace_acl",
"(",
"args",
".",
"project",
",",
"args",
".",
"workspace",
")",
"fapi",
".",
"_check_response_code",
"(",
"r",
",",
"200",
")",
"result",
"=",
"dict",
"(",
")",
"... | Retrieve access control list for a workspace | [
"Retrieve",
"access",
"control",
"list",
"for",
"a",
"workspace"
] | dddf91547479506dbbafb69ec84d44dcc4a94ab4 | https://github.com/broadinstitute/fiss/blob/dddf91547479506dbbafb69ec84d44dcc4a94ab4/firecloud/fiss.py#L161-L168 | train | 38,906 |
broadinstitute/fiss | firecloud/fiss.py | space_set_acl | def space_set_acl(args):
""" Assign an ACL role to list of users for a workspace """
acl_updates = [{"email": user,
"accessLevel": args.role} for user in args.users]
r = fapi.update_workspace_acl(args.project, args.workspace, acl_updates)
fapi._check_response_code(r, 200)
errors = r.json()['usersNotFound']
if len(errors):
eprint("Unable to assign role for unrecognized users:")
for user in errors:
eprint("\t{0}".format(user['email']))
return 1
if fcconfig.verbosity:
print("Successfully updated {0} role(s)".format(len(acl_updates)))
return 0 | python | def space_set_acl(args):
""" Assign an ACL role to list of users for a workspace """
acl_updates = [{"email": user,
"accessLevel": args.role} for user in args.users]
r = fapi.update_workspace_acl(args.project, args.workspace, acl_updates)
fapi._check_response_code(r, 200)
errors = r.json()['usersNotFound']
if len(errors):
eprint("Unable to assign role for unrecognized users:")
for user in errors:
eprint("\t{0}".format(user['email']))
return 1
if fcconfig.verbosity:
print("Successfully updated {0} role(s)".format(len(acl_updates)))
return 0 | [
"def",
"space_set_acl",
"(",
"args",
")",
":",
"acl_updates",
"=",
"[",
"{",
"\"email\"",
":",
"user",
",",
"\"accessLevel\"",
":",
"args",
".",
"role",
"}",
"for",
"user",
"in",
"args",
".",
"users",
"]",
"r",
"=",
"fapi",
".",
"update_workspace_acl",
... | Assign an ACL role to list of users for a workspace | [
"Assign",
"an",
"ACL",
"role",
"to",
"list",
"of",
"users",
"for",
"a",
"workspace"
] | dddf91547479506dbbafb69ec84d44dcc4a94ab4 | https://github.com/broadinstitute/fiss/blob/dddf91547479506dbbafb69ec84d44dcc4a94ab4/firecloud/fiss.py#L171-L188 | train | 38,907 |
broadinstitute/fiss | firecloud/fiss.py | space_search | def space_search(args):
""" Search for workspaces matching certain criteria """
r = fapi.list_workspaces()
fapi._check_response_code(r, 200)
# Parse the JSON for workspace + namespace; then filter by
# search terms: each term is treated as a regular expression
workspaces = r.json()
extra_terms = []
if args.bucket:
workspaces = [w for w in workspaces
if re.search(args.bucket, w['workspace']['bucketName'])]
extra_terms.append('bucket')
# FIXME: add more filter terms
pretty_spaces = []
for space in workspaces:
ns = space['workspace']['namespace']
ws = space['workspace']['name']
pspace = ns + '/' + ws
# Always show workspace storage id
pspace += '\t' + space['workspace']['bucketName']
pretty_spaces.append(pspace)
# Sort for easier viewing, ignore case
return sorted(pretty_spaces, key=lambda s: s.lower()) | python | def space_search(args):
""" Search for workspaces matching certain criteria """
r = fapi.list_workspaces()
fapi._check_response_code(r, 200)
# Parse the JSON for workspace + namespace; then filter by
# search terms: each term is treated as a regular expression
workspaces = r.json()
extra_terms = []
if args.bucket:
workspaces = [w for w in workspaces
if re.search(args.bucket, w['workspace']['bucketName'])]
extra_terms.append('bucket')
# FIXME: add more filter terms
pretty_spaces = []
for space in workspaces:
ns = space['workspace']['namespace']
ws = space['workspace']['name']
pspace = ns + '/' + ws
# Always show workspace storage id
pspace += '\t' + space['workspace']['bucketName']
pretty_spaces.append(pspace)
# Sort for easier viewing, ignore case
return sorted(pretty_spaces, key=lambda s: s.lower()) | [
"def",
"space_search",
"(",
"args",
")",
":",
"r",
"=",
"fapi",
".",
"list_workspaces",
"(",
")",
"fapi",
".",
"_check_response_code",
"(",
"r",
",",
"200",
")",
"# Parse the JSON for workspace + namespace; then filter by",
"# search terms: each term is treated as a regul... | Search for workspaces matching certain criteria | [
"Search",
"for",
"workspaces",
"matching",
"certain",
"criteria"
] | dddf91547479506dbbafb69ec84d44dcc4a94ab4 | https://github.com/broadinstitute/fiss/blob/dddf91547479506dbbafb69ec84d44dcc4a94ab4/firecloud/fiss.py#L191-L216 | train | 38,908 |
broadinstitute/fiss | firecloud/fiss.py | entity_import | def entity_import(args):
""" Upload an entity loadfile. """
project = args.project
workspace = args.workspace
chunk_size = args.chunk_size
with open(args.tsvfile) as tsvf:
headerline = tsvf.readline().strip()
entity_data = [l.rstrip('\n') for l in tsvf]
return _batch_load(project, workspace, headerline, entity_data, chunk_size) | python | def entity_import(args):
""" Upload an entity loadfile. """
project = args.project
workspace = args.workspace
chunk_size = args.chunk_size
with open(args.tsvfile) as tsvf:
headerline = tsvf.readline().strip()
entity_data = [l.rstrip('\n') for l in tsvf]
return _batch_load(project, workspace, headerline, entity_data, chunk_size) | [
"def",
"entity_import",
"(",
"args",
")",
":",
"project",
"=",
"args",
".",
"project",
"workspace",
"=",
"args",
".",
"workspace",
"chunk_size",
"=",
"args",
".",
"chunk_size",
"with",
"open",
"(",
"args",
".",
"tsvfile",
")",
"as",
"tsvf",
":",
"headerl... | Upload an entity loadfile. | [
"Upload",
"an",
"entity",
"loadfile",
"."
] | dddf91547479506dbbafb69ec84d44dcc4a94ab4 | https://github.com/broadinstitute/fiss/blob/dddf91547479506dbbafb69ec84d44dcc4a94ab4/firecloud/fiss.py#L219-L229 | train | 38,909 |
broadinstitute/fiss | firecloud/fiss.py | entity_types | def entity_types(args):
""" List entity types in a workspace """
r = fapi.list_entity_types(args.project, args.workspace)
fapi._check_response_code(r, 200)
return r.json().keys() | python | def entity_types(args):
""" List entity types in a workspace """
r = fapi.list_entity_types(args.project, args.workspace)
fapi._check_response_code(r, 200)
return r.json().keys() | [
"def",
"entity_types",
"(",
"args",
")",
":",
"r",
"=",
"fapi",
".",
"list_entity_types",
"(",
"args",
".",
"project",
",",
"args",
".",
"workspace",
")",
"fapi",
".",
"_check_response_code",
"(",
"r",
",",
"200",
")",
"return",
"r",
".",
"json",
"(",
... | List entity types in a workspace | [
"List",
"entity",
"types",
"in",
"a",
"workspace"
] | dddf91547479506dbbafb69ec84d44dcc4a94ab4 | https://github.com/broadinstitute/fiss/blob/dddf91547479506dbbafb69ec84d44dcc4a94ab4/firecloud/fiss.py#L250-L254 | train | 38,910 |
broadinstitute/fiss | firecloud/fiss.py | entity_list | def entity_list(args):
""" List entities in a workspace. """
r = fapi.get_entities_with_type(args.project, args.workspace)
fapi._check_response_code(r, 200)
return [ '{0}\t{1}'.format(e['entityType'], e['name']) for e in r.json() ] | python | def entity_list(args):
""" List entities in a workspace. """
r = fapi.get_entities_with_type(args.project, args.workspace)
fapi._check_response_code(r, 200)
return [ '{0}\t{1}'.format(e['entityType'], e['name']) for e in r.json() ] | [
"def",
"entity_list",
"(",
"args",
")",
":",
"r",
"=",
"fapi",
".",
"get_entities_with_type",
"(",
"args",
".",
"project",
",",
"args",
".",
"workspace",
")",
"fapi",
".",
"_check_response_code",
"(",
"r",
",",
"200",
")",
"return",
"[",
"'{0}\\t{1}'",
"... | List entities in a workspace. | [
"List",
"entities",
"in",
"a",
"workspace",
"."
] | dddf91547479506dbbafb69ec84d44dcc4a94ab4 | https://github.com/broadinstitute/fiss/blob/dddf91547479506dbbafb69ec84d44dcc4a94ab4/firecloud/fiss.py#L257-L261 | train | 38,911 |
broadinstitute/fiss | firecloud/fiss.py | participant_list | def participant_list(args):
''' List participants within a container'''
# Case 1: retrieve participants within a named data entity
if args.entity_type and args.entity:
# Edge case: caller asked for participant within participant (itself)
if args.entity_type == 'participant':
return [ args.entity.strip() ]
# Otherwise retrieve the container entity
r = fapi.get_entity(args.project, args.workspace, args.entity_type, args.entity)
fapi._check_response_code(r, 200)
participants = r.json()['attributes']["participants"]['items']
return [ participant['entityName'] for participant in participants ]
# Case 2: retrieve all participants within a workspace
return __get_entities(args, "participant", page_size=2000) | python | def participant_list(args):
''' List participants within a container'''
# Case 1: retrieve participants within a named data entity
if args.entity_type and args.entity:
# Edge case: caller asked for participant within participant (itself)
if args.entity_type == 'participant':
return [ args.entity.strip() ]
# Otherwise retrieve the container entity
r = fapi.get_entity(args.project, args.workspace, args.entity_type, args.entity)
fapi._check_response_code(r, 200)
participants = r.json()['attributes']["participants"]['items']
return [ participant['entityName'] for participant in participants ]
# Case 2: retrieve all participants within a workspace
return __get_entities(args, "participant", page_size=2000) | [
"def",
"participant_list",
"(",
"args",
")",
":",
"# Case 1: retrieve participants within a named data entity",
"if",
"args",
".",
"entity_type",
"and",
"args",
".",
"entity",
":",
"# Edge case: caller asked for participant within participant (itself)",
"if",
"args",
".",
"en... | List participants within a container | [
"List",
"participants",
"within",
"a",
"container"
] | dddf91547479506dbbafb69ec84d44dcc4a94ab4 | https://github.com/broadinstitute/fiss/blob/dddf91547479506dbbafb69ec84d44dcc4a94ab4/firecloud/fiss.py#L283-L298 | train | 38,912 |
broadinstitute/fiss | firecloud/fiss.py | pair_list | def pair_list(args):
''' List pairs within a container. '''
# Case 1: retrieve pairs within a named data entity
if args.entity_type and args.entity:
# Edge case: caller asked for pair within a pair (itself)
if args.entity_type == 'pair':
return [ args.entity.strip() ]
# Edge case: pairs for a participant, which has to be done hard way
# by iteratiing over all samples (see firecloud/discussion/9648)
elif args.entity_type == 'participant':
entities = _entity_paginator(args.project, args.workspace,
'pair', page_size=2000)
return [ e['name'] for e in entities if
e['attributes']['participant']['entityName'] == args.entity]
# Otherwise retrieve the container entity
r = fapi.get_entity(args.project, args.workspace, args.entity_type, args.entity)
fapi._check_response_code(r, 200)
pairs = r.json()['attributes']["pairs"]['items']
return [ pair['entityName'] for pair in pairs]
# Case 2: retrieve all pairs within a workspace
return __get_entities(args, "pair", page_size=2000) | python | def pair_list(args):
''' List pairs within a container. '''
# Case 1: retrieve pairs within a named data entity
if args.entity_type and args.entity:
# Edge case: caller asked for pair within a pair (itself)
if args.entity_type == 'pair':
return [ args.entity.strip() ]
# Edge case: pairs for a participant, which has to be done hard way
# by iteratiing over all samples (see firecloud/discussion/9648)
elif args.entity_type == 'participant':
entities = _entity_paginator(args.project, args.workspace,
'pair', page_size=2000)
return [ e['name'] for e in entities if
e['attributes']['participant']['entityName'] == args.entity]
# Otherwise retrieve the container entity
r = fapi.get_entity(args.project, args.workspace, args.entity_type, args.entity)
fapi._check_response_code(r, 200)
pairs = r.json()['attributes']["pairs"]['items']
return [ pair['entityName'] for pair in pairs]
# Case 2: retrieve all pairs within a workspace
return __get_entities(args, "pair", page_size=2000) | [
"def",
"pair_list",
"(",
"args",
")",
":",
"# Case 1: retrieve pairs within a named data entity",
"if",
"args",
".",
"entity_type",
"and",
"args",
".",
"entity",
":",
"# Edge case: caller asked for pair within a pair (itself)",
"if",
"args",
".",
"entity_type",
"==",
"'pa... | List pairs within a container. | [
"List",
"pairs",
"within",
"a",
"container",
"."
] | dddf91547479506dbbafb69ec84d44dcc4a94ab4 | https://github.com/broadinstitute/fiss/blob/dddf91547479506dbbafb69ec84d44dcc4a94ab4/firecloud/fiss.py#L301-L324 | train | 38,913 |
broadinstitute/fiss | firecloud/fiss.py | sample_list | def sample_list(args):
''' List samples within a container. '''
# Case 1: retrieve samples within a named data entity
if args.entity_type and args.entity:
# Edge case: caller asked for samples within a sample (itself)
if args.entity_type == 'sample':
return [ args.entity.strip() ]
# Edge case: samples for a participant, which has to be done hard way
# by iteratiing over all samples (see firecloud/discussion/9648)
elif args.entity_type == 'participant':
samples = _entity_paginator(args.project, args.workspace,
'sample', page_size=2000)
return [ e['name'] for e in samples if
e['attributes']['participant']['entityName'] == args.entity]
# Otherwise retrieve the container entity
r = fapi.get_entity(args.project, args.workspace, args.entity_type, args.entity)
fapi._check_response_code(r, 200)
if args.entity_type == 'pair':
pair = r.json()['attributes']
samples = [ pair['case_sample'], pair['control_sample'] ]
else:
samples = r.json()['attributes']["samples"]['items']
return [ sample['entityName'] for sample in samples ]
# Case 2: retrieve all samples within a workspace
return __get_entities(args, "sample", page_size=2000) | python | def sample_list(args):
''' List samples within a container. '''
# Case 1: retrieve samples within a named data entity
if args.entity_type and args.entity:
# Edge case: caller asked for samples within a sample (itself)
if args.entity_type == 'sample':
return [ args.entity.strip() ]
# Edge case: samples for a participant, which has to be done hard way
# by iteratiing over all samples (see firecloud/discussion/9648)
elif args.entity_type == 'participant':
samples = _entity_paginator(args.project, args.workspace,
'sample', page_size=2000)
return [ e['name'] for e in samples if
e['attributes']['participant']['entityName'] == args.entity]
# Otherwise retrieve the container entity
r = fapi.get_entity(args.project, args.workspace, args.entity_type, args.entity)
fapi._check_response_code(r, 200)
if args.entity_type == 'pair':
pair = r.json()['attributes']
samples = [ pair['case_sample'], pair['control_sample'] ]
else:
samples = r.json()['attributes']["samples"]['items']
return [ sample['entityName'] for sample in samples ]
# Case 2: retrieve all samples within a workspace
return __get_entities(args, "sample", page_size=2000) | [
"def",
"sample_list",
"(",
"args",
")",
":",
"# Case 1: retrieve samples within a named data entity",
"if",
"args",
".",
"entity_type",
"and",
"args",
".",
"entity",
":",
"# Edge case: caller asked for samples within a sample (itself)",
"if",
"args",
".",
"entity_type",
"==... | List samples within a container. | [
"List",
"samples",
"within",
"a",
"container",
"."
] | dddf91547479506dbbafb69ec84d44dcc4a94ab4 | https://github.com/broadinstitute/fiss/blob/dddf91547479506dbbafb69ec84d44dcc4a94ab4/firecloud/fiss.py#L327-L355 | train | 38,914 |
broadinstitute/fiss | firecloud/fiss.py | entity_delete | def entity_delete(args):
""" Delete entity in a workspace. """
msg = "WARNING: this will delete {0} {1} in {2}/{3}".format(
args.entity_type, args.entity, args.project, args.workspace)
if not (args.yes or _confirm_prompt(msg)):
return
json_body=[{"entityType": args.entity_type,
"entityName": args.entity}]
r = fapi.delete_entities(args.project, args.workspace, json_body)
fapi._check_response_code(r, 204)
if fcconfig.verbosity:
print("Succesfully deleted " + args.type + " " + args.entity) | python | def entity_delete(args):
""" Delete entity in a workspace. """
msg = "WARNING: this will delete {0} {1} in {2}/{3}".format(
args.entity_type, args.entity, args.project, args.workspace)
if not (args.yes or _confirm_prompt(msg)):
return
json_body=[{"entityType": args.entity_type,
"entityName": args.entity}]
r = fapi.delete_entities(args.project, args.workspace, json_body)
fapi._check_response_code(r, 204)
if fcconfig.verbosity:
print("Succesfully deleted " + args.type + " " + args.entity) | [
"def",
"entity_delete",
"(",
"args",
")",
":",
"msg",
"=",
"\"WARNING: this will delete {0} {1} in {2}/{3}\"",
".",
"format",
"(",
"args",
".",
"entity_type",
",",
"args",
".",
"entity",
",",
"args",
".",
"project",
",",
"args",
".",
"workspace",
")",
"if",
... | Delete entity in a workspace. | [
"Delete",
"entity",
"in",
"a",
"workspace",
"."
] | dddf91547479506dbbafb69ec84d44dcc4a94ab4 | https://github.com/broadinstitute/fiss/blob/dddf91547479506dbbafb69ec84d44dcc4a94ab4/firecloud/fiss.py#L363-L377 | train | 38,915 |
broadinstitute/fiss | firecloud/fiss.py | meth_wdl | def meth_wdl(args):
''' Retrieve WDL for given version of a repository method'''
r = fapi.get_repository_method(args.namespace, args.method,
args.snapshot_id, True)
fapi._check_response_code(r, 200)
return r.text | python | def meth_wdl(args):
''' Retrieve WDL for given version of a repository method'''
r = fapi.get_repository_method(args.namespace, args.method,
args.snapshot_id, True)
fapi._check_response_code(r, 200)
return r.text | [
"def",
"meth_wdl",
"(",
"args",
")",
":",
"r",
"=",
"fapi",
".",
"get_repository_method",
"(",
"args",
".",
"namespace",
",",
"args",
".",
"method",
",",
"args",
".",
"snapshot_id",
",",
"True",
")",
"fapi",
".",
"_check_response_code",
"(",
"r",
",",
... | Retrieve WDL for given version of a repository method | [
"Retrieve",
"WDL",
"for",
"given",
"version",
"of",
"a",
"repository",
"method"
] | dddf91547479506dbbafb69ec84d44dcc4a94ab4 | https://github.com/broadinstitute/fiss/blob/dddf91547479506dbbafb69ec84d44dcc4a94ab4/firecloud/fiss.py#L422-L427 | train | 38,916 |
broadinstitute/fiss | firecloud/fiss.py | meth_acl | def meth_acl(args):
''' Retrieve access control list for given version of a repository method'''
r = fapi.get_repository_method_acl(args.namespace, args.method,
args.snapshot_id)
fapi._check_response_code(r, 200)
acls = sorted(r.json(), key=lambda k: k['user'])
return map(lambda acl: '{0}\t{1}'.format(acl['user'], acl['role']), acls) | python | def meth_acl(args):
''' Retrieve access control list for given version of a repository method'''
r = fapi.get_repository_method_acl(args.namespace, args.method,
args.snapshot_id)
fapi._check_response_code(r, 200)
acls = sorted(r.json(), key=lambda k: k['user'])
return map(lambda acl: '{0}\t{1}'.format(acl['user'], acl['role']), acls) | [
"def",
"meth_acl",
"(",
"args",
")",
":",
"r",
"=",
"fapi",
".",
"get_repository_method_acl",
"(",
"args",
".",
"namespace",
",",
"args",
".",
"method",
",",
"args",
".",
"snapshot_id",
")",
"fapi",
".",
"_check_response_code",
"(",
"r",
",",
"200",
")",... | Retrieve access control list for given version of a repository method | [
"Retrieve",
"access",
"control",
"list",
"for",
"given",
"version",
"of",
"a",
"repository",
"method"
] | dddf91547479506dbbafb69ec84d44dcc4a94ab4 | https://github.com/broadinstitute/fiss/blob/dddf91547479506dbbafb69ec84d44dcc4a94ab4/firecloud/fiss.py#L430-L436 | train | 38,917 |
broadinstitute/fiss | firecloud/fiss.py | meth_set_acl | def meth_set_acl(args):
""" Assign an ACL role to a list of users for a workflow. """
acl_updates = [{"user": user, "role": args.role} \
for user in set(expand_fc_groups(args.users)) \
if user != fapi.whoami()]
id = args.snapshot_id
if not id:
# get the latest snapshot_id for this method from the methods repo
r = fapi.list_repository_methods(namespace=args.namespace,
name=args.method)
fapi._check_response_code(r, 200)
versions = r.json()
if len(versions) == 0:
if fcconfig.verbosity:
eprint("method {0}/{1} not found".format(args.namespace,
args.method))
return 1
latest = sorted(versions, key=lambda m: m['snapshotId'])[-1]
id = latest['snapshotId']
r = fapi.update_repository_method_acl(args.namespace, args.method, id,
acl_updates)
fapi._check_response_code(r, 200)
if fcconfig.verbosity:
print("Updated ACL for {0}/{1}:{2}".format(args.namespace, args.method,
id))
return 0 | python | def meth_set_acl(args):
""" Assign an ACL role to a list of users for a workflow. """
acl_updates = [{"user": user, "role": args.role} \
for user in set(expand_fc_groups(args.users)) \
if user != fapi.whoami()]
id = args.snapshot_id
if not id:
# get the latest snapshot_id for this method from the methods repo
r = fapi.list_repository_methods(namespace=args.namespace,
name=args.method)
fapi._check_response_code(r, 200)
versions = r.json()
if len(versions) == 0:
if fcconfig.verbosity:
eprint("method {0}/{1} not found".format(args.namespace,
args.method))
return 1
latest = sorted(versions, key=lambda m: m['snapshotId'])[-1]
id = latest['snapshotId']
r = fapi.update_repository_method_acl(args.namespace, args.method, id,
acl_updates)
fapi._check_response_code(r, 200)
if fcconfig.verbosity:
print("Updated ACL for {0}/{1}:{2}".format(args.namespace, args.method,
id))
return 0 | [
"def",
"meth_set_acl",
"(",
"args",
")",
":",
"acl_updates",
"=",
"[",
"{",
"\"user\"",
":",
"user",
",",
"\"role\"",
":",
"args",
".",
"role",
"}",
"for",
"user",
"in",
"set",
"(",
"expand_fc_groups",
"(",
"args",
".",
"users",
")",
")",
"if",
"user... | Assign an ACL role to a list of users for a workflow. | [
"Assign",
"an",
"ACL",
"role",
"to",
"a",
"list",
"of",
"users",
"for",
"a",
"workflow",
"."
] | dddf91547479506dbbafb69ec84d44dcc4a94ab4 | https://github.com/broadinstitute/fiss/blob/dddf91547479506dbbafb69ec84d44dcc4a94ab4/firecloud/fiss.py#L439-L466 | train | 38,918 |
broadinstitute/fiss | firecloud/fiss.py | expand_fc_groups | def expand_fc_groups(users):
""" If user is a firecloud group, return all members of the group.
Caveat is that only group admins may do this.
"""
groups = None
for user in users:
fcgroup = None
if '@' not in user:
fcgroup = user
elif user.lower().endswith('@firecloud.org'):
if groups is None:
r = fapi.get_groups()
fapi._check_response_code(r, 200)
groups = {group['groupEmail'].lower():group['groupName'] \
for group in r.json() if group['role'] == 'Admin'}
if user.lower() not in groups:
if fcconfig.verbosity:
eprint("You do not have access to the members of {}".format(user))
yield user
continue
else:
fcgroup = groups[user.lower()]
else:
yield user
continue
r = fapi.get_group(fcgroup)
fapi._check_response_code(r, 200)
fcgroup_data = r.json()
for admin in fcgroup_data['adminsEmails']:
yield admin
for member in fcgroup_data['membersEmails']:
yield member | python | def expand_fc_groups(users):
""" If user is a firecloud group, return all members of the group.
Caveat is that only group admins may do this.
"""
groups = None
for user in users:
fcgroup = None
if '@' not in user:
fcgroup = user
elif user.lower().endswith('@firecloud.org'):
if groups is None:
r = fapi.get_groups()
fapi._check_response_code(r, 200)
groups = {group['groupEmail'].lower():group['groupName'] \
for group in r.json() if group['role'] == 'Admin'}
if user.lower() not in groups:
if fcconfig.verbosity:
eprint("You do not have access to the members of {}".format(user))
yield user
continue
else:
fcgroup = groups[user.lower()]
else:
yield user
continue
r = fapi.get_group(fcgroup)
fapi._check_response_code(r, 200)
fcgroup_data = r.json()
for admin in fcgroup_data['adminsEmails']:
yield admin
for member in fcgroup_data['membersEmails']:
yield member | [
"def",
"expand_fc_groups",
"(",
"users",
")",
":",
"groups",
"=",
"None",
"for",
"user",
"in",
"users",
":",
"fcgroup",
"=",
"None",
"if",
"'@'",
"not",
"in",
"user",
":",
"fcgroup",
"=",
"user",
"elif",
"user",
".",
"lower",
"(",
")",
".",
"endswith... | If user is a firecloud group, return all members of the group.
Caveat is that only group admins may do this. | [
"If",
"user",
"is",
"a",
"firecloud",
"group",
"return",
"all",
"members",
"of",
"the",
"group",
".",
"Caveat",
"is",
"that",
"only",
"group",
"admins",
"may",
"do",
"this",
"."
] | dddf91547479506dbbafb69ec84d44dcc4a94ab4 | https://github.com/broadinstitute/fiss/blob/dddf91547479506dbbafb69ec84d44dcc4a94ab4/firecloud/fiss.py#L468-L499 | train | 38,919 |
broadinstitute/fiss | firecloud/fiss.py | meth_list | def meth_list(args):
""" List workflows in the methods repository """
r = fapi.list_repository_methods(namespace=args.namespace,
name=args.method,
snapshotId=args.snapshot_id)
fapi._check_response_code(r, 200)
# Parse the JSON for the workspace + namespace
methods = r.json()
results = []
for m in methods:
ns = m['namespace']
n = m['name']
sn_id = m['snapshotId']
results.append('{0}\t{1}\t{2}'.format(ns,n,sn_id))
# Sort for easier viewing, ignore case
return sorted(results, key=lambda s: s.lower()) | python | def meth_list(args):
""" List workflows in the methods repository """
r = fapi.list_repository_methods(namespace=args.namespace,
name=args.method,
snapshotId=args.snapshot_id)
fapi._check_response_code(r, 200)
# Parse the JSON for the workspace + namespace
methods = r.json()
results = []
for m in methods:
ns = m['namespace']
n = m['name']
sn_id = m['snapshotId']
results.append('{0}\t{1}\t{2}'.format(ns,n,sn_id))
# Sort for easier viewing, ignore case
return sorted(results, key=lambda s: s.lower()) | [
"def",
"meth_list",
"(",
"args",
")",
":",
"r",
"=",
"fapi",
".",
"list_repository_methods",
"(",
"namespace",
"=",
"args",
".",
"namespace",
",",
"name",
"=",
"args",
".",
"method",
",",
"snapshotId",
"=",
"args",
".",
"snapshot_id",
")",
"fapi",
".",
... | List workflows in the methods repository | [
"List",
"workflows",
"in",
"the",
"methods",
"repository"
] | dddf91547479506dbbafb69ec84d44dcc4a94ab4 | https://github.com/broadinstitute/fiss/blob/dddf91547479506dbbafb69ec84d44dcc4a94ab4/firecloud/fiss.py#L502-L519 | train | 38,920 |
broadinstitute/fiss | firecloud/fiss.py | config_acl | def config_acl(args):
''' Retrieve access control list for a method configuration'''
r = fapi.get_repository_config_acl(args.namespace, args.config,
args.snapshot_id)
fapi._check_response_code(r, 200)
acls = sorted(r.json(), key=lambda k: k['user'])
return map(lambda acl: '{0}\t{1}'.format(acl['user'], acl['role']), acls) | python | def config_acl(args):
''' Retrieve access control list for a method configuration'''
r = fapi.get_repository_config_acl(args.namespace, args.config,
args.snapshot_id)
fapi._check_response_code(r, 200)
acls = sorted(r.json(), key=lambda k: k['user'])
return map(lambda acl: '{0}\t{1}'.format(acl['user'], acl['role']), acls) | [
"def",
"config_acl",
"(",
"args",
")",
":",
"r",
"=",
"fapi",
".",
"get_repository_config_acl",
"(",
"args",
".",
"namespace",
",",
"args",
".",
"config",
",",
"args",
".",
"snapshot_id",
")",
"fapi",
".",
"_check_response_code",
"(",
"r",
",",
"200",
")... | Retrieve access control list for a method configuration | [
"Retrieve",
"access",
"control",
"list",
"for",
"a",
"method",
"configuration"
] | dddf91547479506dbbafb69ec84d44dcc4a94ab4 | https://github.com/broadinstitute/fiss/blob/dddf91547479506dbbafb69ec84d44dcc4a94ab4/firecloud/fiss.py#L602-L608 | train | 38,921 |
broadinstitute/fiss | firecloud/fiss.py | config_set_acl | def config_set_acl(args):
""" Assign an ACL role to a list of users for a config. """
acl_updates = [{"user": user, "role": args.role} \
for user in set(expand_fc_groups(args.users)) \
if user != fapi.whoami()]
id = args.snapshot_id
if not id:
# get the latest snapshot_id for this method from the methods repo
r = fapi.list_repository_configs(namespace=args.namespace,
name=args.config)
fapi._check_response_code(r, 200)
versions = r.json()
if len(versions) == 0:
if fcconfig.verbosity:
eprint("Configuration {0}/{1} not found".format(args.namespace,
args.config))
return 1
latest = sorted(versions, key=lambda c: c['snapshotId'])[-1]
id = latest['snapshotId']
r = fapi.update_repository_config_acl(args.namespace, args.config, id,
acl_updates)
fapi._check_response_code(r, 200)
if fcconfig.verbosity:
print("Updated ACL for {0}/{1}:{2}".format(args.namespace, args.config,
id))
return 0 | python | def config_set_acl(args):
""" Assign an ACL role to a list of users for a config. """
acl_updates = [{"user": user, "role": args.role} \
for user in set(expand_fc_groups(args.users)) \
if user != fapi.whoami()]
id = args.snapshot_id
if not id:
# get the latest snapshot_id for this method from the methods repo
r = fapi.list_repository_configs(namespace=args.namespace,
name=args.config)
fapi._check_response_code(r, 200)
versions = r.json()
if len(versions) == 0:
if fcconfig.verbosity:
eprint("Configuration {0}/{1} not found".format(args.namespace,
args.config))
return 1
latest = sorted(versions, key=lambda c: c['snapshotId'])[-1]
id = latest['snapshotId']
r = fapi.update_repository_config_acl(args.namespace, args.config, id,
acl_updates)
fapi._check_response_code(r, 200)
if fcconfig.verbosity:
print("Updated ACL for {0}/{1}:{2}".format(args.namespace, args.config,
id))
return 0 | [
"def",
"config_set_acl",
"(",
"args",
")",
":",
"acl_updates",
"=",
"[",
"{",
"\"user\"",
":",
"user",
",",
"\"role\"",
":",
"args",
".",
"role",
"}",
"for",
"user",
"in",
"set",
"(",
"expand_fc_groups",
"(",
"args",
".",
"users",
")",
")",
"if",
"us... | Assign an ACL role to a list of users for a config. | [
"Assign",
"an",
"ACL",
"role",
"to",
"a",
"list",
"of",
"users",
"for",
"a",
"config",
"."
] | dddf91547479506dbbafb69ec84d44dcc4a94ab4 | https://github.com/broadinstitute/fiss/blob/dddf91547479506dbbafb69ec84d44dcc4a94ab4/firecloud/fiss.py#L611-L638 | train | 38,922 |
broadinstitute/fiss | firecloud/fiss.py | config_get | def config_get(args):
""" Retrieve a method config from a workspace, send stdout """
r = fapi.get_workspace_config(args.project, args.workspace,
args.namespace, args.config)
fapi._check_response_code(r, 200)
# Setting ensure_ascii to False ensures unicode string returns
return json.dumps(r.json(), indent=4, separators=(',', ': '),
sort_keys=True, ensure_ascii=False) | python | def config_get(args):
""" Retrieve a method config from a workspace, send stdout """
r = fapi.get_workspace_config(args.project, args.workspace,
args.namespace, args.config)
fapi._check_response_code(r, 200)
# Setting ensure_ascii to False ensures unicode string returns
return json.dumps(r.json(), indent=4, separators=(',', ': '),
sort_keys=True, ensure_ascii=False) | [
"def",
"config_get",
"(",
"args",
")",
":",
"r",
"=",
"fapi",
".",
"get_workspace_config",
"(",
"args",
".",
"project",
",",
"args",
".",
"workspace",
",",
"args",
".",
"namespace",
",",
"args",
".",
"config",
")",
"fapi",
".",
"_check_response_code",
"(... | Retrieve a method config from a workspace, send stdout | [
"Retrieve",
"a",
"method",
"config",
"from",
"a",
"workspace",
"send",
"stdout"
] | dddf91547479506dbbafb69ec84d44dcc4a94ab4 | https://github.com/broadinstitute/fiss/blob/dddf91547479506dbbafb69ec84d44dcc4a94ab4/firecloud/fiss.py#L641-L648 | train | 38,923 |
broadinstitute/fiss | firecloud/fiss.py | config_wdl | def config_wdl(args):
""" Retrieve the WDL for a method config in a workspace, send stdout """
r = fapi.get_workspace_config(args.project, args.workspace,
args.namespace, args.config)
fapi._check_response_code(r, 200)
method = r.json()["methodRepoMethod"]
args.namespace = method["methodNamespace"]
args.method = method["methodName"]
args.snapshot_id = method["methodVersion"]
return meth_wdl(args) | python | def config_wdl(args):
""" Retrieve the WDL for a method config in a workspace, send stdout """
r = fapi.get_workspace_config(args.project, args.workspace,
args.namespace, args.config)
fapi._check_response_code(r, 200)
method = r.json()["methodRepoMethod"]
args.namespace = method["methodNamespace"]
args.method = method["methodName"]
args.snapshot_id = method["methodVersion"]
return meth_wdl(args) | [
"def",
"config_wdl",
"(",
"args",
")",
":",
"r",
"=",
"fapi",
".",
"get_workspace_config",
"(",
"args",
".",
"project",
",",
"args",
".",
"workspace",
",",
"args",
".",
"namespace",
",",
"args",
".",
"config",
")",
"fapi",
".",
"_check_response_code",
"(... | Retrieve the WDL for a method config in a workspace, send stdout | [
"Retrieve",
"the",
"WDL",
"for",
"a",
"method",
"config",
"in",
"a",
"workspace",
"send",
"stdout"
] | dddf91547479506dbbafb69ec84d44dcc4a94ab4 | https://github.com/broadinstitute/fiss/blob/dddf91547479506dbbafb69ec84d44dcc4a94ab4/firecloud/fiss.py#L651-L662 | train | 38,924 |
broadinstitute/fiss | firecloud/fiss.py | config_diff | def config_diff(args):
"""Compare method configuration definitions across workspaces. Ignores
methodConfigVersion if the verbose argument is not set"""
config_1 = config_get(args).splitlines()
args.project = args.Project
args.workspace = args.Workspace
cfg_1_name = args.config
if args.Config is not None:
args.config = args.Config
if args.Namespace is not None:
args.namespace = args.Namespace
config_2 = config_get(args).splitlines()
if not args.verbose:
config_1 = skip_cfg_ver(config_1)
config_2 = skip_cfg_ver(config_2)
return list(unified_diff(config_1, config_2, cfg_1_name, args.config, lineterm='')) | python | def config_diff(args):
"""Compare method configuration definitions across workspaces. Ignores
methodConfigVersion if the verbose argument is not set"""
config_1 = config_get(args).splitlines()
args.project = args.Project
args.workspace = args.Workspace
cfg_1_name = args.config
if args.Config is not None:
args.config = args.Config
if args.Namespace is not None:
args.namespace = args.Namespace
config_2 = config_get(args).splitlines()
if not args.verbose:
config_1 = skip_cfg_ver(config_1)
config_2 = skip_cfg_ver(config_2)
return list(unified_diff(config_1, config_2, cfg_1_name, args.config, lineterm='')) | [
"def",
"config_diff",
"(",
"args",
")",
":",
"config_1",
"=",
"config_get",
"(",
"args",
")",
".",
"splitlines",
"(",
")",
"args",
".",
"project",
"=",
"args",
".",
"Project",
"args",
".",
"workspace",
"=",
"args",
".",
"Workspace",
"cfg_1_name",
"=",
... | Compare method configuration definitions across workspaces. Ignores
methodConfigVersion if the verbose argument is not set | [
"Compare",
"method",
"configuration",
"definitions",
"across",
"workspaces",
".",
"Ignores",
"methodConfigVersion",
"if",
"the",
"verbose",
"argument",
"is",
"not",
"set"
] | dddf91547479506dbbafb69ec84d44dcc4a94ab4 | https://github.com/broadinstitute/fiss/blob/dddf91547479506dbbafb69ec84d44dcc4a94ab4/firecloud/fiss.py#L665-L680 | train | 38,925 |
broadinstitute/fiss | firecloud/fiss.py | config_delete | def config_delete(args):
""" Remove a method config from a workspace """
r = fapi.delete_workspace_config(args.project, args.workspace,
args.namespace, args.config)
fapi._check_response_code(r, [200,204])
return r.text if r.text else None | python | def config_delete(args):
""" Remove a method config from a workspace """
r = fapi.delete_workspace_config(args.project, args.workspace,
args.namespace, args.config)
fapi._check_response_code(r, [200,204])
return r.text if r.text else None | [
"def",
"config_delete",
"(",
"args",
")",
":",
"r",
"=",
"fapi",
".",
"delete_workspace_config",
"(",
"args",
".",
"project",
",",
"args",
".",
"workspace",
",",
"args",
".",
"namespace",
",",
"args",
".",
"config",
")",
"fapi",
".",
"_check_response_code"... | Remove a method config from a workspace | [
"Remove",
"a",
"method",
"config",
"from",
"a",
"workspace"
] | dddf91547479506dbbafb69ec84d44dcc4a94ab4 | https://github.com/broadinstitute/fiss/blob/dddf91547479506dbbafb69ec84d44dcc4a94ab4/firecloud/fiss.py#L768-L773 | train | 38,926 |
broadinstitute/fiss | firecloud/fiss.py | attr_copy | def attr_copy(args):
""" Copy workspace attributes between workspaces. """
if not args.to_workspace:
args.to_workspace = args.workspace
if not args.to_project:
args.to_project = args.project
if (args.project == args.to_project
and args.workspace == args.to_workspace):
eprint("destination project and namespace must differ from"
" source workspace")
return 1
# First get the workspace attributes of the source workspace
r = fapi.get_workspace(args.project, args.workspace)
fapi._check_response_code(r, 200)
# Parse the attributes
workspace_attrs = r.json()['workspace']['attributes']
# If we passed attributes, only use those
if args.attributes:
workspace_attrs = {k:v for k, v in iteritems(workspace_attrs)
if k in args.attributes}
if len(workspace_attrs) == 0:
print("No workspace attributes defined in {0}/{1}".format(
args.project, args.workspace))
return 1
message = "This will copy the following workspace attributes to {0}/{1}\n"
message = message.format(args.to_project, args.to_workspace)
for k, v in sorted(iteritems(workspace_attrs)):
message += '\t{0}\t{1}\n'.format(k, v)
if not args.yes and not _confirm_prompt(message):
return 0
# make the attributes into updates
updates = [fapi._attr_set(k,v) for k,v in iteritems(workspace_attrs)]
r = fapi.update_workspace_attributes(args.to_project, args.to_workspace,
updates)
fapi._check_response_code(r, 200)
return 0 | python | def attr_copy(args):
""" Copy workspace attributes between workspaces. """
if not args.to_workspace:
args.to_workspace = args.workspace
if not args.to_project:
args.to_project = args.project
if (args.project == args.to_project
and args.workspace == args.to_workspace):
eprint("destination project and namespace must differ from"
" source workspace")
return 1
# First get the workspace attributes of the source workspace
r = fapi.get_workspace(args.project, args.workspace)
fapi._check_response_code(r, 200)
# Parse the attributes
workspace_attrs = r.json()['workspace']['attributes']
# If we passed attributes, only use those
if args.attributes:
workspace_attrs = {k:v for k, v in iteritems(workspace_attrs)
if k in args.attributes}
if len(workspace_attrs) == 0:
print("No workspace attributes defined in {0}/{1}".format(
args.project, args.workspace))
return 1
message = "This will copy the following workspace attributes to {0}/{1}\n"
message = message.format(args.to_project, args.to_workspace)
for k, v in sorted(iteritems(workspace_attrs)):
message += '\t{0}\t{1}\n'.format(k, v)
if not args.yes and not _confirm_prompt(message):
return 0
# make the attributes into updates
updates = [fapi._attr_set(k,v) for k,v in iteritems(workspace_attrs)]
r = fapi.update_workspace_attributes(args.to_project, args.to_workspace,
updates)
fapi._check_response_code(r, 200)
return 0 | [
"def",
"attr_copy",
"(",
"args",
")",
":",
"if",
"not",
"args",
".",
"to_workspace",
":",
"args",
".",
"to_workspace",
"=",
"args",
".",
"workspace",
"if",
"not",
"args",
".",
"to_project",
":",
"args",
".",
"to_project",
"=",
"args",
".",
"project",
"... | Copy workspace attributes between workspaces. | [
"Copy",
"workspace",
"attributes",
"between",
"workspaces",
"."
] | dddf91547479506dbbafb69ec84d44dcc4a94ab4 | https://github.com/broadinstitute/fiss/blob/dddf91547479506dbbafb69ec84d44dcc4a94ab4/firecloud/fiss.py#L1003-L1045 | train | 38,927 |
broadinstitute/fiss | firecloud/fiss.py | health | def health(args):
""" Health FireCloud Server """
r = fapi.health()
fapi._check_response_code(r, 200)
return r.content | python | def health(args):
""" Health FireCloud Server """
r = fapi.health()
fapi._check_response_code(r, 200)
return r.content | [
"def",
"health",
"(",
"args",
")",
":",
"r",
"=",
"fapi",
".",
"health",
"(",
")",
"fapi",
".",
"_check_response_code",
"(",
"r",
",",
"200",
")",
"return",
"r",
".",
"content"
] | Health FireCloud Server | [
"Health",
"FireCloud",
"Server"
] | dddf91547479506dbbafb69ec84d44dcc4a94ab4 | https://github.com/broadinstitute/fiss/blob/dddf91547479506dbbafb69ec84d44dcc4a94ab4/firecloud/fiss.py#L1163-L1167 | train | 38,928 |
broadinstitute/fiss | firecloud/fiss.py | sset_loop | def sset_loop(args):
''' Loop over all sample sets in a workspace, performing a func '''
# Ensure that the requested action is a valid fiss_cmd
fiss_func = __cmd_to_func(args.action)
if not fiss_func:
eprint("invalid FISS cmd '" + args.action + "'")
return 1
# First get the sample set names
r = fapi.get_entities(args.project, args.workspace, "sample_set")
fapi._check_response_code(r, 200)
sample_sets = [entity['name'] for entity in r.json()]
args.entity_type = "sample_set"
for sset in sample_sets:
print('\n# {0}::{1}/{2} {3}'.format(args.project, args.workspace, sset,
args.action))
args.entity = sset
# Note how this code is similar to how args.func is called in
# main so it may make sense to try to a common method for both
try:
result = fiss_func(args)
except Exception as e:
status = __pretty_print_fc_exception(e)
if not args.keep_going:
return status
printToCLI(result)
return 0 | python | def sset_loop(args):
''' Loop over all sample sets in a workspace, performing a func '''
# Ensure that the requested action is a valid fiss_cmd
fiss_func = __cmd_to_func(args.action)
if not fiss_func:
eprint("invalid FISS cmd '" + args.action + "'")
return 1
# First get the sample set names
r = fapi.get_entities(args.project, args.workspace, "sample_set")
fapi._check_response_code(r, 200)
sample_sets = [entity['name'] for entity in r.json()]
args.entity_type = "sample_set"
for sset in sample_sets:
print('\n# {0}::{1}/{2} {3}'.format(args.project, args.workspace, sset,
args.action))
args.entity = sset
# Note how this code is similar to how args.func is called in
# main so it may make sense to try to a common method for both
try:
result = fiss_func(args)
except Exception as e:
status = __pretty_print_fc_exception(e)
if not args.keep_going:
return status
printToCLI(result)
return 0 | [
"def",
"sset_loop",
"(",
"args",
")",
":",
"# Ensure that the requested action is a valid fiss_cmd",
"fiss_func",
"=",
"__cmd_to_func",
"(",
"args",
".",
"action",
")",
"if",
"not",
"fiss_func",
":",
"eprint",
"(",
"\"invalid FISS cmd '\"",
"+",
"args",
".",
"action... | Loop over all sample sets in a workspace, performing a func | [
"Loop",
"over",
"all",
"sample",
"sets",
"in",
"a",
"workspace",
"performing",
"a",
"func"
] | dddf91547479506dbbafb69ec84d44dcc4a94ab4 | https://github.com/broadinstitute/fiss/blob/dddf91547479506dbbafb69ec84d44dcc4a94ab4/firecloud/fiss.py#L1304-L1333 | train | 38,929 |
broadinstitute/fiss | firecloud/fiss.py | monitor | def monitor(args):
''' Retrieve status of jobs submitted from a given workspace, as a list
of TSV lines sorted by descending order of job submission date'''
r = fapi.list_submissions(args.project, args.workspace)
fapi._check_response_code(r, 200)
statuses = sorted(r.json(), key=lambda k: k['submissionDate'], reverse=True)
header = '\t'.join(list(statuses[0].keys()))
expander = lambda v: '{0}'.format(v)
def expander(thing):
if isinstance(thing, dict):
entityType = thing.get("entityType", None)
if entityType:
return "{0}:{1}".format(entityType, thing['entityName'])
return "{0}".format(thing)
# FIXME: this will generally return different column order between Python 2/3
return [header] + ['\t'.join( map(expander, v.values())) for v in statuses] | python | def monitor(args):
''' Retrieve status of jobs submitted from a given workspace, as a list
of TSV lines sorted by descending order of job submission date'''
r = fapi.list_submissions(args.project, args.workspace)
fapi._check_response_code(r, 200)
statuses = sorted(r.json(), key=lambda k: k['submissionDate'], reverse=True)
header = '\t'.join(list(statuses[0].keys()))
expander = lambda v: '{0}'.format(v)
def expander(thing):
if isinstance(thing, dict):
entityType = thing.get("entityType", None)
if entityType:
return "{0}:{1}".format(entityType, thing['entityName'])
return "{0}".format(thing)
# FIXME: this will generally return different column order between Python 2/3
return [header] + ['\t'.join( map(expander, v.values())) for v in statuses] | [
"def",
"monitor",
"(",
"args",
")",
":",
"r",
"=",
"fapi",
".",
"list_submissions",
"(",
"args",
".",
"project",
",",
"args",
".",
"workspace",
")",
"fapi",
".",
"_check_response_code",
"(",
"r",
",",
"200",
")",
"statuses",
"=",
"sorted",
"(",
"r",
... | Retrieve status of jobs submitted from a given workspace, as a list
of TSV lines sorted by descending order of job submission date | [
"Retrieve",
"status",
"of",
"jobs",
"submitted",
"from",
"a",
"given",
"workspace",
"as",
"a",
"list",
"of",
"TSV",
"lines",
"sorted",
"by",
"descending",
"order",
"of",
"job",
"submission",
"date"
] | dddf91547479506dbbafb69ec84d44dcc4a94ab4 | https://github.com/broadinstitute/fiss/blob/dddf91547479506dbbafb69ec84d44dcc4a94ab4/firecloud/fiss.py#L1336-L1353 | train | 38,930 |
broadinstitute/fiss | firecloud/fiss.py | supervise | def supervise(args):
''' Run legacy, Firehose-style workflow of workflows'''
project = args.project
workspace = args.workspace
namespace = args.namespace
workflow = args.workflow
sample_sets = args.sample_sets
recovery_file = args.json_checkpoint
# If no sample sets are provided, run on all sample sets
if not sample_sets:
r = fapi.get_entities(args.project, args.workspace, "sample_set")
fapi._check_response_code(r, 200)
sample_sets = [s['name'] for s in r.json()]
message = "Sample Sets ({}):\n\t".format(len(sample_sets)) + \
"\n\t".join(sample_sets)
prompt = "\nLaunch workflow in " + project + "/" + workspace + \
" on these sample sets? [Y\\n]: "
if not args.yes and not _confirm_prompt(message, prompt):
return
return supervisor.supervise(project, workspace, namespace, workflow,
sample_sets, recovery_file) | python | def supervise(args):
''' Run legacy, Firehose-style workflow of workflows'''
project = args.project
workspace = args.workspace
namespace = args.namespace
workflow = args.workflow
sample_sets = args.sample_sets
recovery_file = args.json_checkpoint
# If no sample sets are provided, run on all sample sets
if not sample_sets:
r = fapi.get_entities(args.project, args.workspace, "sample_set")
fapi._check_response_code(r, 200)
sample_sets = [s['name'] for s in r.json()]
message = "Sample Sets ({}):\n\t".format(len(sample_sets)) + \
"\n\t".join(sample_sets)
prompt = "\nLaunch workflow in " + project + "/" + workspace + \
" on these sample sets? [Y\\n]: "
if not args.yes and not _confirm_prompt(message, prompt):
return
return supervisor.supervise(project, workspace, namespace, workflow,
sample_sets, recovery_file) | [
"def",
"supervise",
"(",
"args",
")",
":",
"project",
"=",
"args",
".",
"project",
"workspace",
"=",
"args",
".",
"workspace",
"namespace",
"=",
"args",
".",
"namespace",
"workflow",
"=",
"args",
".",
"workflow",
"sample_sets",
"=",
"args",
".",
"sample_se... | Run legacy, Firehose-style workflow of workflows | [
"Run",
"legacy",
"Firehose",
"-",
"style",
"workflow",
"of",
"workflows"
] | dddf91547479506dbbafb69ec84d44dcc4a94ab4 | https://github.com/broadinstitute/fiss/blob/dddf91547479506dbbafb69ec84d44dcc4a94ab4/firecloud/fiss.py#L1356-L1381 | train | 38,931 |
broadinstitute/fiss | firecloud/fiss.py | entity_copy | def entity_copy(args):
""" Copy entities from one workspace to another. """
if not args.to_workspace:
args.to_workspace = args.workspace
if not args.to_project:
args.to_project = args.project
if (args.project == args.to_project
and args.workspace == args.to_workspace):
eprint("destination project and namespace must differ from"
" source workspace")
return 1
if not args.entities:
# get a list of entities from source workspace matching entity_type
ents = _entity_paginator(args.project, args.workspace, args.entity_type,
page_size=500, filter_terms=None,
sort_direction='asc')
args.entities = [e['name'] for e in ents]
prompt = "Copy {0} {1}(s) from {2}/{3} to {4}/{5}?\n[Y\\n]: "
prompt = prompt.format(len(args.entities), args.entity_type, args.project,
args.workspace, args.to_project, args.to_workspace)
if not args.yes and not _confirm_prompt("", prompt):
return
r = fapi.copy_entities(args.project, args.workspace, args.to_project,
args.to_workspace, args.entity_type, args.entities,
link_existing_entities=args.link)
fapi._check_response_code(r, 201)
return 0 | python | def entity_copy(args):
""" Copy entities from one workspace to another. """
if not args.to_workspace:
args.to_workspace = args.workspace
if not args.to_project:
args.to_project = args.project
if (args.project == args.to_project
and args.workspace == args.to_workspace):
eprint("destination project and namespace must differ from"
" source workspace")
return 1
if not args.entities:
# get a list of entities from source workspace matching entity_type
ents = _entity_paginator(args.project, args.workspace, args.entity_type,
page_size=500, filter_terms=None,
sort_direction='asc')
args.entities = [e['name'] for e in ents]
prompt = "Copy {0} {1}(s) from {2}/{3} to {4}/{5}?\n[Y\\n]: "
prompt = prompt.format(len(args.entities), args.entity_type, args.project,
args.workspace, args.to_project, args.to_workspace)
if not args.yes and not _confirm_prompt("", prompt):
return
r = fapi.copy_entities(args.project, args.workspace, args.to_project,
args.to_workspace, args.entity_type, args.entities,
link_existing_entities=args.link)
fapi._check_response_code(r, 201)
return 0 | [
"def",
"entity_copy",
"(",
"args",
")",
":",
"if",
"not",
"args",
".",
"to_workspace",
":",
"args",
".",
"to_workspace",
"=",
"args",
".",
"workspace",
"if",
"not",
"args",
".",
"to_project",
":",
"args",
".",
"to_project",
"=",
"args",
".",
"project",
... | Copy entities from one workspace to another. | [
"Copy",
"entities",
"from",
"one",
"workspace",
"to",
"another",
"."
] | dddf91547479506dbbafb69ec84d44dcc4a94ab4 | https://github.com/broadinstitute/fiss/blob/dddf91547479506dbbafb69ec84d44dcc4a94ab4/firecloud/fiss.py#L1389-L1419 | train | 38,932 |
broadinstitute/fiss | firecloud/fiss.py | _validate_helper | def _validate_helper(args, config_d, workspace_d, entity_d=None):
""" Return FISSFC validation information on config for a certain entity """
# 4 ways to have invalid config:
invalid_inputs = sorted(config_d["invalidInputs"])
invalid_outputs = sorted(config_d["invalidOutputs"])
# Also insert values for invalid i/o
invalid_inputs = [(i, config_d['methodConfiguration']['inputs'][i]) for i in invalid_inputs]
invalid_outputs = [(i, config_d['methodConfiguration']['outputs'][i]) for i in invalid_outputs]
missing_attrs = []
missing_wksp_attrs = []
# If an entity was provided, also check to see if that entity has the necessary inputs
if entity_d:
entity_type = config_d['methodConfiguration']['rootEntityType']
# If the attribute is listed here, it has an entry
entity_attrs = set(entity_d['attributes'])
# Optimization, only get the workspace attrs if the method config has any
workspace_attrs = workspace_d['workspace']['attributes']
# So now iterate over the inputs
for inp, val in iteritems(config_d['methodConfiguration']['inputs']):
# Must be an attribute on the entity
if val.startswith("this."):
# Normally, the value is of the form 'this.attribute',
# but for operations on sets, e.g. one can also do
# 'this.samples.attr'. But even in this case, there must be a
# 'samples' attribute on the sample set, so checking for the middle
# value works as expected. Other pathological cases would've been
# caught above by the validation endpoint
expected_attr = val.split('.')[1]
# 'name' is special, it really means '_id', which everything has
if expected_attr == "name":
continue
if expected_attr not in entity_attrs:
missing_attrs.append((inp, val))
if val.startswith("workspace."):
# Anything not matching this format will be caught above
expected_attr = val.split('.')[1]
if expected_attr not in workspace_attrs:
missing_wksp_attrs.append((inp, val))
# Anything else is a literal
return invalid_inputs, invalid_outputs, missing_attrs, missing_wksp_attrs | python | def _validate_helper(args, config_d, workspace_d, entity_d=None):
""" Return FISSFC validation information on config for a certain entity """
# 4 ways to have invalid config:
invalid_inputs = sorted(config_d["invalidInputs"])
invalid_outputs = sorted(config_d["invalidOutputs"])
# Also insert values for invalid i/o
invalid_inputs = [(i, config_d['methodConfiguration']['inputs'][i]) for i in invalid_inputs]
invalid_outputs = [(i, config_d['methodConfiguration']['outputs'][i]) for i in invalid_outputs]
missing_attrs = []
missing_wksp_attrs = []
# If an entity was provided, also check to see if that entity has the necessary inputs
if entity_d:
entity_type = config_d['methodConfiguration']['rootEntityType']
# If the attribute is listed here, it has an entry
entity_attrs = set(entity_d['attributes'])
# Optimization, only get the workspace attrs if the method config has any
workspace_attrs = workspace_d['workspace']['attributes']
# So now iterate over the inputs
for inp, val in iteritems(config_d['methodConfiguration']['inputs']):
# Must be an attribute on the entity
if val.startswith("this."):
# Normally, the value is of the form 'this.attribute',
# but for operations on sets, e.g. one can also do
# 'this.samples.attr'. But even in this case, there must be a
# 'samples' attribute on the sample set, so checking for the middle
# value works as expected. Other pathological cases would've been
# caught above by the validation endpoint
expected_attr = val.split('.')[1]
# 'name' is special, it really means '_id', which everything has
if expected_attr == "name":
continue
if expected_attr not in entity_attrs:
missing_attrs.append((inp, val))
if val.startswith("workspace."):
# Anything not matching this format will be caught above
expected_attr = val.split('.')[1]
if expected_attr not in workspace_attrs:
missing_wksp_attrs.append((inp, val))
# Anything else is a literal
return invalid_inputs, invalid_outputs, missing_attrs, missing_wksp_attrs | [
"def",
"_validate_helper",
"(",
"args",
",",
"config_d",
",",
"workspace_d",
",",
"entity_d",
"=",
"None",
")",
":",
"# 4 ways to have invalid config:",
"invalid_inputs",
"=",
"sorted",
"(",
"config_d",
"[",
"\"invalidInputs\"",
"]",
")",
"invalid_outputs",
"=",
"... | Return FISSFC validation information on config for a certain entity | [
"Return",
"FISSFC",
"validation",
"information",
"on",
"config",
"for",
"a",
"certain",
"entity"
] | dddf91547479506dbbafb69ec84d44dcc4a94ab4 | https://github.com/broadinstitute/fiss/blob/dddf91547479506dbbafb69ec84d44dcc4a94ab4/firecloud/fiss.py#L1474-L1521 | train | 38,933 |
broadinstitute/fiss | firecloud/fiss.py | _confirm_prompt | def _confirm_prompt(message, prompt="\nAre you sure? [y/yes (default: no)]: ",
affirmations=("Y", "Yes", "yes", "y")):
"""
Display a message, then confirmation prompt, and return true
if the user responds with one of the affirmations.
"""
answer = input(message + prompt)
return answer in affirmations | python | def _confirm_prompt(message, prompt="\nAre you sure? [y/yes (default: no)]: ",
affirmations=("Y", "Yes", "yes", "y")):
"""
Display a message, then confirmation prompt, and return true
if the user responds with one of the affirmations.
"""
answer = input(message + prompt)
return answer in affirmations | [
"def",
"_confirm_prompt",
"(",
"message",
",",
"prompt",
"=",
"\"\\nAre you sure? [y/yes (default: no)]: \"",
",",
"affirmations",
"=",
"(",
"\"Y\"",
",",
"\"Yes\"",
",",
"\"yes\"",
",",
"\"y\"",
")",
")",
":",
"answer",
"=",
"input",
"(",
"message",
"+",
"pro... | Display a message, then confirmation prompt, and return true
if the user responds with one of the affirmations. | [
"Display",
"a",
"message",
"then",
"confirmation",
"prompt",
"and",
"return",
"true",
"if",
"the",
"user",
"responds",
"with",
"one",
"of",
"the",
"affirmations",
"."
] | dddf91547479506dbbafb69ec84d44dcc4a94ab4 | https://github.com/broadinstitute/fiss/blob/dddf91547479506dbbafb69ec84d44dcc4a94ab4/firecloud/fiss.py#L1663-L1670 | train | 38,934 |
broadinstitute/fiss | firecloud/fiss.py | _nonempty_project | def _nonempty_project(string):
"""
Argparse validator for ensuring a workspace is provided
"""
value = str(string)
if len(value) == 0:
msg = "No project provided and no default project configured"
raise argparse.ArgumentTypeError(msg)
return value | python | def _nonempty_project(string):
"""
Argparse validator for ensuring a workspace is provided
"""
value = str(string)
if len(value) == 0:
msg = "No project provided and no default project configured"
raise argparse.ArgumentTypeError(msg)
return value | [
"def",
"_nonempty_project",
"(",
"string",
")",
":",
"value",
"=",
"str",
"(",
"string",
")",
"if",
"len",
"(",
"value",
")",
"==",
"0",
":",
"msg",
"=",
"\"No project provided and no default project configured\"",
"raise",
"argparse",
".",
"ArgumentTypeError",
... | Argparse validator for ensuring a workspace is provided | [
"Argparse",
"validator",
"for",
"ensuring",
"a",
"workspace",
"is",
"provided"
] | dddf91547479506dbbafb69ec84d44dcc4a94ab4 | https://github.com/broadinstitute/fiss/blob/dddf91547479506dbbafb69ec84d44dcc4a94ab4/firecloud/fiss.py#L1672-L1680 | train | 38,935 |
broadinstitute/fiss | firecloud/fiss.py | _entity_paginator | def _entity_paginator(namespace, workspace, etype, page_size=500,
filter_terms=None, sort_direction="asc"):
"""Pages through the get_entities_query endpoint to get all entities in
the workspace without crashing.
"""
page = 1
all_entities = []
# Make initial request
r = fapi.get_entities_query(namespace, workspace, etype, page=page,
page_size=page_size, sort_direction=sort_direction,
filter_terms=filter_terms)
fapi._check_response_code(r, 200)
response_body = r.json()
# Get the total number of pages
total_pages = response_body['resultMetadata']['filteredPageCount']
# append the first set of results
entities = response_body['results']
all_entities.extend(entities)
# Now iterate over remaining pages to retrieve all the results
page = 2
while page <= total_pages:
r = fapi.get_entities_query(namespace, workspace, etype, page=page,
page_size=page_size, sort_direction=sort_direction,
filter_terms=filter_terms)
fapi._check_response_code(r, 200)
entities = r.json()['results']
all_entities.extend(entities)
page += 1
return all_entities | python | def _entity_paginator(namespace, workspace, etype, page_size=500,
filter_terms=None, sort_direction="asc"):
"""Pages through the get_entities_query endpoint to get all entities in
the workspace without crashing.
"""
page = 1
all_entities = []
# Make initial request
r = fapi.get_entities_query(namespace, workspace, etype, page=page,
page_size=page_size, sort_direction=sort_direction,
filter_terms=filter_terms)
fapi._check_response_code(r, 200)
response_body = r.json()
# Get the total number of pages
total_pages = response_body['resultMetadata']['filteredPageCount']
# append the first set of results
entities = response_body['results']
all_entities.extend(entities)
# Now iterate over remaining pages to retrieve all the results
page = 2
while page <= total_pages:
r = fapi.get_entities_query(namespace, workspace, etype, page=page,
page_size=page_size, sort_direction=sort_direction,
filter_terms=filter_terms)
fapi._check_response_code(r, 200)
entities = r.json()['results']
all_entities.extend(entities)
page += 1
return all_entities | [
"def",
"_entity_paginator",
"(",
"namespace",
",",
"workspace",
",",
"etype",
",",
"page_size",
"=",
"500",
",",
"filter_terms",
"=",
"None",
",",
"sort_direction",
"=",
"\"asc\"",
")",
":",
"page",
"=",
"1",
"all_entities",
"=",
"[",
"]",
"# Make initial re... | Pages through the get_entities_query endpoint to get all entities in
the workspace without crashing. | [
"Pages",
"through",
"the",
"get_entities_query",
"endpoint",
"to",
"get",
"all",
"entities",
"in",
"the",
"workspace",
"without",
"crashing",
"."
] | dddf91547479506dbbafb69ec84d44dcc4a94ab4 | https://github.com/broadinstitute/fiss/blob/dddf91547479506dbbafb69ec84d44dcc4a94ab4/firecloud/fiss.py#L1682-L1714 | train | 38,936 |
broadinstitute/fiss | firecloud/fiss.py | __cmd_to_func | def __cmd_to_func(cmd):
""" Returns the function object in this module matching cmd. """
fiss_module = sys.modules[__name__]
# Returns None if string is not a recognized FISS command
func = getattr(fiss_module, cmd, None)
if func and not hasattr(func, 'fiss_cmd'):
func = None
return func | python | def __cmd_to_func(cmd):
""" Returns the function object in this module matching cmd. """
fiss_module = sys.modules[__name__]
# Returns None if string is not a recognized FISS command
func = getattr(fiss_module, cmd, None)
if func and not hasattr(func, 'fiss_cmd'):
func = None
return func | [
"def",
"__cmd_to_func",
"(",
"cmd",
")",
":",
"fiss_module",
"=",
"sys",
".",
"modules",
"[",
"__name__",
"]",
"# Returns None if string is not a recognized FISS command",
"func",
"=",
"getattr",
"(",
"fiss_module",
",",
"cmd",
",",
"None",
")",
"if",
"func",
"a... | Returns the function object in this module matching cmd. | [
"Returns",
"the",
"function",
"object",
"in",
"this",
"module",
"matching",
"cmd",
"."
] | dddf91547479506dbbafb69ec84d44dcc4a94ab4 | https://github.com/broadinstitute/fiss/blob/dddf91547479506dbbafb69ec84d44dcc4a94ab4/firecloud/fiss.py#L1720-L1727 | train | 38,937 |
broadinstitute/fiss | firecloud/fiss.py | _valid_headerline | def _valid_headerline(l):
"""return true if the given string is a valid loadfile header"""
if not l:
return False
headers = l.split('\t')
first_col = headers[0]
tsplit = first_col.split(':')
if len(tsplit) != 2:
return False
if tsplit[0] in ('entity', 'update'):
return tsplit[1] in ('participant_id', 'participant_set_id',
'sample_id', 'sample_set_id',
'pair_id', 'pair_set_id')
elif tsplit[0] == 'membership':
if len(headers) < 2:
return False
# membership:sample_set_id sample_id, e.g.
return tsplit[1].replace('set_', '') == headers[1]
else:
return False | python | def _valid_headerline(l):
"""return true if the given string is a valid loadfile header"""
if not l:
return False
headers = l.split('\t')
first_col = headers[0]
tsplit = first_col.split(':')
if len(tsplit) != 2:
return False
if tsplit[0] in ('entity', 'update'):
return tsplit[1] in ('participant_id', 'participant_set_id',
'sample_id', 'sample_set_id',
'pair_id', 'pair_set_id')
elif tsplit[0] == 'membership':
if len(headers) < 2:
return False
# membership:sample_set_id sample_id, e.g.
return tsplit[1].replace('set_', '') == headers[1]
else:
return False | [
"def",
"_valid_headerline",
"(",
"l",
")",
":",
"if",
"not",
"l",
":",
"return",
"False",
"headers",
"=",
"l",
".",
"split",
"(",
"'\\t'",
")",
"first_col",
"=",
"headers",
"[",
"0",
"]",
"tsplit",
"=",
"first_col",
".",
"split",
"(",
"':'",
")",
"... | return true if the given string is a valid loadfile header | [
"return",
"true",
"if",
"the",
"given",
"string",
"is",
"a",
"valid",
"loadfile",
"header"
] | dddf91547479506dbbafb69ec84d44dcc4a94ab4 | https://github.com/broadinstitute/fiss/blob/dddf91547479506dbbafb69ec84d44dcc4a94ab4/firecloud/fiss.py#L1729-L1751 | train | 38,938 |
broadinstitute/fiss | firecloud/fiss.py | _batch_load | def _batch_load(project, workspace, headerline, entity_data, chunk_size=500):
""" Submit a large number of entity updates in batches of chunk_size """
if fcconfig.verbosity:
print("Batching " + str(len(entity_data)) + " updates to Firecloud...")
# Parse the entity type from the first cell, e.g. "entity:sample_id"
# First check that the header is valid
if not _valid_headerline(headerline):
eprint("Invalid loadfile header:\n" + headerline)
return 1
update_type = "membership" if headerline.startswith("membership") else "entitie"
etype = headerline.split('\t')[0].split(':')[1].replace("_id", "")
# Split entity_data into chunks
total = int(len(entity_data) / chunk_size) + 1
batch = 0
for i in range(0, len(entity_data), chunk_size):
batch += 1
if fcconfig.verbosity:
print("Updating {0} {1}s {2}-{3}, batch {4}/{5}".format(
etype, update_type, i+1, min(i+chunk_size, len(entity_data)),
batch, total))
this_data = headerline + '\n' + '\n'.join(entity_data[i:i+chunk_size])
# Now push the entity data to firecloud
r = fapi.upload_entities(project, workspace, this_data)
fapi._check_response_code(r, 200)
return 0 | python | def _batch_load(project, workspace, headerline, entity_data, chunk_size=500):
""" Submit a large number of entity updates in batches of chunk_size """
if fcconfig.verbosity:
print("Batching " + str(len(entity_data)) + " updates to Firecloud...")
# Parse the entity type from the first cell, e.g. "entity:sample_id"
# First check that the header is valid
if not _valid_headerline(headerline):
eprint("Invalid loadfile header:\n" + headerline)
return 1
update_type = "membership" if headerline.startswith("membership") else "entitie"
etype = headerline.split('\t')[0].split(':')[1].replace("_id", "")
# Split entity_data into chunks
total = int(len(entity_data) / chunk_size) + 1
batch = 0
for i in range(0, len(entity_data), chunk_size):
batch += 1
if fcconfig.verbosity:
print("Updating {0} {1}s {2}-{3}, batch {4}/{5}".format(
etype, update_type, i+1, min(i+chunk_size, len(entity_data)),
batch, total))
this_data = headerline + '\n' + '\n'.join(entity_data[i:i+chunk_size])
# Now push the entity data to firecloud
r = fapi.upload_entities(project, workspace, this_data)
fapi._check_response_code(r, 200)
return 0 | [
"def",
"_batch_load",
"(",
"project",
",",
"workspace",
",",
"headerline",
",",
"entity_data",
",",
"chunk_size",
"=",
"500",
")",
":",
"if",
"fcconfig",
".",
"verbosity",
":",
"print",
"(",
"\"Batching \"",
"+",
"str",
"(",
"len",
"(",
"entity_data",
")",... | Submit a large number of entity updates in batches of chunk_size | [
"Submit",
"a",
"large",
"number",
"of",
"entity",
"updates",
"in",
"batches",
"of",
"chunk_size"
] | dddf91547479506dbbafb69ec84d44dcc4a94ab4 | https://github.com/broadinstitute/fiss/blob/dddf91547479506dbbafb69ec84d44dcc4a94ab4/firecloud/fiss.py#L1753-L1784 | train | 38,939 |
broadinstitute/fiss | firecloud/entity.py | Entity.create_payload | def create_payload(entities):
"""Create a tsv payload describing entities.
A TSV payload consists of 1 header row describing entity type
and attribute names. Each subsequent line is an entity_id followed
by attribute values separated by the tab "\\t" character. This
payload can be uploaded to the workspace via
firecloud.api.upload_entities()
"""
#First check that all entities are of the same type
types = {e.etype for e in entities}
if len(types) != 1:
raise ValueError("Can't create payload with " +
str(len(types)) + " types")
all_attrs = set()
for e in entities:
all_attrs.update(set(e.attrs.keys()))
#Write a header line
all_attrs = list(all_attrs)
header = "entity:" + entities[0].etype + "_id"
payload = '\t'.join([header] + all_attrs) + '\n'
for e in entities:
line = e.entity_id
for a in all_attrs:
line += '\t' + e.attrs.get(a, "")
payload += line + '\n'
return payload | python | def create_payload(entities):
"""Create a tsv payload describing entities.
A TSV payload consists of 1 header row describing entity type
and attribute names. Each subsequent line is an entity_id followed
by attribute values separated by the tab "\\t" character. This
payload can be uploaded to the workspace via
firecloud.api.upload_entities()
"""
#First check that all entities are of the same type
types = {e.etype for e in entities}
if len(types) != 1:
raise ValueError("Can't create payload with " +
str(len(types)) + " types")
all_attrs = set()
for e in entities:
all_attrs.update(set(e.attrs.keys()))
#Write a header line
all_attrs = list(all_attrs)
header = "entity:" + entities[0].etype + "_id"
payload = '\t'.join([header] + all_attrs) + '\n'
for e in entities:
line = e.entity_id
for a in all_attrs:
line += '\t' + e.attrs.get(a, "")
payload += line + '\n'
return payload | [
"def",
"create_payload",
"(",
"entities",
")",
":",
"#First check that all entities are of the same type",
"types",
"=",
"{",
"e",
".",
"etype",
"for",
"e",
"in",
"entities",
"}",
"if",
"len",
"(",
"types",
")",
"!=",
"1",
":",
"raise",
"ValueError",
"(",
"\... | Create a tsv payload describing entities.
A TSV payload consists of 1 header row describing entity type
and attribute names. Each subsequent line is an entity_id followed
by attribute values separated by the tab "\\t" character. This
payload can be uploaded to the workspace via
firecloud.api.upload_entities() | [
"Create",
"a",
"tsv",
"payload",
"describing",
"entities",
"."
] | dddf91547479506dbbafb69ec84d44dcc4a94ab4 | https://github.com/broadinstitute/fiss/blob/dddf91547479506dbbafb69ec84d44dcc4a94ab4/firecloud/entity.py#L40-L70 | train | 38,940 |
broadinstitute/fiss | firecloud/entity.py | Entity.create_loadfile | def create_loadfile(entities, f):
"""Create payload and save to file."""
with open(f, 'w') as out:
out.write(Entity.create_payload(entities)) | python | def create_loadfile(entities, f):
"""Create payload and save to file."""
with open(f, 'w') as out:
out.write(Entity.create_payload(entities)) | [
"def",
"create_loadfile",
"(",
"entities",
",",
"f",
")",
":",
"with",
"open",
"(",
"f",
",",
"'w'",
")",
"as",
"out",
":",
"out",
".",
"write",
"(",
"Entity",
".",
"create_payload",
"(",
"entities",
")",
")"
] | Create payload and save to file. | [
"Create",
"payload",
"and",
"save",
"to",
"file",
"."
] | dddf91547479506dbbafb69ec84d44dcc4a94ab4 | https://github.com/broadinstitute/fiss/blob/dddf91547479506dbbafb69ec84d44dcc4a94ab4/firecloud/entity.py#L73-L76 | train | 38,941 |
broadinstitute/fiss | setup.py | InstallCommand.needs_gcloud | def needs_gcloud(self):
"""Returns true if gcloud is unavailable and needed for
authentication."""
gcloud_default_path = ['google-cloud-sdk', 'bin']
if platform.system() != "Windows":
gcloud_default_path = os.path.join(os.path.expanduser('~'),
*gcloud_default_path)
else:
gcloud_default_path = os.path.join(os.environ['LOCALAPPDATA'],
'Google', 'Cloud SDK',
*gcloud_default_path)
return not os.getenv('SERVER_SOFTWARE',
'').startswith('Google App Engine/') \
and gcloud_default_path not in os.environ["PATH"].split(os.pathsep) \
and which('gcloud') is None | python | def needs_gcloud(self):
"""Returns true if gcloud is unavailable and needed for
authentication."""
gcloud_default_path = ['google-cloud-sdk', 'bin']
if platform.system() != "Windows":
gcloud_default_path = os.path.join(os.path.expanduser('~'),
*gcloud_default_path)
else:
gcloud_default_path = os.path.join(os.environ['LOCALAPPDATA'],
'Google', 'Cloud SDK',
*gcloud_default_path)
return not os.getenv('SERVER_SOFTWARE',
'').startswith('Google App Engine/') \
and gcloud_default_path not in os.environ["PATH"].split(os.pathsep) \
and which('gcloud') is None | [
"def",
"needs_gcloud",
"(",
"self",
")",
":",
"gcloud_default_path",
"=",
"[",
"'google-cloud-sdk'",
",",
"'bin'",
"]",
"if",
"platform",
".",
"system",
"(",
")",
"!=",
"\"Windows\"",
":",
"gcloud_default_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"o... | Returns true if gcloud is unavailable and needed for
authentication. | [
"Returns",
"true",
"if",
"gcloud",
"is",
"unavailable",
"and",
"needed",
"for",
"authentication",
"."
] | dddf91547479506dbbafb69ec84d44dcc4a94ab4 | https://github.com/broadinstitute/fiss/blob/dddf91547479506dbbafb69ec84d44dcc4a94ab4/setup.py#L16-L30 | train | 38,942 |
fhcrc/seqmagick | seqmagick/subcommands/mogrify.py | action | def action(arguments):
"""
Run mogrify. Most of the action is in convert, this just creates a temp
file for the output.
"""
for input_file in arguments.input_files:
logging.info(input_file)
# Generate a temporary file
with common.atomic_write(
input_file.name, file_factory=common.FileType('wt')) as tf:
convert.transform_file(input_file, tf, arguments)
if hasattr(input_file, 'close'):
input_file.close() | python | def action(arguments):
"""
Run mogrify. Most of the action is in convert, this just creates a temp
file for the output.
"""
for input_file in arguments.input_files:
logging.info(input_file)
# Generate a temporary file
with common.atomic_write(
input_file.name, file_factory=common.FileType('wt')) as tf:
convert.transform_file(input_file, tf, arguments)
if hasattr(input_file, 'close'):
input_file.close() | [
"def",
"action",
"(",
"arguments",
")",
":",
"for",
"input_file",
"in",
"arguments",
".",
"input_files",
":",
"logging",
".",
"info",
"(",
"input_file",
")",
"# Generate a temporary file",
"with",
"common",
".",
"atomic_write",
"(",
"input_file",
".",
"name",
... | Run mogrify. Most of the action is in convert, this just creates a temp
file for the output. | [
"Run",
"mogrify",
".",
"Most",
"of",
"the",
"action",
"is",
"in",
"convert",
"this",
"just",
"creates",
"a",
"temp",
"file",
"for",
"the",
"output",
"."
] | 1642bb87ba5c171fbd307f9da0f8a0ee1d69d5ed | https://github.com/fhcrc/seqmagick/blob/1642bb87ba5c171fbd307f9da0f8a0ee1d69d5ed/seqmagick/subcommands/mogrify.py#L22-L34 | train | 38,943 |
fhcrc/seqmagick | seqmagick/subcommands/quality_filter.py | all_unambiguous | def all_unambiguous(sequence_str):
"""
All unambiguous versions of sequence_str
"""
result = [[]]
for c in sequence_str:
result = [i + [a] for i in result for a in _AMBIGUOUS_MAP.get(c, c)]
return [''.join(i) for i in result] | python | def all_unambiguous(sequence_str):
"""
All unambiguous versions of sequence_str
"""
result = [[]]
for c in sequence_str:
result = [i + [a] for i in result for a in _AMBIGUOUS_MAP.get(c, c)]
return [''.join(i) for i in result] | [
"def",
"all_unambiguous",
"(",
"sequence_str",
")",
":",
"result",
"=",
"[",
"[",
"]",
"]",
"for",
"c",
"in",
"sequence_str",
":",
"result",
"=",
"[",
"i",
"+",
"[",
"a",
"]",
"for",
"i",
"in",
"result",
"for",
"a",
"in",
"_AMBIGUOUS_MAP",
".",
"ge... | All unambiguous versions of sequence_str | [
"All",
"unambiguous",
"versions",
"of",
"sequence_str"
] | 1642bb87ba5c171fbd307f9da0f8a0ee1d69d5ed | https://github.com/fhcrc/seqmagick/blob/1642bb87ba5c171fbd307f9da0f8a0ee1d69d5ed/seqmagick/subcommands/quality_filter.py#L44-L51 | train | 38,944 |
fhcrc/seqmagick | seqmagick/subcommands/quality_filter.py | moving_average | def moving_average(iterable, n):
"""
From Python collections module documentation
moving_average([40, 30, 50, 46, 39, 44]) --> 40.0 42.0 45.0 43.0
"""
it = iter(iterable)
d = collections.deque(itertools.islice(it, n - 1))
d.appendleft(0)
s = sum(d)
for elem in it:
s += elem - d.popleft()
d.append(elem)
yield s / float(n) | python | def moving_average(iterable, n):
"""
From Python collections module documentation
moving_average([40, 30, 50, 46, 39, 44]) --> 40.0 42.0 45.0 43.0
"""
it = iter(iterable)
d = collections.deque(itertools.islice(it, n - 1))
d.appendleft(0)
s = sum(d)
for elem in it:
s += elem - d.popleft()
d.append(elem)
yield s / float(n) | [
"def",
"moving_average",
"(",
"iterable",
",",
"n",
")",
":",
"it",
"=",
"iter",
"(",
"iterable",
")",
"d",
"=",
"collections",
".",
"deque",
"(",
"itertools",
".",
"islice",
"(",
"it",
",",
"n",
"-",
"1",
")",
")",
"d",
".",
"appendleft",
"(",
"... | From Python collections module documentation
moving_average([40, 30, 50, 46, 39, 44]) --> 40.0 42.0 45.0 43.0 | [
"From",
"Python",
"collections",
"module",
"documentation"
] | 1642bb87ba5c171fbd307f9da0f8a0ee1d69d5ed | https://github.com/fhcrc/seqmagick/blob/1642bb87ba5c171fbd307f9da0f8a0ee1d69d5ed/seqmagick/subcommands/quality_filter.py#L204-L217 | train | 38,945 |
fhcrc/seqmagick | seqmagick/subcommands/quality_filter.py | parse_barcode_file | def parse_barcode_file(fp, primer=None, header=False):
"""
Load label, barcode, primer records from a CSV file.
Returns a map from barcode -> label
Any additional columns are ignored
"""
tr = trie.trie()
reader = csv.reader(fp)
if header:
# Skip header
next(reader)
# Skip blank rows
records = (record for record in reader if record)
for record in records:
specimen, barcode = record[:2]
if primer is not None:
pr = primer
else:
pr = record[2]
for sequence in all_unambiguous(barcode + pr):
if sequence in tr:
raise ValueError("Duplicate sample: {0}, {1} both have {2}",
specimen, tr[sequence], sequence)
logging.info('%s->%s', sequence, specimen)
tr[sequence] = specimen
return tr | python | def parse_barcode_file(fp, primer=None, header=False):
"""
Load label, barcode, primer records from a CSV file.
Returns a map from barcode -> label
Any additional columns are ignored
"""
tr = trie.trie()
reader = csv.reader(fp)
if header:
# Skip header
next(reader)
# Skip blank rows
records = (record for record in reader if record)
for record in records:
specimen, barcode = record[:2]
if primer is not None:
pr = primer
else:
pr = record[2]
for sequence in all_unambiguous(barcode + pr):
if sequence in tr:
raise ValueError("Duplicate sample: {0}, {1} both have {2}",
specimen, tr[sequence], sequence)
logging.info('%s->%s', sequence, specimen)
tr[sequence] = specimen
return tr | [
"def",
"parse_barcode_file",
"(",
"fp",
",",
"primer",
"=",
"None",
",",
"header",
"=",
"False",
")",
":",
"tr",
"=",
"trie",
".",
"trie",
"(",
")",
"reader",
"=",
"csv",
".",
"reader",
"(",
"fp",
")",
"if",
"header",
":",
"# Skip header",
"next",
... | Load label, barcode, primer records from a CSV file.
Returns a map from barcode -> label
Any additional columns are ignored | [
"Load",
"label",
"barcode",
"primer",
"records",
"from",
"a",
"CSV",
"file",
"."
] | 1642bb87ba5c171fbd307f9da0f8a0ee1d69d5ed | https://github.com/fhcrc/seqmagick/blob/1642bb87ba5c171fbd307f9da0f8a0ee1d69d5ed/seqmagick/subcommands/quality_filter.py#L641-L672 | train | 38,946 |
fhcrc/seqmagick | seqmagick/subcommands/quality_filter.py | action | def action(arguments):
"""
Given parsed arguments, filter input files.
"""
if arguments.quality_window_mean_qual and not arguments.quality_window:
raise ValueError("--quality-window-mean-qual specified without "
"--quality-window")
if trie is None or triefind is None:
raise ValueError(
'Missing Bio.trie and/or Bio.triefind modules. Cannot continue')
filters = []
input_type = fileformat.from_handle(arguments.sequence_file)
output_type = fileformat.from_handle(arguments.output_file)
with arguments.sequence_file as fp:
if arguments.input_qual:
sequences = QualityIO.PairedFastaQualIterator(
fp, arguments.input_qual)
else:
sequences = SeqIO.parse(fp, input_type)
listener = RecordEventListener()
if arguments.details_out:
rh = RecordReportHandler(arguments.details_out, arguments.argv,
arguments.details_comment)
rh.register_with(listener)
# Track read sequences
sequences = listener.iterable_hook('read', sequences)
# Add filters
if arguments.min_mean_quality and input_type == 'fastq':
qfilter = QualityScoreFilter(arguments.min_mean_quality)
filters.append(qfilter)
if arguments.max_length:
max_length_filter = MaxLengthFilter(arguments.max_length)
filters.append(max_length_filter)
if arguments.min_length:
min_length_filter = MinLengthFilter(arguments.min_length)
filters.append(min_length_filter)
if arguments.max_ambiguous is not None:
max_ambig_filter = MaxAmbiguousFilter(arguments.max_ambiguous)
filters.append(max_ambig_filter)
if arguments.pct_ambiguous is not None:
pct_ambig_filter = PctAmbiguousFilter(arguments.pct_ambiguous)
filters.append(pct_ambig_filter)
if arguments.ambiguous_action:
ambiguous_filter = AmbiguousBaseFilter(arguments.ambiguous_action)
filters.append(ambiguous_filter)
if arguments.quality_window:
min_qual = (arguments.quality_window_mean_qual or
arguments.min_mean_quality)
window_filter = WindowQualityScoreFilter(arguments.quality_window,
min_qual)
filters.insert(0, window_filter)
if arguments.barcode_file:
with arguments.barcode_file:
tr = parse_barcode_file(arguments.barcode_file,
arguments.primer,
arguments.barcode_header)
f = PrimerBarcodeFilter(tr)
filters.append(f)
if arguments.map_out:
barcode_writer = csv.writer(
arguments.map_out,
quoting=getattr(csv, arguments.quoting),
lineterminator='\n')
def barcode_handler(record, sample, barcode=None):
barcode_writer.writerow((record.id, sample))
listener.register_handler('found_barcode', barcode_handler)
for f in filters:
f.listener = listener
sequences = f.filter_records(sequences)
# Track sequences which passed all filters
sequences = listener.iterable_hook('write', sequences)
with arguments.output_file:
SeqIO.write(sequences, arguments.output_file, output_type)
rpt_rows = (f.report_dict() for f in filters)
# Write report
with arguments.report_out as fp:
writer = csv.DictWriter(
fp, BaseFilter.report_fields, lineterminator='\n', delimiter='\t')
writer.writeheader()
writer.writerows(rpt_rows) | python | def action(arguments):
"""
Given parsed arguments, filter input files.
"""
if arguments.quality_window_mean_qual and not arguments.quality_window:
raise ValueError("--quality-window-mean-qual specified without "
"--quality-window")
if trie is None or triefind is None:
raise ValueError(
'Missing Bio.trie and/or Bio.triefind modules. Cannot continue')
filters = []
input_type = fileformat.from_handle(arguments.sequence_file)
output_type = fileformat.from_handle(arguments.output_file)
with arguments.sequence_file as fp:
if arguments.input_qual:
sequences = QualityIO.PairedFastaQualIterator(
fp, arguments.input_qual)
else:
sequences = SeqIO.parse(fp, input_type)
listener = RecordEventListener()
if arguments.details_out:
rh = RecordReportHandler(arguments.details_out, arguments.argv,
arguments.details_comment)
rh.register_with(listener)
# Track read sequences
sequences = listener.iterable_hook('read', sequences)
# Add filters
if arguments.min_mean_quality and input_type == 'fastq':
qfilter = QualityScoreFilter(arguments.min_mean_quality)
filters.append(qfilter)
if arguments.max_length:
max_length_filter = MaxLengthFilter(arguments.max_length)
filters.append(max_length_filter)
if arguments.min_length:
min_length_filter = MinLengthFilter(arguments.min_length)
filters.append(min_length_filter)
if arguments.max_ambiguous is not None:
max_ambig_filter = MaxAmbiguousFilter(arguments.max_ambiguous)
filters.append(max_ambig_filter)
if arguments.pct_ambiguous is not None:
pct_ambig_filter = PctAmbiguousFilter(arguments.pct_ambiguous)
filters.append(pct_ambig_filter)
if arguments.ambiguous_action:
ambiguous_filter = AmbiguousBaseFilter(arguments.ambiguous_action)
filters.append(ambiguous_filter)
if arguments.quality_window:
min_qual = (arguments.quality_window_mean_qual or
arguments.min_mean_quality)
window_filter = WindowQualityScoreFilter(arguments.quality_window,
min_qual)
filters.insert(0, window_filter)
if arguments.barcode_file:
with arguments.barcode_file:
tr = parse_barcode_file(arguments.barcode_file,
arguments.primer,
arguments.barcode_header)
f = PrimerBarcodeFilter(tr)
filters.append(f)
if arguments.map_out:
barcode_writer = csv.writer(
arguments.map_out,
quoting=getattr(csv, arguments.quoting),
lineterminator='\n')
def barcode_handler(record, sample, barcode=None):
barcode_writer.writerow((record.id, sample))
listener.register_handler('found_barcode', barcode_handler)
for f in filters:
f.listener = listener
sequences = f.filter_records(sequences)
# Track sequences which passed all filters
sequences = listener.iterable_hook('write', sequences)
with arguments.output_file:
SeqIO.write(sequences, arguments.output_file, output_type)
rpt_rows = (f.report_dict() for f in filters)
# Write report
with arguments.report_out as fp:
writer = csv.DictWriter(
fp, BaseFilter.report_fields, lineterminator='\n', delimiter='\t')
writer.writeheader()
writer.writerows(rpt_rows) | [
"def",
"action",
"(",
"arguments",
")",
":",
"if",
"arguments",
".",
"quality_window_mean_qual",
"and",
"not",
"arguments",
".",
"quality_window",
":",
"raise",
"ValueError",
"(",
"\"--quality-window-mean-qual specified without \"",
"\"--quality-window\"",
")",
"if",
"t... | Given parsed arguments, filter input files. | [
"Given",
"parsed",
"arguments",
"filter",
"input",
"files",
"."
] | 1642bb87ba5c171fbd307f9da0f8a0ee1d69d5ed | https://github.com/fhcrc/seqmagick/blob/1642bb87ba5c171fbd307f9da0f8a0ee1d69d5ed/seqmagick/subcommands/quality_filter.py#L675-L767 | train | 38,947 |
def iterable_hook(self, name, iterable):
    """Yield each item of *iterable*, firing a ``name`` event per item.

    Every item passes through unchanged; the side effect is a call to
    ``self(name, item)`` immediately before the item is yielded.
    """
    for item in iter(iterable):
        self(name, item)
        yield item
fhcrc/seqmagick | seqmagick/subcommands/quality_filter.py | RecordReportHandler._found_barcode | def _found_barcode(self, record, sample, barcode=None):
"""Hook called when barcode is found"""
assert record.id == self.current_record['sequence_name']
self.current_record['sample'] = sample | python | def _found_barcode(self, record, sample, barcode=None):
"""Hook called when barcode is found"""
assert record.id == self.current_record['sequence_name']
self.current_record['sample'] = sample | [
"def",
"_found_barcode",
"(",
"self",
",",
"record",
",",
"sample",
",",
"barcode",
"=",
"None",
")",
":",
"assert",
"record",
".",
"id",
"==",
"self",
".",
"current_record",
"[",
"'sequence_name'",
"]",
"self",
".",
"current_record",
"[",
"'sample'",
"]",... | Hook called when barcode is found | [
"Hook",
"called",
"when",
"barcode",
"is",
"found"
] | 1642bb87ba5c171fbd307f9da0f8a0ee1d69d5ed | https://github.com/fhcrc/seqmagick/blob/1642bb87ba5c171fbd307f9da0f8a0ee1d69d5ed/seqmagick/subcommands/quality_filter.py#L327-L330 | train | 38,949 |
def filter_records(self, records):
    """Yield records that pass ``self.filter_record``, tracking counts.

    Records whose filtered sequence is identical to the input increment
    ``passed_unchanged``; modified ones increment ``passed_changed``.
    A ``FailedFilter`` raised by ``filter_record`` increments ``failed``
    and, when a listener is attached, emits a ``failed_filter`` event
    carrying the filter name and the failure value.
    """
    for rec in records:
        try:
            result = self.filter_record(rec)
            assert (result)
            # Distinguish pass-through from in-place modification.
            if result.seq == rec.seq:
                self.passed_unchanged += 1
            else:
                self.passed_changed += 1
            yield result
        except FailedFilter as err:
            self.failed += 1
            bad_value = err.value
            if self.listener:
                self.listener(
                    'failed_filter',
                    rec,
                    filter_name=self.name,
                    value=bad_value)
def filter_record(self, record):
    """Truncate at, or drop on, the first ambiguous base ('N').

    With action 'truncate' the record is cut just before the first 'N';
    with 'drop' a FailedFilter is raised.  Records without any 'N' pass
    through untouched.
    """
    first_n = record.seq.find('N')
    if first_n < 0:
        return record
    if self.action == 'truncate':
        return record[:first_n]
    if self.action == 'drop':
        raise FailedFilter()
    # Unreachable for the two supported actions.
    assert False
def filter_record(self, record):
    """Pass *record* through unless it is shorter than ``min_length``.

    Raises FailedFilter (carrying the offending length) for records that
    fail the minimum-length requirement.
    """
    length = len(record)
    if length < self.min_length:
        raise FailedFilter(length)
    return record
def filter_record(self, record):
    """Truncate *record* to at most ``max_length`` positions.

    Records already shorter than the limit are passed through unchanged.
    """
    limit = self.max_length
    return record[:limit] if len(record) >= limit else record
def summarize_sequence_file(source_file, file_type=None):
    """Summarize a sequence file in one streaming pass.

    Returns a tuple ``(file name, 'TRUE'/'FALSE' alignment flag, minimum
    length, maximum length, mean length, sequence count)``.  A file is
    reported as an alignment only when it holds at least two sequences of
    identical length.  When *file_type* is not given it is inferred from
    the file handle.
    """
    alignment = True
    mean = None
    shortest = sys.maxsize
    longest = 0
    count = 0
    with common.FileType('rt')(source_file) as handle:
        if not file_type:
            file_type = fileformat.from_handle(handle)
        for rec in SeqIO.parse(handle, file_type):
            count += 1
            length = len(rec)
            # Any mismatch against the running maximum means the file
            # cannot be a fixed-width alignment.
            if longest != 0 and length != longest:
                alignment = False
            if length > longest:
                longest = length
            if length < shortest:
                shortest = length
            # Running mean avoids a second pass over the file.
            if count == 1:
                mean = float(length)
            else:
                mean = mean + ((length - mean) / count)
    if mean is None:
        # Empty file: report zeros rather than the sys.maxsize sentinel.
        shortest = longest = mean = 0
    if count <= 1:
        alignment = False
    return (source_file, str(alignment).upper(), shortest,
            longest, mean, count)
def _record_buffer(records, buffer_size=DEFAULT_BUFFER_SIZE):
    """Spool *records* to a temporary pickle buffer for repeated passes.

    Yields (once) a zero-argument callable; each call returns a fresh
    iterator over the buffered records.  Intended for use as a context
    manager so the spooled temporary file is cleaned up on exit.
    """
    with tempfile.SpooledTemporaryFile(buffer_size, mode='wb+') as spool:
        writer = pickle.Pickler(spool)
        for rec in records:
            writer.dump(rec)

        def replay():
            # Rewind and replay every pickled record until exhaustion.
            spool.seek(0)
            reader = pickle.Unpickler(spool)
            while True:
                try:
                    yield reader.load()
                except EOFError:
                    return

        yield replay
def dashes_cleanup(records, prune_chars='.:?~'):
    """Normalize alternative gap characters to '-'.

    Every character from *prune_chars* found in a record's sequence is
    replaced by a dash; records are yielded with the sequence rebuilt.
    """
    logging.info(
        "Applying _dashes_cleanup: converting any of '{}' to '-'.".format(prune_chars))
    # One C-level pass per sequence via str.translate.
    table = str.maketrans({c: '-' for c in prune_chars})
    for rec in records:
        rec.seq = Seq(str(rec.seq).translate(table), rec.seq.alphabet)
        yield rec
def deduplicate_sequences(records, out_file):
    """Drop records whose sequence checksum has already been seen.

    The first record per unique sequence (by SEGUID checksum) is yielded;
    later duplicates are suppressed.  When *out_file* is given, one line
    per checksum group (the space-joined record ids) is written to it
    after the stream is exhausted.
    """
    logging.info('Applying _deduplicate_sequences generator: '
                 'removing any duplicate records with identical sequences.')
    groups = collections.defaultdict(list)
    for rec in records:
        ids = groups[seguid(rec.seq)]
        if not ids:
            # First sighting of this sequence: pass it downstream.
            yield rec
        ids.append(rec.id)
    if out_file is not None:
        with out_file:
            for ids in groups.values():
                out_file.write('%s\n' % (' '.join(ids),))
def deduplicate_taxa(records):
    """
    Remove any duplicate records with identical IDs, keep the first
    instance seen and discard additional occurences.

    IDs of the form ``<taxid>|<rest>`` are deduplicated on the integer
    taxid; any other ID is deduplicated on the full ID string.
    """
    logging.info('Applying _deduplicate_taxa generator: ' + \
            'removing any duplicate records with identical IDs.')
    taxa = set()
    for record in records:
        # Default to full ID, split if | is found.
        taxid = record.id
        if '|' in record.id:
            try:
                taxid = int(record.id.split("|")[0])
            except ValueError:
                # Narrowed from a bare ``except``: only a failed integer
                # parse should fall back on the raw ID; any other error
                # must propagate instead of being silently swallowed.
                logging.warning("Unable to parse integer taxid from %s",
                                taxid)
        if taxid in taxa:
            continue
        taxa.add(taxid)
        yield record
def first_name_capture(records):
    """Strip everything after the first whitespace from descriptions.

    Records whose description contains whitespace are re-emitted with an
    empty description (keeping only the ID); all others pass through
    unchanged.
    """
    logging.info('Applying _first_name_capture generator: '
                 'making sure ID only contains the first whitespace-delimited '
                 'word.')
    has_whitespace = re.compile(r'\s+').search
    for rec in records:
        if has_whitespace(rec.description):
            yield SeqRecord(rec.seq, id=rec.id,
                            description="")
        else:
            yield rec
def include_from_file(records, handle):
    """Keep only records whose stripped ID appears in *handle*.

    *handle* is any iterable of lines/strings; surrounding whitespace is
    ignored on both sides of the comparison.
    """
    wanted = {line.strip() for line in handle}
    for rec in records:
        if rec.id.strip() in wanted:
            yield rec
def drop_columns(records, slices):
    """Remove the alignment columns covered by *slices* from every record.

    Column indices are resolved per record, so ragged inputs are handled;
    each record's sequence is rebuilt without the dropped positions.
    """
    for rec in records:
        length = len(rec)
        # Expand every slice into a concrete set of doomed indices.
        doomed = {idx for sl in slices for idx in range(*sl.indices(length))}
        survivors = (base for idx, base in enumerate(rec.seq)
                     if idx not in doomed)
        rec.seq = Seq(''.join(survivors), rec.seq.alphabet)
        yield rec
def cut_sequences_relative(records, slices, record_id):
    """Cut records by *slices* indexed on non-gap positions of *record_id*.

    The record named *record_id* is located during a buffered first pass,
    its gap structure is used to translate the requested slices, and all
    records are then cut with the translated coordinates.

    Raises ValueError when no record carries *record_id*.
    """
    with _record_buffer(records) as buffered:
        anchor = None
        for candidate in buffered():
            if candidate.id == record_id:
                anchor = candidate
                break
        if anchor is None:
            raise ValueError("Record with id {0} not found.".format(record_id))
        translated = _update_slices(anchor, slices)
        for rec in multi_cut_sequences(buffered(), translated):
            yield rec
def multi_mask_sequences(records, slices):
    """Mask the positions covered by *slices* with gap ('-') characters.

    Characters at masked positions are replaced by '-'; all other
    characters are kept, and the record's sequence is rebuilt in place.
    """
    for rec in records:
        positions = list(range(len(rec)))
        masked = set()
        for sl in slices:
            masked.update(positions[sl])
        rebuilt = ''.join('-' if idx in masked else base
                          for idx, base in enumerate(str(rec.seq)))
        rec.seq = Seq(rebuilt)
        yield rec
def reverse_sequences(records):
    """Reverse the site order of every sequence (not a complement)."""
    logging.info('Applying _reverse_sequences generator: '
                 'reversing the order of sites in sequences.')
    for rec in records:
        flipped = SeqRecord(rec.seq[::-1], id=rec.id,
                            name=rec.name,
                            description=rec.description)
        # Per-letter annotations are positional and must be reversed too.
        _reverse_annotations(rec, flipped)
        yield flipped
def reverse_complement_sequences(records):
    """Replace each sequence with its reverse complement."""
    logging.info('Applying _reverse_complement_sequences generator: '
                 'transforming sequences into reverse complements.')
    for rec in records:
        flipped = SeqRecord(rec.seq.reverse_complement(),
                            id=rec.id, name=rec.name,
                            description=rec.description)
        # Per-letter annotations are positional and must be reversed too.
        _reverse_annotations(rec, flipped)
        yield flipped
def ungap_sequences(records, gap_chars=GAP_TABLE):
    """Strip all gap characters from each record of an alignment."""
    logging.info('Applying _ungap_sequences generator: removing all gap characters')
    yield from (ungap_all(rec, gap_chars) for rec in records)
fhcrc/seqmagick | seqmagick/transform.py | _update_id | def _update_id(record, new_id):
"""
Update a record id to new_id, also modifying the ID in record.description
"""
old_id = record.id
record.id = new_id
# At least for FASTA, record ID starts the description
record.description = re.sub('^' + re.escape(old_id), new_id, record.description)
return record | python | def _update_id(record, new_id):
"""
Update a record id to new_id, also modifying the ID in record.description
"""
old_id = record.id
record.id = new_id
# At least for FASTA, record ID starts the description
record.description = re.sub('^' + re.escape(old_id), new_id, record.description)
return record | [
"def",
"_update_id",
"(",
"record",
",",
"new_id",
")",
":",
"old_id",
"=",
"record",
".",
"id",
"record",
".",
"id",
"=",
"new_id",
"# At least for FASTA, record ID starts the description",
"record",
".",
"description",
"=",
"re",
".",
"sub",
"(",
"'^'",
"+",... | Update a record id to new_id, also modifying the ID in record.description | [
"Update",
"a",
"record",
"id",
"to",
"new_id",
"also",
"modifying",
"the",
"ID",
"in",
"record",
".",
"description"
] | 1642bb87ba5c171fbd307f9da0f8a0ee1d69d5ed | https://github.com/fhcrc/seqmagick/blob/1642bb87ba5c171fbd307f9da0f8a0ee1d69d5ed/seqmagick/transform.py#L380-L389 | train | 38,967 |
fhcrc/seqmagick | seqmagick/transform.py | name_append_suffix | def name_append_suffix(records, suffix):
"""
Given a set of sequences, append a suffix for each sequence's name.
"""
logging.info('Applying _name_append_suffix generator: '
'Appending suffix ' + suffix + ' to all '
'sequence IDs.')
for record in records:
new_id = record.id + suffix
_update_id(record, new_id)
yield record | python | def name_append_suffix(records, suffix):
"""
Given a set of sequences, append a suffix for each sequence's name.
"""
logging.info('Applying _name_append_suffix generator: '
'Appending suffix ' + suffix + ' to all '
'sequence IDs.')
for record in records:
new_id = record.id + suffix
_update_id(record, new_id)
yield record | [
"def",
"name_append_suffix",
"(",
"records",
",",
"suffix",
")",
":",
"logging",
".",
"info",
"(",
"'Applying _name_append_suffix generator: '",
"'Appending suffix '",
"+",
"suffix",
"+",
"' to all '",
"'sequence IDs.'",
")",
"for",
"record",
"in",
"records",
":",
"... | Given a set of sequences, append a suffix for each sequence's name. | [
"Given",
"a",
"set",
"of",
"sequences",
"append",
"a",
"suffix",
"for",
"each",
"sequence",
"s",
"name",
"."
] | 1642bb87ba5c171fbd307f9da0f8a0ee1d69d5ed | https://github.com/fhcrc/seqmagick/blob/1642bb87ba5c171fbd307f9da0f8a0ee1d69d5ed/seqmagick/transform.py#L392-L402 | train | 38,968 |
fhcrc/seqmagick | seqmagick/transform.py | name_insert_prefix | def name_insert_prefix(records, prefix):
"""
Given a set of sequences, insert a prefix for each sequence's name.
"""
logging.info('Applying _name_insert_prefix generator: '
'Inserting prefix ' + prefix + ' for all '
'sequence IDs.')
for record in records:
new_id = prefix + record.id
_update_id(record, new_id)
yield record | python | def name_insert_prefix(records, prefix):
"""
Given a set of sequences, insert a prefix for each sequence's name.
"""
logging.info('Applying _name_insert_prefix generator: '
'Inserting prefix ' + prefix + ' for all '
'sequence IDs.')
for record in records:
new_id = prefix + record.id
_update_id(record, new_id)
yield record | [
"def",
"name_insert_prefix",
"(",
"records",
",",
"prefix",
")",
":",
"logging",
".",
"info",
"(",
"'Applying _name_insert_prefix generator: '",
"'Inserting prefix '",
"+",
"prefix",
"+",
"' for all '",
"'sequence IDs.'",
")",
"for",
"record",
"in",
"records",
":",
... | Given a set of sequences, insert a prefix for each sequence's name. | [
"Given",
"a",
"set",
"of",
"sequences",
"insert",
"a",
"prefix",
"for",
"each",
"sequence",
"s",
"name",
"."
] | 1642bb87ba5c171fbd307f9da0f8a0ee1d69d5ed | https://github.com/fhcrc/seqmagick/blob/1642bb87ba5c171fbd307f9da0f8a0ee1d69d5ed/seqmagick/transform.py#L405-L415 | train | 38,969 |
fhcrc/seqmagick | seqmagick/transform.py | name_include | def name_include(records, filter_regex):
"""
Given a set of sequences, filter out any sequences with names
that do not match the specified regular expression. Ignore case.
"""
logging.info('Applying _name_include generator: '
'including only IDs matching ' + filter_regex +
' in results.')
regex = re.compile(filter_regex)
for record in records:
if regex.search(record.id) or regex.search(record.description):
yield record | python | def name_include(records, filter_regex):
"""
Given a set of sequences, filter out any sequences with names
that do not match the specified regular expression. Ignore case.
"""
logging.info('Applying _name_include generator: '
'including only IDs matching ' + filter_regex +
' in results.')
regex = re.compile(filter_regex)
for record in records:
if regex.search(record.id) or regex.search(record.description):
yield record | [
"def",
"name_include",
"(",
"records",
",",
"filter_regex",
")",
":",
"logging",
".",
"info",
"(",
"'Applying _name_include generator: '",
"'including only IDs matching '",
"+",
"filter_regex",
"+",
"' in results.'",
")",
"regex",
"=",
"re",
".",
"compile",
"(",
"fi... | Given a set of sequences, filter out any sequences with names
that do not match the specified regular expression. Ignore case. | [
"Given",
"a",
"set",
"of",
"sequences",
"filter",
"out",
"any",
"sequences",
"with",
"names",
"that",
"do",
"not",
"match",
"the",
"specified",
"regular",
"expression",
".",
"Ignore",
"case",
"."
] | 1642bb87ba5c171fbd307f9da0f8a0ee1d69d5ed | https://github.com/fhcrc/seqmagick/blob/1642bb87ba5c171fbd307f9da0f8a0ee1d69d5ed/seqmagick/transform.py#L419-L430 | train | 38,970 |
fhcrc/seqmagick | seqmagick/transform.py | name_replace | def name_replace(records, search_regex, replace_pattern):
"""
Given a set of sequences, replace all occurrences of search_regex
with replace_pattern. Ignore case.
If the ID and the first word of the description match, assume the
description is FASTA-like and apply the transform to the entire
description, then set the ID from the first word. If the ID and
the first word of the description do not match, apply the transform
to each individually.
"""
regex = re.compile(search_regex)
for record in records:
maybe_id = record.description.split(None, 1)[0]
if maybe_id == record.id:
record.description = regex.sub(replace_pattern, record.description)
record.id = record.description.split(None, 1)[0]
else:
record.id = regex.sub(replace_pattern, record.id)
record.description = regex.sub(replace_pattern, record.description)
yield record | python | def name_replace(records, search_regex, replace_pattern):
"""
Given a set of sequences, replace all occurrences of search_regex
with replace_pattern. Ignore case.
If the ID and the first word of the description match, assume the
description is FASTA-like and apply the transform to the entire
description, then set the ID from the first word. If the ID and
the first word of the description do not match, apply the transform
to each individually.
"""
regex = re.compile(search_regex)
for record in records:
maybe_id = record.description.split(None, 1)[0]
if maybe_id == record.id:
record.description = regex.sub(replace_pattern, record.description)
record.id = record.description.split(None, 1)[0]
else:
record.id = regex.sub(replace_pattern, record.id)
record.description = regex.sub(replace_pattern, record.description)
yield record | [
"def",
"name_replace",
"(",
"records",
",",
"search_regex",
",",
"replace_pattern",
")",
":",
"regex",
"=",
"re",
".",
"compile",
"(",
"search_regex",
")",
"for",
"record",
"in",
"records",
":",
"maybe_id",
"=",
"record",
".",
"description",
".",
"split",
... | Given a set of sequences, replace all occurrences of search_regex
with replace_pattern. Ignore case.
If the ID and the first word of the description match, assume the
description is FASTA-like and apply the transform to the entire
description, then set the ID from the first word. If the ID and
the first word of the description do not match, apply the transform
to each individually. | [
"Given",
"a",
"set",
"of",
"sequences",
"replace",
"all",
"occurrences",
"of",
"search_regex",
"with",
"replace_pattern",
".",
"Ignore",
"case",
"."
] | 1642bb87ba5c171fbd307f9da0f8a0ee1d69d5ed | https://github.com/fhcrc/seqmagick/blob/1642bb87ba5c171fbd307f9da0f8a0ee1d69d5ed/seqmagick/transform.py#L446-L466 | train | 38,971 |
fhcrc/seqmagick | seqmagick/transform.py | seq_include | def seq_include(records, filter_regex):
"""
Filter any sequences who's seq does not match the filter. Ignore case.
"""
regex = re.compile(filter_regex)
for record in records:
if regex.search(str(record.seq)):
yield record | python | def seq_include(records, filter_regex):
"""
Filter any sequences who's seq does not match the filter. Ignore case.
"""
regex = re.compile(filter_regex)
for record in records:
if regex.search(str(record.seq)):
yield record | [
"def",
"seq_include",
"(",
"records",
",",
"filter_regex",
")",
":",
"regex",
"=",
"re",
".",
"compile",
"(",
"filter_regex",
")",
"for",
"record",
"in",
"records",
":",
"if",
"regex",
".",
"search",
"(",
"str",
"(",
"record",
".",
"seq",
")",
")",
"... | Filter any sequences who's seq does not match the filter. Ignore case. | [
"Filter",
"any",
"sequences",
"who",
"s",
"seq",
"does",
"not",
"match",
"the",
"filter",
".",
"Ignore",
"case",
"."
] | 1642bb87ba5c171fbd307f9da0f8a0ee1d69d5ed | https://github.com/fhcrc/seqmagick/blob/1642bb87ba5c171fbd307f9da0f8a0ee1d69d5ed/seqmagick/transform.py#L469-L476 | train | 38,972 |
fhcrc/seqmagick | seqmagick/transform.py | head | def head(records, head):
"""
Limit results to the top N records.
With the leading `-', print all but the last N records.
"""
logging.info('Applying _head generator: '
'limiting results to top ' + head + ' records.')
if head == '-0':
for record in records:
yield record
elif '-' in head:
with _record_buffer(records) as r:
record_count = sum(1 for record in r())
end_index = max(record_count + int(head), 0)
for record in itertools.islice(r(), end_index):
yield record
else:
for record in itertools.islice(records, int(head)):
yield record | python | def head(records, head):
"""
Limit results to the top N records.
With the leading `-', print all but the last N records.
"""
logging.info('Applying _head generator: '
'limiting results to top ' + head + ' records.')
if head == '-0':
for record in records:
yield record
elif '-' in head:
with _record_buffer(records) as r:
record_count = sum(1 for record in r())
end_index = max(record_count + int(head), 0)
for record in itertools.islice(r(), end_index):
yield record
else:
for record in itertools.islice(records, int(head)):
yield record | [
"def",
"head",
"(",
"records",
",",
"head",
")",
":",
"logging",
".",
"info",
"(",
"'Applying _head generator: '",
"'limiting results to top '",
"+",
"head",
"+",
"' records.'",
")",
"if",
"head",
"==",
"'-0'",
":",
"for",
"record",
"in",
"records",
":",
"yi... | Limit results to the top N records.
With the leading `-', print all but the last N records. | [
"Limit",
"results",
"to",
"the",
"top",
"N",
"records",
".",
"With",
"the",
"leading",
"-",
"print",
"all",
"but",
"the",
"last",
"N",
"records",
"."
] | 1642bb87ba5c171fbd307f9da0f8a0ee1d69d5ed | https://github.com/fhcrc/seqmagick/blob/1642bb87ba5c171fbd307f9da0f8a0ee1d69d5ed/seqmagick/transform.py#L510-L529 | train | 38,973 |
fhcrc/seqmagick | seqmagick/transform.py | tail | def tail(records, tail):
"""
Limit results to the bottom N records.
Use +N to output records starting with the Nth.
"""
logging.info('Applying _tail generator: '
'limiting results to top ' + tail + ' records.')
if tail == '+0':
for record in records:
yield record
elif '+' in tail:
tail = int(tail) - 1
for record in itertools.islice(records, tail, None):
yield record
else:
with _record_buffer(records) as r:
record_count = sum(1 for record in r())
start_index = max(record_count - int(tail), 0)
for record in itertools.islice(r(), start_index, None):
yield record | python | def tail(records, tail):
"""
Limit results to the bottom N records.
Use +N to output records starting with the Nth.
"""
logging.info('Applying _tail generator: '
'limiting results to top ' + tail + ' records.')
if tail == '+0':
for record in records:
yield record
elif '+' in tail:
tail = int(tail) - 1
for record in itertools.islice(records, tail, None):
yield record
else:
with _record_buffer(records) as r:
record_count = sum(1 for record in r())
start_index = max(record_count - int(tail), 0)
for record in itertools.islice(r(), start_index, None):
yield record | [
"def",
"tail",
"(",
"records",
",",
"tail",
")",
":",
"logging",
".",
"info",
"(",
"'Applying _tail generator: '",
"'limiting results to top '",
"+",
"tail",
"+",
"' records.'",
")",
"if",
"tail",
"==",
"'+0'",
":",
"for",
"record",
"in",
"records",
":",
"yi... | Limit results to the bottom N records.
Use +N to output records starting with the Nth. | [
"Limit",
"results",
"to",
"the",
"bottom",
"N",
"records",
".",
"Use",
"+",
"N",
"to",
"output",
"records",
"starting",
"with",
"the",
"Nth",
"."
] | 1642bb87ba5c171fbd307f9da0f8a0ee1d69d5ed | https://github.com/fhcrc/seqmagick/blob/1642bb87ba5c171fbd307f9da0f8a0ee1d69d5ed/seqmagick/transform.py#L531-L551 | train | 38,974 |
fhcrc/seqmagick | seqmagick/transform.py | gap_proportion | def gap_proportion(sequences, gap_chars='-'):
"""
Generates a list with the proportion of gaps by index in a set of
sequences.
"""
aln_len = None
gaps = []
for i, sequence in enumerate(sequences):
if aln_len is None:
aln_len = len(sequence)
gaps = [0] * aln_len
else:
if not len(sequence) == aln_len:
raise ValueError(("Unexpected sequence length {0}. Is this "
"an alignment?").format(len(sequence)))
# Update any gap positions in gap list
for j, char in enumerate(sequence.seq):
if char in gap_chars:
gaps[j] += 1
sequence_count = float(i + 1)
gap_props = [i / sequence_count for i in gaps]
return gap_props | python | def gap_proportion(sequences, gap_chars='-'):
"""
Generates a list with the proportion of gaps by index in a set of
sequences.
"""
aln_len = None
gaps = []
for i, sequence in enumerate(sequences):
if aln_len is None:
aln_len = len(sequence)
gaps = [0] * aln_len
else:
if not len(sequence) == aln_len:
raise ValueError(("Unexpected sequence length {0}. Is this "
"an alignment?").format(len(sequence)))
# Update any gap positions in gap list
for j, char in enumerate(sequence.seq):
if char in gap_chars:
gaps[j] += 1
sequence_count = float(i + 1)
gap_props = [i / sequence_count for i in gaps]
return gap_props | [
"def",
"gap_proportion",
"(",
"sequences",
",",
"gap_chars",
"=",
"'-'",
")",
":",
"aln_len",
"=",
"None",
"gaps",
"=",
"[",
"]",
"for",
"i",
",",
"sequence",
"in",
"enumerate",
"(",
"sequences",
")",
":",
"if",
"aln_len",
"is",
"None",
":",
"aln_len",... | Generates a list with the proportion of gaps by index in a set of
sequences. | [
"Generates",
"a",
"list",
"with",
"the",
"proportion",
"of",
"gaps",
"by",
"index",
"in",
"a",
"set",
"of",
"sequences",
"."
] | 1642bb87ba5c171fbd307f9da0f8a0ee1d69d5ed | https://github.com/fhcrc/seqmagick/blob/1642bb87ba5c171fbd307f9da0f8a0ee1d69d5ed/seqmagick/transform.py#L554-L577 | train | 38,975 |
fhcrc/seqmagick | seqmagick/transform.py | squeeze | def squeeze(records, gap_threshold=1.0):
"""
Remove any gaps that are present in the same position across all sequences
in an alignment. Takes a second sequence iterator for determining gap
positions.
"""
with _record_buffer(records) as r:
gap_proportions = gap_proportion(r())
keep_columns = [g < gap_threshold for g in gap_proportions]
for record in r():
sequence = str(record.seq)
# Trim
squeezed = itertools.compress(sequence, keep_columns)
yield SeqRecord(Seq(''.join(squeezed)), id=record.id,
description=record.description) | python | def squeeze(records, gap_threshold=1.0):
"""
Remove any gaps that are present in the same position across all sequences
in an alignment. Takes a second sequence iterator for determining gap
positions.
"""
with _record_buffer(records) as r:
gap_proportions = gap_proportion(r())
keep_columns = [g < gap_threshold for g in gap_proportions]
for record in r():
sequence = str(record.seq)
# Trim
squeezed = itertools.compress(sequence, keep_columns)
yield SeqRecord(Seq(''.join(squeezed)), id=record.id,
description=record.description) | [
"def",
"squeeze",
"(",
"records",
",",
"gap_threshold",
"=",
"1.0",
")",
":",
"with",
"_record_buffer",
"(",
"records",
")",
"as",
"r",
":",
"gap_proportions",
"=",
"gap_proportion",
"(",
"r",
"(",
")",
")",
"keep_columns",
"=",
"[",
"g",
"<",
"gap_thres... | Remove any gaps that are present in the same position across all sequences
in an alignment. Takes a second sequence iterator for determining gap
positions. | [
"Remove",
"any",
"gaps",
"that",
"are",
"present",
"in",
"the",
"same",
"position",
"across",
"all",
"sequences",
"in",
"an",
"alignment",
".",
"Takes",
"a",
"second",
"sequence",
"iterator",
"for",
"determining",
"gap",
"positions",
"."
] | 1642bb87ba5c171fbd307f9da0f8a0ee1d69d5ed | https://github.com/fhcrc/seqmagick/blob/1642bb87ba5c171fbd307f9da0f8a0ee1d69d5ed/seqmagick/transform.py#L580-L596 | train | 38,976 |
fhcrc/seqmagick | seqmagick/transform.py | max_length_discard | def max_length_discard(records, max_length):
"""
Discard any records that are longer than max_length.
"""
logging.info('Applying _max_length_discard generator: '
'discarding records longer than '
'.')
for record in records:
if len(record) > max_length:
# Discard
logging.debug('Discarding long sequence: %s, length=%d',
record.id, len(record))
else:
yield record | python | def max_length_discard(records, max_length):
"""
Discard any records that are longer than max_length.
"""
logging.info('Applying _max_length_discard generator: '
'discarding records longer than '
'.')
for record in records:
if len(record) > max_length:
# Discard
logging.debug('Discarding long sequence: %s, length=%d',
record.id, len(record))
else:
yield record | [
"def",
"max_length_discard",
"(",
"records",
",",
"max_length",
")",
":",
"logging",
".",
"info",
"(",
"'Applying _max_length_discard generator: '",
"'discarding records longer than '",
"'.'",
")",
"for",
"record",
"in",
"records",
":",
"if",
"len",
"(",
"record",
"... | Discard any records that are longer than max_length. | [
"Discard",
"any",
"records",
"that",
"are",
"longer",
"than",
"max_length",
"."
] | 1642bb87ba5c171fbd307f9da0f8a0ee1d69d5ed | https://github.com/fhcrc/seqmagick/blob/1642bb87ba5c171fbd307f9da0f8a0ee1d69d5ed/seqmagick/transform.py#L711-L724 | train | 38,977 |
fhcrc/seqmagick | seqmagick/transform.py | min_length_discard | def min_length_discard(records, min_length):
"""
Discard any records that are shorter than min_length.
"""
logging.info('Applying _min_length_discard generator: '
'discarding records shorter than %d.', min_length)
for record in records:
if len(record) < min_length:
logging.debug('Discarding short sequence: %s, length=%d',
record.id, len(record))
else:
yield record | python | def min_length_discard(records, min_length):
"""
Discard any records that are shorter than min_length.
"""
logging.info('Applying _min_length_discard generator: '
'discarding records shorter than %d.', min_length)
for record in records:
if len(record) < min_length:
logging.debug('Discarding short sequence: %s, length=%d',
record.id, len(record))
else:
yield record | [
"def",
"min_length_discard",
"(",
"records",
",",
"min_length",
")",
":",
"logging",
".",
"info",
"(",
"'Applying _min_length_discard generator: '",
"'discarding records shorter than %d.'",
",",
"min_length",
")",
"for",
"record",
"in",
"records",
":",
"if",
"len",
"(... | Discard any records that are shorter than min_length. | [
"Discard",
"any",
"records",
"that",
"are",
"shorter",
"than",
"min_length",
"."
] | 1642bb87ba5c171fbd307f9da0f8a0ee1d69d5ed | https://github.com/fhcrc/seqmagick/blob/1642bb87ba5c171fbd307f9da0f8a0ee1d69d5ed/seqmagick/transform.py#L727-L738 | train | 38,978 |
fhcrc/seqmagick | seqmagick/transform.py | min_ungap_length_discard | def min_ungap_length_discard(records, min_length):
"""
Discard any records that are shorter than min_length after removing gaps.
"""
for record in records:
if len(ungap_all(record)) >= min_length:
yield record | python | def min_ungap_length_discard(records, min_length):
"""
Discard any records that are shorter than min_length after removing gaps.
"""
for record in records:
if len(ungap_all(record)) >= min_length:
yield record | [
"def",
"min_ungap_length_discard",
"(",
"records",
",",
"min_length",
")",
":",
"for",
"record",
"in",
"records",
":",
"if",
"len",
"(",
"ungap_all",
"(",
"record",
")",
")",
">=",
"min_length",
":",
"yield",
"record"
] | Discard any records that are shorter than min_length after removing gaps. | [
"Discard",
"any",
"records",
"that",
"are",
"shorter",
"than",
"min_length",
"after",
"removing",
"gaps",
"."
] | 1642bb87ba5c171fbd307f9da0f8a0ee1d69d5ed | https://github.com/fhcrc/seqmagick/blob/1642bb87ba5c171fbd307f9da0f8a0ee1d69d5ed/seqmagick/transform.py#L741-L747 | train | 38,979 |
fhcrc/seqmagick | seqmagick/subcommands/backtrans_align.py | batch | def batch(iterable, chunk_size):
"""
Return items from iterable in chunk_size bits.
If len(iterable) % chunk_size > 0, the last item returned will be shorter.
"""
i = iter(iterable)
while True:
r = list(itertools.islice(i, chunk_size))
if not r:
break
yield r | python | def batch(iterable, chunk_size):
"""
Return items from iterable in chunk_size bits.
If len(iterable) % chunk_size > 0, the last item returned will be shorter.
"""
i = iter(iterable)
while True:
r = list(itertools.islice(i, chunk_size))
if not r:
break
yield r | [
"def",
"batch",
"(",
"iterable",
",",
"chunk_size",
")",
":",
"i",
"=",
"iter",
"(",
"iterable",
")",
"while",
"True",
":",
"r",
"=",
"list",
"(",
"itertools",
".",
"islice",
"(",
"i",
",",
"chunk_size",
")",
")",
"if",
"not",
"r",
":",
"break",
... | Return items from iterable in chunk_size bits.
If len(iterable) % chunk_size > 0, the last item returned will be shorter. | [
"Return",
"items",
"from",
"iterable",
"in",
"chunk_size",
"bits",
"."
] | 1642bb87ba5c171fbd307f9da0f8a0ee1d69d5ed | https://github.com/fhcrc/seqmagick/blob/1642bb87ba5c171fbd307f9da0f8a0ee1d69d5ed/seqmagick/subcommands/backtrans_align.py#L51-L62 | train | 38,980 |
fhcrc/seqmagick | seqmagick/subcommands/backtrans_align.py | AlignmentMapper._validate_translation | def _validate_translation(self, aligned_prot, aligned_nucl):
"""
Given a seq for protein and nucleotide, ensure that the translation holds
"""
codons = [''.join(i) for i in batch(str(aligned_nucl), 3)]
for codon, aa in zip(codons, str(aligned_prot)):
# Check gaps
if codon == '---' and aa == '-':
continue
try:
trans = self.translation_table.forward_table[codon]
if not trans == aa:
raise ValueError("Codon {0} translates to {1}, not {2}".format(
codon, trans, aa))
except (KeyError, CodonTable.TranslationError):
if aa != 'X':
if self.unknown_action == 'fail':
raise ValueError("Unknown codon: {0} mapped to {1}".format(
codon, aa))
elif self.unknown_action == 'warn':
logging.warn('Cannot verify that unknown codon %s '
'maps to %s', codon, aa)
return True | python | def _validate_translation(self, aligned_prot, aligned_nucl):
"""
Given a seq for protein and nucleotide, ensure that the translation holds
"""
codons = [''.join(i) for i in batch(str(aligned_nucl), 3)]
for codon, aa in zip(codons, str(aligned_prot)):
# Check gaps
if codon == '---' and aa == '-':
continue
try:
trans = self.translation_table.forward_table[codon]
if not trans == aa:
raise ValueError("Codon {0} translates to {1}, not {2}".format(
codon, trans, aa))
except (KeyError, CodonTable.TranslationError):
if aa != 'X':
if self.unknown_action == 'fail':
raise ValueError("Unknown codon: {0} mapped to {1}".format(
codon, aa))
elif self.unknown_action == 'warn':
logging.warn('Cannot verify that unknown codon %s '
'maps to %s', codon, aa)
return True | [
"def",
"_validate_translation",
"(",
"self",
",",
"aligned_prot",
",",
"aligned_nucl",
")",
":",
"codons",
"=",
"[",
"''",
".",
"join",
"(",
"i",
")",
"for",
"i",
"in",
"batch",
"(",
"str",
"(",
"aligned_nucl",
")",
",",
"3",
")",
"]",
"for",
"codon"... | Given a seq for protein and nucleotide, ensure that the translation holds | [
"Given",
"a",
"seq",
"for",
"protein",
"and",
"nucleotide",
"ensure",
"that",
"the",
"translation",
"holds"
] | 1642bb87ba5c171fbd307f9da0f8a0ee1d69d5ed | https://github.com/fhcrc/seqmagick/blob/1642bb87ba5c171fbd307f9da0f8a0ee1d69d5ed/seqmagick/subcommands/backtrans_align.py#L70-L93 | train | 38,981 |
fhcrc/seqmagick | seqmagick/subcommands/backtrans_align.py | AlignmentMapper.map_all | def map_all(self, prot_alignment, nucl_sequences):
"""
Convert protein sequences to nucleotide alignment
"""
zipped = itertools.zip_longest(prot_alignment, nucl_sequences)
for p, n in zipped:
if p is None:
raise ValueError("Exhausted protein sequences")
elif n is None:
raise ValueError("Exhausted nucleotide sequences")
yield self.map_alignment(p, n) | python | def map_all(self, prot_alignment, nucl_sequences):
"""
Convert protein sequences to nucleotide alignment
"""
zipped = itertools.zip_longest(prot_alignment, nucl_sequences)
for p, n in zipped:
if p is None:
raise ValueError("Exhausted protein sequences")
elif n is None:
raise ValueError("Exhausted nucleotide sequences")
yield self.map_alignment(p, n) | [
"def",
"map_all",
"(",
"self",
",",
"prot_alignment",
",",
"nucl_sequences",
")",
":",
"zipped",
"=",
"itertools",
".",
"zip_longest",
"(",
"prot_alignment",
",",
"nucl_sequences",
")",
"for",
"p",
",",
"n",
"in",
"zipped",
":",
"if",
"p",
"is",
"None",
... | Convert protein sequences to nucleotide alignment | [
"Convert",
"protein",
"sequences",
"to",
"nucleotide",
"alignment"
] | 1642bb87ba5c171fbd307f9da0f8a0ee1d69d5ed | https://github.com/fhcrc/seqmagick/blob/1642bb87ba5c171fbd307f9da0f8a0ee1d69d5ed/seqmagick/subcommands/backtrans_align.py#L138-L148 | train | 38,982 |
fhcrc/seqmagick | seqmagick/fileformat.py | from_extension | def from_extension(extension):
"""
Look up the BioPython file type corresponding with input extension.
Look up is case insensitive.
"""
if not extension.startswith('.'):
raise ValueError("Extensions must begin with a period.")
try:
return EXTENSION_TO_TYPE[extension.lower()]
except KeyError:
raise UnknownExtensionError(
"seqmagick does not know how to handle " +
"files with extensions like this: " + extension) | python | def from_extension(extension):
"""
Look up the BioPython file type corresponding with input extension.
Look up is case insensitive.
"""
if not extension.startswith('.'):
raise ValueError("Extensions must begin with a period.")
try:
return EXTENSION_TO_TYPE[extension.lower()]
except KeyError:
raise UnknownExtensionError(
"seqmagick does not know how to handle " +
"files with extensions like this: " + extension) | [
"def",
"from_extension",
"(",
"extension",
")",
":",
"if",
"not",
"extension",
".",
"startswith",
"(",
"'.'",
")",
":",
"raise",
"ValueError",
"(",
"\"Extensions must begin with a period.\"",
")",
"try",
":",
"return",
"EXTENSION_TO_TYPE",
"[",
"extension",
".",
... | Look up the BioPython file type corresponding with input extension.
Look up is case insensitive. | [
"Look",
"up",
"the",
"BioPython",
"file",
"type",
"corresponding",
"with",
"input",
"extension",
"."
] | 1642bb87ba5c171fbd307f9da0f8a0ee1d69d5ed | https://github.com/fhcrc/seqmagick/blob/1642bb87ba5c171fbd307f9da0f8a0ee1d69d5ed/seqmagick/fileformat.py#L45-L58 | train | 38,983 |
fhcrc/seqmagick | seqmagick/fileformat.py | from_filename | def from_filename(file_name):
"""
Look up the BioPython file type corresponding to an input file name.
"""
base, extension = os.path.splitext(file_name)
if extension in COMPRESS_EXT:
# Compressed file
extension = os.path.splitext(base)[1]
return from_extension(extension) | python | def from_filename(file_name):
"""
Look up the BioPython file type corresponding to an input file name.
"""
base, extension = os.path.splitext(file_name)
if extension in COMPRESS_EXT:
# Compressed file
extension = os.path.splitext(base)[1]
return from_extension(extension) | [
"def",
"from_filename",
"(",
"file_name",
")",
":",
"base",
",",
"extension",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"file_name",
")",
"if",
"extension",
"in",
"COMPRESS_EXT",
":",
"# Compressed file",
"extension",
"=",
"os",
".",
"path",
".",
"spli... | Look up the BioPython file type corresponding to an input file name. | [
"Look",
"up",
"the",
"BioPython",
"file",
"type",
"corresponding",
"to",
"an",
"input",
"file",
"name",
"."
] | 1642bb87ba5c171fbd307f9da0f8a0ee1d69d5ed | https://github.com/fhcrc/seqmagick/blob/1642bb87ba5c171fbd307f9da0f8a0ee1d69d5ed/seqmagick/fileformat.py#L61-L69 | train | 38,984 |
fhcrc/seqmagick | seqmagick/fileformat.py | from_handle | def from_handle(fh, stream_default='fasta'):
"""
Look up the BioPython file type corresponding to a file-like object.
For stdin, stdout, and stderr, ``stream_default`` is used.
"""
if fh in (sys.stdin, sys.stdout, sys.stderr):
return stream_default
return from_filename(fh.name) | python | def from_handle(fh, stream_default='fasta'):
"""
Look up the BioPython file type corresponding to a file-like object.
For stdin, stdout, and stderr, ``stream_default`` is used.
"""
if fh in (sys.stdin, sys.stdout, sys.stderr):
return stream_default
return from_filename(fh.name) | [
"def",
"from_handle",
"(",
"fh",
",",
"stream_default",
"=",
"'fasta'",
")",
":",
"if",
"fh",
"in",
"(",
"sys",
".",
"stdin",
",",
"sys",
".",
"stdout",
",",
"sys",
".",
"stderr",
")",
":",
"return",
"stream_default",
"return",
"from_filename",
"(",
"f... | Look up the BioPython file type corresponding to a file-like object.
For stdin, stdout, and stderr, ``stream_default`` is used. | [
"Look",
"up",
"the",
"BioPython",
"file",
"type",
"corresponding",
"to",
"a",
"file",
"-",
"like",
"object",
"."
] | 1642bb87ba5c171fbd307f9da0f8a0ee1d69d5ed | https://github.com/fhcrc/seqmagick/blob/1642bb87ba5c171fbd307f9da0f8a0ee1d69d5ed/seqmagick/fileformat.py#L72-L80 | train | 38,985 |
fhcrc/seqmagick | seqmagick/scripts/cli.py | parse_arguments | def parse_arguments(argv):
"""
Extract command-line arguments for different actions.
"""
parser = argparse.ArgumentParser(description='seqmagick - Manipulate ' + \
' sequence files.', prog='seqmagick')
parser.add_argument('-V', '--version', action='version',
version='seqmagick v' + version,
help="Print the version number and exit")
parser.add_argument('-v', '--verbose', dest='verbosity',
action='count', default=1,
help="Be more verbose. Specify -vv or -vvv for even more")
parser.add_argument('-q', '--quiet', action='store_const', const=0,
dest='verbosity', help="Suppress output")
# Subparsers
subparsers = parser.add_subparsers(dest='subparser_name')
parser_help = subparsers.add_parser('help',
help='Detailed help for actions using help <action>')
parser_help.add_argument('action')
# Add actions
actions = {}
for name, mod in subcommands.itermodules():
subparser = subparsers.add_parser(name, help=mod.__doc__,
description=mod.__doc__)
mod.build_parser(subparser)
actions[name] = mod.action
arguments = parser.parse_args(argv)
arguments.argv = argv
action = arguments.subparser_name
if action == 'help':
return parse_arguments([str(arguments.action), '-h'])
return actions[action], arguments | python | def parse_arguments(argv):
"""
Extract command-line arguments for different actions.
"""
parser = argparse.ArgumentParser(description='seqmagick - Manipulate ' + \
' sequence files.', prog='seqmagick')
parser.add_argument('-V', '--version', action='version',
version='seqmagick v' + version,
help="Print the version number and exit")
parser.add_argument('-v', '--verbose', dest='verbosity',
action='count', default=1,
help="Be more verbose. Specify -vv or -vvv for even more")
parser.add_argument('-q', '--quiet', action='store_const', const=0,
dest='verbosity', help="Suppress output")
# Subparsers
subparsers = parser.add_subparsers(dest='subparser_name')
parser_help = subparsers.add_parser('help',
help='Detailed help for actions using help <action>')
parser_help.add_argument('action')
# Add actions
actions = {}
for name, mod in subcommands.itermodules():
subparser = subparsers.add_parser(name, help=mod.__doc__,
description=mod.__doc__)
mod.build_parser(subparser)
actions[name] = mod.action
arguments = parser.parse_args(argv)
arguments.argv = argv
action = arguments.subparser_name
if action == 'help':
return parse_arguments([str(arguments.action), '-h'])
return actions[action], arguments | [
"def",
"parse_arguments",
"(",
"argv",
")",
":",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"description",
"=",
"'seqmagick - Manipulate '",
"+",
"' sequence files.'",
",",
"prog",
"=",
"'seqmagick'",
")",
"parser",
".",
"add_argument",
"(",
"'-V'",
... | Extract command-line arguments for different actions. | [
"Extract",
"command",
"-",
"line",
"arguments",
"for",
"different",
"actions",
"."
] | 1642bb87ba5c171fbd307f9da0f8a0ee1d69d5ed | https://github.com/fhcrc/seqmagick/blob/1642bb87ba5c171fbd307f9da0f8a0ee1d69d5ed/seqmagick/scripts/cli.py#L32-L71 | train | 38,986 |
fhcrc/seqmagick | seqmagick/subcommands/primer_trim.py | ungap_index_map | def ungap_index_map(sequence, gap_chars='-'):
"""
Returns a dict mapping from an index in the ungapped sequence to an index
in the gapped sequence.
>>> ungap_index_map('AC-TG-')
{0: 0, 1: 1, 2: 3, 3: 4}
"""
counter = itertools.count(0).__next__
ungap_indexes = [
counter() if c not in gap_chars else None for c in iter(sequence)
]
return dict(
(ungapped, gapped)
for ungapped, gapped in zip(ungap_indexes, range(len(sequence)))
if ungapped is not None) | python | def ungap_index_map(sequence, gap_chars='-'):
"""
Returns a dict mapping from an index in the ungapped sequence to an index
in the gapped sequence.
>>> ungap_index_map('AC-TG-')
{0: 0, 1: 1, 2: 3, 3: 4}
"""
counter = itertools.count(0).__next__
ungap_indexes = [
counter() if c not in gap_chars else None for c in iter(sequence)
]
return dict(
(ungapped, gapped)
for ungapped, gapped in zip(ungap_indexes, range(len(sequence)))
if ungapped is not None) | [
"def",
"ungap_index_map",
"(",
"sequence",
",",
"gap_chars",
"=",
"'-'",
")",
":",
"counter",
"=",
"itertools",
".",
"count",
"(",
"0",
")",
".",
"__next__",
"ungap_indexes",
"=",
"[",
"counter",
"(",
")",
"if",
"c",
"not",
"in",
"gap_chars",
"else",
"... | Returns a dict mapping from an index in the ungapped sequence to an index
in the gapped sequence.
>>> ungap_index_map('AC-TG-')
{0: 0, 1: 1, 2: 3, 3: 4} | [
"Returns",
"a",
"dict",
"mapping",
"from",
"an",
"index",
"in",
"the",
"ungapped",
"sequence",
"to",
"an",
"index",
"in",
"the",
"gapped",
"sequence",
"."
] | 1642bb87ba5c171fbd307f9da0f8a0ee1d69d5ed | https://github.com/fhcrc/seqmagick/blob/1642bb87ba5c171fbd307f9da0f8a0ee1d69d5ed/seqmagick/subcommands/primer_trim.py#L63-L78 | train | 38,987 |
fhcrc/seqmagick | seqmagick/subcommands/primer_trim.py | _iupac_ambiguous_equal | def _iupac_ambiguous_equal(ambig_base, unambig_base):
"""
Tests two bases for equality, accounting for IUPAC ambiguous DNA
ambiguous base may be IUPAC ambiguous, unambiguous must be one of ACGT
"""
iupac_translation = {
'A': 'A',
'C': 'C',
'G': 'G',
'T': 'T',
'U': 'U',
'R': 'AG',
'Y': 'CT',
'S': 'GC',
'W': 'AT',
'K': 'GT',
'M': 'AC',
'B': 'CGT',
'D': 'AGT',
'H': 'ACT',
'V': 'ACG',
'N': 'ACGT',
'-': '-'
}
for i in (ambig_base, unambig_base):
if not len(i) == 1:
raise ValueError("only one base may be passed.")
return unambig_base.upper() in iupac_translation[ambig_base.upper()] | python | def _iupac_ambiguous_equal(ambig_base, unambig_base):
"""
Tests two bases for equality, accounting for IUPAC ambiguous DNA
ambiguous base may be IUPAC ambiguous, unambiguous must be one of ACGT
"""
iupac_translation = {
'A': 'A',
'C': 'C',
'G': 'G',
'T': 'T',
'U': 'U',
'R': 'AG',
'Y': 'CT',
'S': 'GC',
'W': 'AT',
'K': 'GT',
'M': 'AC',
'B': 'CGT',
'D': 'AGT',
'H': 'ACT',
'V': 'ACG',
'N': 'ACGT',
'-': '-'
}
for i in (ambig_base, unambig_base):
if not len(i) == 1:
raise ValueError("only one base may be passed.")
return unambig_base.upper() in iupac_translation[ambig_base.upper()] | [
"def",
"_iupac_ambiguous_equal",
"(",
"ambig_base",
",",
"unambig_base",
")",
":",
"iupac_translation",
"=",
"{",
"'A'",
":",
"'A'",
",",
"'C'",
":",
"'C'",
",",
"'G'",
":",
"'G'",
",",
"'T'",
":",
"'T'",
",",
"'U'",
":",
"'U'",
",",
"'R'",
":",
"'AG... | Tests two bases for equality, accounting for IUPAC ambiguous DNA
ambiguous base may be IUPAC ambiguous, unambiguous must be one of ACGT | [
"Tests",
"two",
"bases",
"for",
"equality",
"accounting",
"for",
"IUPAC",
"ambiguous",
"DNA"
] | 1642bb87ba5c171fbd307f9da0f8a0ee1d69d5ed | https://github.com/fhcrc/seqmagick/blob/1642bb87ba5c171fbd307f9da0f8a0ee1d69d5ed/seqmagick/subcommands/primer_trim.py#L93-L122 | train | 38,988 |
fhcrc/seqmagick | seqmagick/subcommands/primer_trim.py | hamming_distance | def hamming_distance(s1, s2, equality_function=operator.eq):
"""
Returns the hamming distance between two strings.
"""
if not len(s1) == len(s2):
raise ValueError("String lengths are not equal")
# Number of non-matching characters:
return sum(not equality_function(c1, c2) for c1, c2 in zip(s1, s2)) | python | def hamming_distance(s1, s2, equality_function=operator.eq):
"""
Returns the hamming distance between two strings.
"""
if not len(s1) == len(s2):
raise ValueError("String lengths are not equal")
# Number of non-matching characters:
return sum(not equality_function(c1, c2) for c1, c2 in zip(s1, s2)) | [
"def",
"hamming_distance",
"(",
"s1",
",",
"s2",
",",
"equality_function",
"=",
"operator",
".",
"eq",
")",
":",
"if",
"not",
"len",
"(",
"s1",
")",
"==",
"len",
"(",
"s2",
")",
":",
"raise",
"ValueError",
"(",
"\"String lengths are not equal\"",
")",
"#... | Returns the hamming distance between two strings. | [
"Returns",
"the",
"hamming",
"distance",
"between",
"two",
"strings",
"."
] | 1642bb87ba5c171fbd307f9da0f8a0ee1d69d5ed | https://github.com/fhcrc/seqmagick/blob/1642bb87ba5c171fbd307f9da0f8a0ee1d69d5ed/seqmagick/subcommands/primer_trim.py#L125-L133 | train | 38,989 |
fhcrc/seqmagick | seqmagick/subcommands/primer_trim.py | trim | def trim(sequences, start, end):
"""
Slice the input sequences from start to end
"""
logging.info("Trimming from %d to %d", start, end)
return (sequence[start:end] for sequence in sequences) | python | def trim(sequences, start, end):
"""
Slice the input sequences from start to end
"""
logging.info("Trimming from %d to %d", start, end)
return (sequence[start:end] for sequence in sequences) | [
"def",
"trim",
"(",
"sequences",
",",
"start",
",",
"end",
")",
":",
"logging",
".",
"info",
"(",
"\"Trimming from %d to %d\"",
",",
"start",
",",
"end",
")",
"return",
"(",
"sequence",
"[",
"start",
":",
"end",
"]",
"for",
"sequence",
"in",
"sequences",... | Slice the input sequences from start to end | [
"Slice",
"the",
"input",
"sequences",
"from",
"start",
"to",
"end"
] | 1642bb87ba5c171fbd307f9da0f8a0ee1d69d5ed | https://github.com/fhcrc/seqmagick/blob/1642bb87ba5c171fbd307f9da0f8a0ee1d69d5ed/seqmagick/subcommands/primer_trim.py#L275-L280 | train | 38,990 |
fhcrc/seqmagick | seqmagick/subcommands/primer_trim.py | action | def action(arguments):
"""
Trim the alignment as specified
"""
# Determine file format for input and output
source_format = (arguments.source_format or
fileformat.from_handle(arguments.source_file))
output_format = (arguments.output_format or
fileformat.from_handle(arguments.output_file))
# Load the alignment
with arguments.source_file:
sequences = SeqIO.parse(
arguments.source_file,
source_format,
alphabet=Alphabet.Gapped(Alphabet.single_letter_alphabet))
# Locate primers
(forward_start, forward_end), (reverse_start, reverse_end) = locate_primers(
sequences, arguments.forward_primer,
arguments.reverse_primer, arguments.reverse_complement,
arguments.max_hamming_distance)
# Generate slice indexes
if arguments.include_primers:
start = forward_start
end = reverse_end + 1
else:
start = forward_end + 1
end = reverse_start
# Rewind the input file
arguments.source_file.seek(0)
sequences = SeqIO.parse(
arguments.source_file,
source_format,
alphabet=Alphabet.Gapped(Alphabet.single_letter_alphabet))
# Apply the transformation
prune_action = _ACTIONS[arguments.prune_action]
transformed_sequences = prune_action(sequences, start, end)
with arguments.output_file:
SeqIO.write(transformed_sequences, arguments.output_file,
output_format) | python | def action(arguments):
"""
Trim the alignment as specified
"""
# Determine file format for input and output
source_format = (arguments.source_format or
fileformat.from_handle(arguments.source_file))
output_format = (arguments.output_format or
fileformat.from_handle(arguments.output_file))
# Load the alignment
with arguments.source_file:
sequences = SeqIO.parse(
arguments.source_file,
source_format,
alphabet=Alphabet.Gapped(Alphabet.single_letter_alphabet))
# Locate primers
(forward_start, forward_end), (reverse_start, reverse_end) = locate_primers(
sequences, arguments.forward_primer,
arguments.reverse_primer, arguments.reverse_complement,
arguments.max_hamming_distance)
# Generate slice indexes
if arguments.include_primers:
start = forward_start
end = reverse_end + 1
else:
start = forward_end + 1
end = reverse_start
# Rewind the input file
arguments.source_file.seek(0)
sequences = SeqIO.parse(
arguments.source_file,
source_format,
alphabet=Alphabet.Gapped(Alphabet.single_letter_alphabet))
# Apply the transformation
prune_action = _ACTIONS[arguments.prune_action]
transformed_sequences = prune_action(sequences, start, end)
with arguments.output_file:
SeqIO.write(transformed_sequences, arguments.output_file,
output_format) | [
"def",
"action",
"(",
"arguments",
")",
":",
"# Determine file format for input and output",
"source_format",
"=",
"(",
"arguments",
".",
"source_format",
"or",
"fileformat",
".",
"from_handle",
"(",
"arguments",
".",
"source_file",
")",
")",
"output_format",
"=",
"... | Trim the alignment as specified | [
"Trim",
"the",
"alignment",
"as",
"specified"
] | 1642bb87ba5c171fbd307f9da0f8a0ee1d69d5ed | https://github.com/fhcrc/seqmagick/blob/1642bb87ba5c171fbd307f9da0f8a0ee1d69d5ed/seqmagick/subcommands/primer_trim.py#L287-L331 | train | 38,991 |
fhcrc/seqmagick | examples/apply-function/myfunctions.py | hash_starts_numeric | def hash_starts_numeric(records):
"""
Very useful function that only accepts records with a numeric start to
their sha-1 hash.
"""
for record in records:
seq_hash = hashlib.sha1(str(record.seq)).hexdigest()
if seq_hash[0].isdigit():
yield record | python | def hash_starts_numeric(records):
"""
Very useful function that only accepts records with a numeric start to
their sha-1 hash.
"""
for record in records:
seq_hash = hashlib.sha1(str(record.seq)).hexdigest()
if seq_hash[0].isdigit():
yield record | [
"def",
"hash_starts_numeric",
"(",
"records",
")",
":",
"for",
"record",
"in",
"records",
":",
"seq_hash",
"=",
"hashlib",
".",
"sha1",
"(",
"str",
"(",
"record",
".",
"seq",
")",
")",
".",
"hexdigest",
"(",
")",
"if",
"seq_hash",
"[",
"0",
"]",
".",... | Very useful function that only accepts records with a numeric start to
their sha-1 hash. | [
"Very",
"useful",
"function",
"that",
"only",
"accepts",
"records",
"with",
"a",
"numeric",
"start",
"to",
"their",
"sha",
"-",
"1",
"hash",
"."
] | 1642bb87ba5c171fbd307f9da0f8a0ee1d69d5ed | https://github.com/fhcrc/seqmagick/blob/1642bb87ba5c171fbd307f9da0f8a0ee1d69d5ed/examples/apply-function/myfunctions.py#L11-L19 | train | 38,992 |
fhcrc/seqmagick | seqmagick/subcommands/common.py | atomic_write | def atomic_write(path, mode='wt', permissions=None, file_factory=None, **kwargs):
"""
Open a file for atomic writing.
Generates a temp file, renames to value of ``path``.
Arguments:
``permissions``: Permissions to set (default: umask)
``file_factory``: If given, the handle yielded will be the result of
calling file_factory(path)
Additional arguments are passed to tempfile.NamedTemporaryFile
"""
if permissions is None:
permissions = apply_umask()
# Handle stdout:
if path == '-':
yield sys.stdout
else:
base_dir = os.path.dirname(path)
kwargs['suffix'] = os.path.basename(path)
tf = tempfile.NamedTemporaryFile(
dir=base_dir, mode=mode, delete=False, **kwargs)
# If a file_factory is given, close, and re-open a handle using the
# file_factory
if file_factory is not None:
tf.close()
tf = file_factory(tf.name)
try:
with tf:
yield tf
# Move
os.rename(tf.name, path)
os.chmod(path, permissions)
except:
os.remove(tf.name)
raise | python | def atomic_write(path, mode='wt', permissions=None, file_factory=None, **kwargs):
"""
Open a file for atomic writing.
Generates a temp file, renames to value of ``path``.
Arguments:
``permissions``: Permissions to set (default: umask)
``file_factory``: If given, the handle yielded will be the result of
calling file_factory(path)
Additional arguments are passed to tempfile.NamedTemporaryFile
"""
if permissions is None:
permissions = apply_umask()
# Handle stdout:
if path == '-':
yield sys.stdout
else:
base_dir = os.path.dirname(path)
kwargs['suffix'] = os.path.basename(path)
tf = tempfile.NamedTemporaryFile(
dir=base_dir, mode=mode, delete=False, **kwargs)
# If a file_factory is given, close, and re-open a handle using the
# file_factory
if file_factory is not None:
tf.close()
tf = file_factory(tf.name)
try:
with tf:
yield tf
# Move
os.rename(tf.name, path)
os.chmod(path, permissions)
except:
os.remove(tf.name)
raise | [
"def",
"atomic_write",
"(",
"path",
",",
"mode",
"=",
"'wt'",
",",
"permissions",
"=",
"None",
",",
"file_factory",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"permissions",
"is",
"None",
":",
"permissions",
"=",
"apply_umask",
"(",
")",
"# ... | Open a file for atomic writing.
Generates a temp file, renames to value of ``path``.
Arguments:
``permissions``: Permissions to set (default: umask)
``file_factory``: If given, the handle yielded will be the result of
calling file_factory(path)
Additional arguments are passed to tempfile.NamedTemporaryFile | [
"Open",
"a",
"file",
"for",
"atomic",
"writing",
"."
] | 1642bb87ba5c171fbd307f9da0f8a0ee1d69d5ed | https://github.com/fhcrc/seqmagick/blob/1642bb87ba5c171fbd307f9da0f8a0ee1d69d5ed/seqmagick/subcommands/common.py#L35-L72 | train | 38,993 |
fhcrc/seqmagick | seqmagick/subcommands/common.py | typed_range | def typed_range(type_func, minimum, maximum):
"""
Require variables to be of the specified type, between minimum and maximum
"""
@functools.wraps(type_func)
def inner(string):
result = type_func(string)
if not result >= minimum and result <= maximum:
raise argparse.ArgumentTypeError(
"Please provide a value between {0} and {1}".format(
minimum, maximum))
return result
return inner | python | def typed_range(type_func, minimum, maximum):
"""
Require variables to be of the specified type, between minimum and maximum
"""
@functools.wraps(type_func)
def inner(string):
result = type_func(string)
if not result >= minimum and result <= maximum:
raise argparse.ArgumentTypeError(
"Please provide a value between {0} and {1}".format(
minimum, maximum))
return result
return inner | [
"def",
"typed_range",
"(",
"type_func",
",",
"minimum",
",",
"maximum",
")",
":",
"@",
"functools",
".",
"wraps",
"(",
"type_func",
")",
"def",
"inner",
"(",
"string",
")",
":",
"result",
"=",
"type_func",
"(",
"string",
")",
"if",
"not",
"result",
">=... | Require variables to be of the specified type, between minimum and maximum | [
"Require",
"variables",
"to",
"be",
"of",
"the",
"specified",
"type",
"between",
"minimum",
"and",
"maximum"
] | 1642bb87ba5c171fbd307f9da0f8a0ee1d69d5ed | https://github.com/fhcrc/seqmagick/blob/1642bb87ba5c171fbd307f9da0f8a0ee1d69d5ed/seqmagick/subcommands/common.py#L110-L122 | train | 38,994 |
fhcrc/seqmagick | seqmagick/subcommands/common.py | partial_append_action | def partial_append_action(fn, argument_keys=None):
"""
Creates a new class extending argparse.Action, which appends a
partially-applied function to dest.
The optional argument_keys argument should either be None (no additional
arguments to fn) or an iterable of function keys to partially apply.
"""
if isinstance(argument_keys, str):
argument_keys = [argument_keys]
argument_keys = argument_keys or []
class PartialAppendAction(argparse.Action):
def __init__(self,
option_strings,
dest,
const=None,
default=None,
required=False,
help=None,
type=None,
metavar=None,
nargs=None,
**kwargs):
super(PartialAppendAction, self).__init__(
option_strings=option_strings,
dest=dest,
nargs=len(argument_keys),
const=const,
default=default,
required=required,
metavar=metavar,
type=type,
help=help, **kwargs)
def __call__(self, parser, namespace, values, option_string=None):
items = copy.copy(getattr(namespace, self.dest, None)) or []
# If no value was set default to empty list
if values is None:
values = []
elif not isinstance(values, list):
values = [values]
if len(argument_keys) != len(values):
raise ValueError("Unexpected number of values")
# Generate keyword arguments for the input function
kwargs = dict(list(zip(argument_keys, values)))
f = functools.partial(fn, **kwargs)
items.append(f)
setattr(namespace, self.dest, items)
return PartialAppendAction | python | def partial_append_action(fn, argument_keys=None):
"""
Creates a new class extending argparse.Action, which appends a
partially-applied function to dest.
The optional argument_keys argument should either be None (no additional
arguments to fn) or an iterable of function keys to partially apply.
"""
if isinstance(argument_keys, str):
argument_keys = [argument_keys]
argument_keys = argument_keys or []
class PartialAppendAction(argparse.Action):
def __init__(self,
option_strings,
dest,
const=None,
default=None,
required=False,
help=None,
type=None,
metavar=None,
nargs=None,
**kwargs):
super(PartialAppendAction, self).__init__(
option_strings=option_strings,
dest=dest,
nargs=len(argument_keys),
const=const,
default=default,
required=required,
metavar=metavar,
type=type,
help=help, **kwargs)
def __call__(self, parser, namespace, values, option_string=None):
items = copy.copy(getattr(namespace, self.dest, None)) or []
# If no value was set default to empty list
if values is None:
values = []
elif not isinstance(values, list):
values = [values]
if len(argument_keys) != len(values):
raise ValueError("Unexpected number of values")
# Generate keyword arguments for the input function
kwargs = dict(list(zip(argument_keys, values)))
f = functools.partial(fn, **kwargs)
items.append(f)
setattr(namespace, self.dest, items)
return PartialAppendAction | [
"def",
"partial_append_action",
"(",
"fn",
",",
"argument_keys",
"=",
"None",
")",
":",
"if",
"isinstance",
"(",
"argument_keys",
",",
"str",
")",
":",
"argument_keys",
"=",
"[",
"argument_keys",
"]",
"argument_keys",
"=",
"argument_keys",
"or",
"[",
"]",
"c... | Creates a new class extending argparse.Action, which appends a
partially-applied function to dest.
The optional argument_keys argument should either be None (no additional
arguments to fn) or an iterable of function keys to partially apply. | [
"Creates",
"a",
"new",
"class",
"extending",
"argparse",
".",
"Action",
"which",
"appends",
"a",
"partially",
"-",
"applied",
"function",
"to",
"dest",
"."
] | 1642bb87ba5c171fbd307f9da0f8a0ee1d69d5ed | https://github.com/fhcrc/seqmagick/blob/1642bb87ba5c171fbd307f9da0f8a0ee1d69d5ed/seqmagick/subcommands/common.py#L125-L178 | train | 38,995 |
fhcrc/seqmagick | seqmagick/subcommands/common.py | positive_value | def positive_value(target_type):
"""
Wraps target_type in a function that requires the parsed argument
be >= 0
"""
def inner(string):
value = target_type(string)
if not value >= 0:
raise argparse.ArgumentTypeError("Invalid positive number: " +
string)
return value
return inner | python | def positive_value(target_type):
"""
Wraps target_type in a function that requires the parsed argument
be >= 0
"""
def inner(string):
value = target_type(string)
if not value >= 0:
raise argparse.ArgumentTypeError("Invalid positive number: " +
string)
return value
return inner | [
"def",
"positive_value",
"(",
"target_type",
")",
":",
"def",
"inner",
"(",
"string",
")",
":",
"value",
"=",
"target_type",
"(",
"string",
")",
"if",
"not",
"value",
">=",
"0",
":",
"raise",
"argparse",
".",
"ArgumentTypeError",
"(",
"\"Invalid positive num... | Wraps target_type in a function that requires the parsed argument
be >= 0 | [
"Wraps",
"target_type",
"in",
"a",
"function",
"that",
"requires",
"the",
"parsed",
"argument",
"be",
">",
"=",
"0"
] | 1642bb87ba5c171fbd307f9da0f8a0ee1d69d5ed | https://github.com/fhcrc/seqmagick/blob/1642bb87ba5c171fbd307f9da0f8a0ee1d69d5ed/seqmagick/subcommands/common.py#L181-L193 | train | 38,996 |
fhcrc/seqmagick | seqmagick/subcommands/convert.py | build_parser | def build_parser(parser):
"""
Add shared arguments to the convert or mogrify parser.
"""
add_options(parser)
parser.add_argument('source_file', type=common.FileType('rt'),
help="Input sequence file")
parser.add_argument('dest_file', help="Output file")
return parser | python | def build_parser(parser):
"""
Add shared arguments to the convert or mogrify parser.
"""
add_options(parser)
parser.add_argument('source_file', type=common.FileType('rt'),
help="Input sequence file")
parser.add_argument('dest_file', help="Output file")
return parser | [
"def",
"build_parser",
"(",
"parser",
")",
":",
"add_options",
"(",
"parser",
")",
"parser",
".",
"add_argument",
"(",
"'source_file'",
",",
"type",
"=",
"common",
".",
"FileType",
"(",
"'rt'",
")",
",",
"help",
"=",
"\"Input sequence file\"",
")",
"parser",... | Add shared arguments to the convert or mogrify parser. | [
"Add",
"shared",
"arguments",
"to",
"the",
"convert",
"or",
"mogrify",
"parser",
"."
] | 1642bb87ba5c171fbd307f9da0f8a0ee1d69d5ed | https://github.com/fhcrc/seqmagick/blob/1642bb87ba5c171fbd307f9da0f8a0ee1d69d5ed/seqmagick/subcommands/convert.py#L222-L231 | train | 38,997 |
phac-nml/sistr_cmd | sistr/src/parsers.py | parse_fasta | def parse_fasta(filepath):
'''
Parse a fasta file returning a generator yielding tuples of fasta headers to sequences.
Note:
This function should give equivalent results to SeqIO from BioPython
.. code-block:: python
from Bio import SeqIO
# biopython to dict of header-seq
hseqs_bio = {r.description:str(r.seq) for r in SeqIO.parse(fasta_path, 'fasta')}
# this func to dict of header-seq
hseqs = {header:seq for header, seq in parse_fasta(fasta_path)}
# both methods should return the same dict
assert hseqs == hseqs_bio
Args:
filepath (str): Fasta file path
Returns:
generator: yields tuples of (<fasta header>, <fasta sequence>)
'''
with open(filepath, 'r') as f:
seqs = []
header = ''
for line in f:
line = line.strip()
if line == '':
continue
if line[0] == '>':
if header == '':
header = line.replace('>','')
else:
yield header, ''.join(seqs)
seqs = []
header = line.replace('>','')
else:
seqs.append(line)
yield header, ''.join(seqs) | python | def parse_fasta(filepath):
'''
Parse a fasta file returning a generator yielding tuples of fasta headers to sequences.
Note:
This function should give equivalent results to SeqIO from BioPython
.. code-block:: python
from Bio import SeqIO
# biopython to dict of header-seq
hseqs_bio = {r.description:str(r.seq) for r in SeqIO.parse(fasta_path, 'fasta')}
# this func to dict of header-seq
hseqs = {header:seq for header, seq in parse_fasta(fasta_path)}
# both methods should return the same dict
assert hseqs == hseqs_bio
Args:
filepath (str): Fasta file path
Returns:
generator: yields tuples of (<fasta header>, <fasta sequence>)
'''
with open(filepath, 'r') as f:
seqs = []
header = ''
for line in f:
line = line.strip()
if line == '':
continue
if line[0] == '>':
if header == '':
header = line.replace('>','')
else:
yield header, ''.join(seqs)
seqs = []
header = line.replace('>','')
else:
seqs.append(line)
yield header, ''.join(seqs) | [
"def",
"parse_fasta",
"(",
"filepath",
")",
":",
"with",
"open",
"(",
"filepath",
",",
"'r'",
")",
"as",
"f",
":",
"seqs",
"=",
"[",
"]",
"header",
"=",
"''",
"for",
"line",
"in",
"f",
":",
"line",
"=",
"line",
".",
"strip",
"(",
")",
"if",
"li... | Parse a fasta file returning a generator yielding tuples of fasta headers to sequences.
Note:
This function should give equivalent results to SeqIO from BioPython
.. code-block:: python
from Bio import SeqIO
# biopython to dict of header-seq
hseqs_bio = {r.description:str(r.seq) for r in SeqIO.parse(fasta_path, 'fasta')}
# this func to dict of header-seq
hseqs = {header:seq for header, seq in parse_fasta(fasta_path)}
# both methods should return the same dict
assert hseqs == hseqs_bio
Args:
filepath (str): Fasta file path
Returns:
generator: yields tuples of (<fasta header>, <fasta sequence>) | [
"Parse",
"a",
"fasta",
"file",
"returning",
"a",
"generator",
"yielding",
"tuples",
"of",
"fasta",
"headers",
"to",
"sequences",
"."
] | 4630fae72439723b354a94b94fbe76ad2f9f6295 | https://github.com/phac-nml/sistr_cmd/blob/4630fae72439723b354a94b94fbe76ad2f9f6295/sistr/src/parsers.py#L22-L61 | train | 38,998 |
phac-nml/sistr_cmd | sistr/src/parsers.py | fasta_format_check | def fasta_format_check(fasta_path, logger):
"""
Check that a file is valid FASTA format.
- First non-blank line needs to begin with a '>' header character.
- Sequence can only contain valid IUPAC nucleotide characters
Args:
fasta_str (str): FASTA file contents string
Raises:
Exception: If invalid FASTA format
"""
header_count = 0
line_count = 1
nt_count = 0
with open(fasta_path) as f:
for l in f:
l = l.strip()
if l == '':
continue
if l[0] == '>':
header_count += 1
continue
if header_count == 0 and l[0] != '>':
error_msg = 'First non-blank line (L:{line_count}) does not contain FASTA header. Line beginning with ">" expected.' \
.format(line_count=line_count)
logger.error(error_msg)
raise Exception(error_msg)
non_nucleotide_chars_in_line = set(l) - VALID_NUCLEOTIDES
if len(non_nucleotide_chars_in_line) > 0:
error_msg = 'Line {line} contains the following non-nucleotide characters: {non_nt_chars}' \
.format(line=line_count,
non_nt_chars=', '.join([x for x in non_nucleotide_chars_in_line]))
logger.error(error_msg)
raise Exception(error_msg)
nt_count += len(l)
line_count += 1
if nt_count == 0:
error_msg = 'File "{}" does not contain any nucleotide sequence.'.format(fasta_path)
logger.error(error_msg)
raise Exception(error_msg)
logger.info('Valid FASTA format "{}" ({} bp)'.format(fasta_path, nt_count)) | python | def fasta_format_check(fasta_path, logger):
"""
Check that a file is valid FASTA format.
- First non-blank line needs to begin with a '>' header character.
- Sequence can only contain valid IUPAC nucleotide characters
Args:
fasta_str (str): FASTA file contents string
Raises:
Exception: If invalid FASTA format
"""
header_count = 0
line_count = 1
nt_count = 0
with open(fasta_path) as f:
for l in f:
l = l.strip()
if l == '':
continue
if l[0] == '>':
header_count += 1
continue
if header_count == 0 and l[0] != '>':
error_msg = 'First non-blank line (L:{line_count}) does not contain FASTA header. Line beginning with ">" expected.' \
.format(line_count=line_count)
logger.error(error_msg)
raise Exception(error_msg)
non_nucleotide_chars_in_line = set(l) - VALID_NUCLEOTIDES
if len(non_nucleotide_chars_in_line) > 0:
error_msg = 'Line {line} contains the following non-nucleotide characters: {non_nt_chars}' \
.format(line=line_count,
non_nt_chars=', '.join([x for x in non_nucleotide_chars_in_line]))
logger.error(error_msg)
raise Exception(error_msg)
nt_count += len(l)
line_count += 1
if nt_count == 0:
error_msg = 'File "{}" does not contain any nucleotide sequence.'.format(fasta_path)
logger.error(error_msg)
raise Exception(error_msg)
logger.info('Valid FASTA format "{}" ({} bp)'.format(fasta_path, nt_count)) | [
"def",
"fasta_format_check",
"(",
"fasta_path",
",",
"logger",
")",
":",
"header_count",
"=",
"0",
"line_count",
"=",
"1",
"nt_count",
"=",
"0",
"with",
"open",
"(",
"fasta_path",
")",
"as",
"f",
":",
"for",
"l",
"in",
"f",
":",
"l",
"=",
"l",
".",
... | Check that a file is valid FASTA format.
- First non-blank line needs to begin with a '>' header character.
- Sequence can only contain valid IUPAC nucleotide characters
Args:
fasta_str (str): FASTA file contents string
Raises:
Exception: If invalid FASTA format | [
"Check",
"that",
"a",
"file",
"is",
"valid",
"FASTA",
"format",
"."
] | 4630fae72439723b354a94b94fbe76ad2f9f6295 | https://github.com/phac-nml/sistr_cmd/blob/4630fae72439723b354a94b94fbe76ad2f9f6295/sistr/src/parsers.py#L64-L110 | train | 38,999 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.