repo stringlengths 7 55 | path stringlengths 4 127 | func_name stringlengths 1 88 | original_string stringlengths 75 19.8k | language stringclasses 1
value | code stringlengths 75 19.8k | code_tokens listlengths 20 707 | docstring stringlengths 3 17.3k | docstring_tokens listlengths 3 222 | sha stringlengths 40 40 | url stringlengths 87 242 | partition stringclasses 1
value | idx int64 0 252k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
tanghaibao/jcvi | jcvi/apps/base.py | touch | def touch(args):
"""
%prog touch timestamp.info
Recover timestamps for files in the current folder.
CAUTION: you must execute this in the same directory as timestamp().
"""
from time import ctime
p = OptionParser(touch.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
info, = args
fp = open(info)
for row in fp:
path, atime, mtime = row.split()
atime = float(atime)
mtime = float(mtime)
current_atime, current_mtime = get_times(path)
# Check if the time has changed, with resolution up to 1 sec
if int(atime) == int(current_atime) and \
int(mtime) == int(current_mtime):
continue
times = [ctime(x) for x in (current_atime, current_mtime, atime, mtime)]
msg = "{0} : ".format(path)
msg += "({0}, {1}) => ({2}, {3})".format(*times)
print(msg, file=sys.stderr)
os.utime(path, (atime, mtime)) | python | def touch(args):
"""
%prog touch timestamp.info
Recover timestamps for files in the current folder.
CAUTION: you must execute this in the same directory as timestamp().
"""
from time import ctime
p = OptionParser(touch.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
info, = args
fp = open(info)
for row in fp:
path, atime, mtime = row.split()
atime = float(atime)
mtime = float(mtime)
current_atime, current_mtime = get_times(path)
# Check if the time has changed, with resolution up to 1 sec
if int(atime) == int(current_atime) and \
int(mtime) == int(current_mtime):
continue
times = [ctime(x) for x in (current_atime, current_mtime, atime, mtime)]
msg = "{0} : ".format(path)
msg += "({0}, {1}) => ({2}, {3})".format(*times)
print(msg, file=sys.stderr)
os.utime(path, (atime, mtime)) | [
"def",
"touch",
"(",
"args",
")",
":",
"from",
"time",
"import",
"ctime",
"p",
"=",
"OptionParser",
"(",
"touch",
".",
"__doc__",
")",
"opts",
",",
"args",
"=",
"p",
".",
"parse_args",
"(",
"args",
")",
"if",
"len",
"(",
"args",
")",
"!=",
"1",
"... | %prog touch timestamp.info
Recover timestamps for files in the current folder.
CAUTION: you must execute this in the same directory as timestamp(). | [
"%prog",
"touch",
"timestamp",
".",
"info"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/apps/base.py#L1132-L1164 | train | 200,900 |
tanghaibao/jcvi | jcvi/apps/base.py | less | def less(args):
"""
%prog less filename position | less
Enhance the unix `less` command by seeking to a file location first. This is
useful to browse big files. Position is relative 0.00 - 1.00, or bytenumber.
$ %prog less myfile 0.1 # Go to 10% of the current file and streaming
$ %prog less myfile 0.1,0.2 # Stream at several positions
$ %prog less myfile 100 # Go to certain byte number and streaming
$ %prog less myfile 100,200 # Stream at several positions
$ %prog less myfile all # Generate a snapshot every 10% (10%, 20%, ..)
"""
from jcvi.formats.base import must_open
p = OptionParser(less.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
filename, pos = args
fsize = getfilesize(filename)
if pos == "all":
pos = [x / 10. for x in range(0, 10)]
else:
pos = [float(x) for x in pos.split(",")]
if pos[0] > 1:
pos = [x / fsize for x in pos]
if len(pos) > 1:
counts = 20
else:
counts = None
fp = must_open(filename)
for p in pos:
snapshot(fp, p, fsize, counts=counts) | python | def less(args):
"""
%prog less filename position | less
Enhance the unix `less` command by seeking to a file location first. This is
useful to browse big files. Position is relative 0.00 - 1.00, or bytenumber.
$ %prog less myfile 0.1 # Go to 10% of the current file and streaming
$ %prog less myfile 0.1,0.2 # Stream at several positions
$ %prog less myfile 100 # Go to certain byte number and streaming
$ %prog less myfile 100,200 # Stream at several positions
$ %prog less myfile all # Generate a snapshot every 10% (10%, 20%, ..)
"""
from jcvi.formats.base import must_open
p = OptionParser(less.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
filename, pos = args
fsize = getfilesize(filename)
if pos == "all":
pos = [x / 10. for x in range(0, 10)]
else:
pos = [float(x) for x in pos.split(",")]
if pos[0] > 1:
pos = [x / fsize for x in pos]
if len(pos) > 1:
counts = 20
else:
counts = None
fp = must_open(filename)
for p in pos:
snapshot(fp, p, fsize, counts=counts) | [
"def",
"less",
"(",
"args",
")",
":",
"from",
"jcvi",
".",
"formats",
".",
"base",
"import",
"must_open",
"p",
"=",
"OptionParser",
"(",
"less",
".",
"__doc__",
")",
"opts",
",",
"args",
"=",
"p",
".",
"parse_args",
"(",
"args",
")",
"if",
"len",
"... | %prog less filename position | less
Enhance the unix `less` command by seeking to a file location first. This is
useful to browse big files. Position is relative 0.00 - 1.00, or bytenumber.
$ %prog less myfile 0.1 # Go to 10% of the current file and streaming
$ %prog less myfile 0.1,0.2 # Stream at several positions
$ %prog less myfile 100 # Go to certain byte number and streaming
$ %prog less myfile 100,200 # Stream at several positions
$ %prog less myfile all # Generate a snapshot every 10% (10%, 20%, ..) | [
"%prog",
"less",
"filename",
"position",
"|",
"less"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/apps/base.py#L1182-L1221 | train | 200,901 |
tanghaibao/jcvi | jcvi/apps/base.py | pushover | def pushover(message, token, user, title="JCVI: Job Monitor", \
priority=0, timestamp=None):
"""
pushover.net python API
<https://pushover.net/faq#library-python>
"""
assert -1 <= priority <= 2, \
"Priority should be an int() between -1 and 2"
if timestamp == None:
from time import time
timestamp = int(time())
retry, expire = (300, 3600) if priority == 2 \
else (None, None)
conn = HTTPSConnection("api.pushover.net:443")
conn.request("POST", "/1/messages.json",
urlencode({
"token": token,
"user": user,
"message": message,
"title": title,
"priority": priority,
"timestamp": timestamp,
"retry": retry,
"expire": expire,
}), { "Content-type": "application/x-www-form-urlencoded" })
conn.getresponse() | python | def pushover(message, token, user, title="JCVI: Job Monitor", \
priority=0, timestamp=None):
"""
pushover.net python API
<https://pushover.net/faq#library-python>
"""
assert -1 <= priority <= 2, \
"Priority should be an int() between -1 and 2"
if timestamp == None:
from time import time
timestamp = int(time())
retry, expire = (300, 3600) if priority == 2 \
else (None, None)
conn = HTTPSConnection("api.pushover.net:443")
conn.request("POST", "/1/messages.json",
urlencode({
"token": token,
"user": user,
"message": message,
"title": title,
"priority": priority,
"timestamp": timestamp,
"retry": retry,
"expire": expire,
}), { "Content-type": "application/x-www-form-urlencoded" })
conn.getresponse() | [
"def",
"pushover",
"(",
"message",
",",
"token",
",",
"user",
",",
"title",
"=",
"\"JCVI: Job Monitor\"",
",",
"priority",
"=",
"0",
",",
"timestamp",
"=",
"None",
")",
":",
"assert",
"-",
"1",
"<=",
"priority",
"<=",
"2",
",",
"\"Priority should be an int... | pushover.net python API
<https://pushover.net/faq#library-python> | [
"pushover",
".",
"net",
"python",
"API"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/apps/base.py#L1228-L1257 | train | 200,902 |
tanghaibao/jcvi | jcvi/apps/base.py | pushnotify | def pushnotify(subject, message, api="pushover", priority=0, timestamp=None):
"""
Send push notifications using pre-existing APIs
Requires a config `pushnotify.ini` file in the user home area containing
the necessary api tokens and user keys.
Default API: "pushover"
Config file format:
-------------------
[pushover]
token: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
user: yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy
[nma]
apikey: zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
[pushbullet]
apikey: bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb
iden: dddddddddddddddddddddddddddddddddddd
"""
import types
assert type(priority) is int and -1 <= priority <= 2, \
"Priority should be and int() between -1 and 2"
cfgfile = op.join(op.expanduser("~"), "pushnotify.ini")
Config = ConfigParser()
if op.exists(cfgfile):
Config.read(cfgfile)
else:
sys.exit("Push notification config file `{0}`".format(cfgfile) + \
" does not exist!")
if api == "pushover":
cfg = ConfigSectionMap(Config, api)
token, key = cfg["token"], cfg["user"]
pushover(message, token, key, title=subject, \
priority=priority, timestamp=timestamp)
elif api == "nma":
cfg = ConfigSectionMap(Config, api)
apikey = cfg["apikey"]
nma(message, apikey, event=subject, \
priority=priority)
elif api == "pushbullet":
cfg = ConfigSectionMap(Config, api)
apikey, iden = cfg["apikey"], cfg['iden']
pushbullet(message, apikey, iden, title=subject, \
type="note") | python | def pushnotify(subject, message, api="pushover", priority=0, timestamp=None):
"""
Send push notifications using pre-existing APIs
Requires a config `pushnotify.ini` file in the user home area containing
the necessary api tokens and user keys.
Default API: "pushover"
Config file format:
-------------------
[pushover]
token: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
user: yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy
[nma]
apikey: zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
[pushbullet]
apikey: bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb
iden: dddddddddddddddddddddddddddddddddddd
"""
import types
assert type(priority) is int and -1 <= priority <= 2, \
"Priority should be and int() between -1 and 2"
cfgfile = op.join(op.expanduser("~"), "pushnotify.ini")
Config = ConfigParser()
if op.exists(cfgfile):
Config.read(cfgfile)
else:
sys.exit("Push notification config file `{0}`".format(cfgfile) + \
" does not exist!")
if api == "pushover":
cfg = ConfigSectionMap(Config, api)
token, key = cfg["token"], cfg["user"]
pushover(message, token, key, title=subject, \
priority=priority, timestamp=timestamp)
elif api == "nma":
cfg = ConfigSectionMap(Config, api)
apikey = cfg["apikey"]
nma(message, apikey, event=subject, \
priority=priority)
elif api == "pushbullet":
cfg = ConfigSectionMap(Config, api)
apikey, iden = cfg["apikey"], cfg['iden']
pushbullet(message, apikey, iden, title=subject, \
type="note") | [
"def",
"pushnotify",
"(",
"subject",
",",
"message",
",",
"api",
"=",
"\"pushover\"",
",",
"priority",
"=",
"0",
",",
"timestamp",
"=",
"None",
")",
":",
"import",
"types",
"assert",
"type",
"(",
"priority",
")",
"is",
"int",
"and",
"-",
"1",
"<=",
"... | Send push notifications using pre-existing APIs
Requires a config `pushnotify.ini` file in the user home area containing
the necessary api tokens and user keys.
Default API: "pushover"
Config file format:
-------------------
[pushover]
token: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
user: yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy
[nma]
apikey: zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
[pushbullet]
apikey: bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb
iden: dddddddddddddddddddddddddddddddddddd | [
"Send",
"push",
"notifications",
"using",
"pre",
"-",
"existing",
"APIs"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/apps/base.py#L1305-L1353 | train | 200,903 |
tanghaibao/jcvi | jcvi/apps/base.py | send_email | def send_email(fromaddr, toaddr, subject, message):
"""
Send an email message
"""
from smtplib import SMTP
from email.mime.text import MIMEText
SERVER = "localhost"
_message = MIMEText(message)
_message['Subject'] = subject
_message['From'] = fromaddr
_message['To'] = ", ".join(toaddr)
server = SMTP(SERVER)
server.sendmail(fromaddr, toaddr, _message.as_string())
server.quit() | python | def send_email(fromaddr, toaddr, subject, message):
"""
Send an email message
"""
from smtplib import SMTP
from email.mime.text import MIMEText
SERVER = "localhost"
_message = MIMEText(message)
_message['Subject'] = subject
_message['From'] = fromaddr
_message['To'] = ", ".join(toaddr)
server = SMTP(SERVER)
server.sendmail(fromaddr, toaddr, _message.as_string())
server.quit() | [
"def",
"send_email",
"(",
"fromaddr",
",",
"toaddr",
",",
"subject",
",",
"message",
")",
":",
"from",
"smtplib",
"import",
"SMTP",
"from",
"email",
".",
"mime",
".",
"text",
"import",
"MIMEText",
"SERVER",
"=",
"\"localhost\"",
"_message",
"=",
"MIMEText",
... | Send an email message | [
"Send",
"an",
"email",
"message"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/apps/base.py#L1356-L1371 | train | 200,904 |
tanghaibao/jcvi | jcvi/apps/base.py | get_email_address | def get_email_address(whoami="user"):
""" Auto-generate the FROM and TO email address """
if whoami == "user":
username = getusername()
domain = getdomainname()
myemail = "{0}@{1}".format(username, domain)
return myemail
else:
fromaddr = "notifier-donotreply@{0}".format(getdomainname())
return fromaddr | python | def get_email_address(whoami="user"):
""" Auto-generate the FROM and TO email address """
if whoami == "user":
username = getusername()
domain = getdomainname()
myemail = "{0}@{1}".format(username, domain)
return myemail
else:
fromaddr = "notifier-donotreply@{0}".format(getdomainname())
return fromaddr | [
"def",
"get_email_address",
"(",
"whoami",
"=",
"\"user\"",
")",
":",
"if",
"whoami",
"==",
"\"user\"",
":",
"username",
"=",
"getusername",
"(",
")",
"domain",
"=",
"getdomainname",
"(",
")",
"myemail",
"=",
"\"{0}@{1}\"",
".",
"format",
"(",
"username",
... | Auto-generate the FROM and TO email address | [
"Auto",
"-",
"generate",
"the",
"FROM",
"and",
"TO",
"email",
"address"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/apps/base.py#L1374-L1384 | train | 200,905 |
tanghaibao/jcvi | jcvi/apps/base.py | notify | def notify(args):
"""
%prog notify "Message to be sent"
Send a message via email/push notification.
Email notify: Recipient email address is constructed by joining the login `username`
and `dnsdomainname` of the server
Push notify: Uses available API
"""
from jcvi.utils.iter import flatten
valid_notif_methods.extend(available_push_api.keys())
fromaddr = get_email_address(whoami="notifier")
p = OptionParser(notify.__doc__)
p.add_option("--method", default="email", choices=valid_notif_methods,
help="Specify the mode of notification [default: %default]")
p.add_option("--subject", default="JCVI: job monitor",
help="Specify the subject of the notification message")
p.set_email()
g1 = OptionGroup(p, "Optional `push` parameters")
g1.add_option("--api", default="pushover", \
choices=list(flatten(available_push_api.values())),
help="Specify API used to send the push notification")
g1.add_option("--priority", default=0, type="int",
help="Message priority (-1 <= p <= 2) [default: %default]")
g1.add_option("--timestamp", default=None, type="int", \
dest="timestamp", \
help="Message timestamp in unix format [default: %default]")
p.add_option_group(g1)
opts, args = p.parse_args(args)
if len(args) == 0:
logging.error("Please provide a brief message to be sent")
sys.exit(not p.print_help())
subject = opts.subject
message = " ".join(args).strip()
if opts.method == "email":
toaddr = opts.email.split(",") # TO address should be in a list
for addr in toaddr:
if not is_valid_email(addr):
logging.debug("Email address `{0}` is not valid!".format(addr))
sys.exit()
send_email(fromaddr, toaddr, subject, message)
else:
pushnotify(subject, message, api=opts.api, priority=opts.priority, \
timestamp=opts.timestamp) | python | def notify(args):
"""
%prog notify "Message to be sent"
Send a message via email/push notification.
Email notify: Recipient email address is constructed by joining the login `username`
and `dnsdomainname` of the server
Push notify: Uses available API
"""
from jcvi.utils.iter import flatten
valid_notif_methods.extend(available_push_api.keys())
fromaddr = get_email_address(whoami="notifier")
p = OptionParser(notify.__doc__)
p.add_option("--method", default="email", choices=valid_notif_methods,
help="Specify the mode of notification [default: %default]")
p.add_option("--subject", default="JCVI: job monitor",
help="Specify the subject of the notification message")
p.set_email()
g1 = OptionGroup(p, "Optional `push` parameters")
g1.add_option("--api", default="pushover", \
choices=list(flatten(available_push_api.values())),
help="Specify API used to send the push notification")
g1.add_option("--priority", default=0, type="int",
help="Message priority (-1 <= p <= 2) [default: %default]")
g1.add_option("--timestamp", default=None, type="int", \
dest="timestamp", \
help="Message timestamp in unix format [default: %default]")
p.add_option_group(g1)
opts, args = p.parse_args(args)
if len(args) == 0:
logging.error("Please provide a brief message to be sent")
sys.exit(not p.print_help())
subject = opts.subject
message = " ".join(args).strip()
if opts.method == "email":
toaddr = opts.email.split(",") # TO address should be in a list
for addr in toaddr:
if not is_valid_email(addr):
logging.debug("Email address `{0}` is not valid!".format(addr))
sys.exit()
send_email(fromaddr, toaddr, subject, message)
else:
pushnotify(subject, message, api=opts.api, priority=opts.priority, \
timestamp=opts.timestamp) | [
"def",
"notify",
"(",
"args",
")",
":",
"from",
"jcvi",
".",
"utils",
".",
"iter",
"import",
"flatten",
"valid_notif_methods",
".",
"extend",
"(",
"available_push_api",
".",
"keys",
"(",
")",
")",
"fromaddr",
"=",
"get_email_address",
"(",
"whoami",
"=",
"... | %prog notify "Message to be sent"
Send a message via email/push notification.
Email notify: Recipient email address is constructed by joining the login `username`
and `dnsdomainname` of the server
Push notify: Uses available API | [
"%prog",
"notify",
"Message",
"to",
"be",
"sent"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/apps/base.py#L1421-L1474 | train | 200,906 |
tanghaibao/jcvi | jcvi/apps/base.py | OptionParser.set_db_opts | def set_db_opts(self, dbname="mta4", credentials=True):
"""
Add db connection specific attributes
"""
from jcvi.utils.db import valid_dbconn, get_profile
self.add_option("--db", default=dbname, dest="dbname",
help="Specify name of database to query [default: %default]")
self.add_option("--connector", default="Sybase", dest="dbconn",
choices=valid_dbconn.keys(), help="Specify database connector [default: %default]")
hostname, username, password = get_profile()
if credentials:
self.add_option("--hostname", default=hostname,
help="Specify hostname [default: %default]")
self.add_option("--username", default=username,
help="Username to connect to database [default: %default]")
self.add_option("--password", default=password,
help="Password to connect to database [default: %default]")
self.add_option("--port", type="int",
help="Specify port number [default: %default]") | python | def set_db_opts(self, dbname="mta4", credentials=True):
"""
Add db connection specific attributes
"""
from jcvi.utils.db import valid_dbconn, get_profile
self.add_option("--db", default=dbname, dest="dbname",
help="Specify name of database to query [default: %default]")
self.add_option("--connector", default="Sybase", dest="dbconn",
choices=valid_dbconn.keys(), help="Specify database connector [default: %default]")
hostname, username, password = get_profile()
if credentials:
self.add_option("--hostname", default=hostname,
help="Specify hostname [default: %default]")
self.add_option("--username", default=username,
help="Username to connect to database [default: %default]")
self.add_option("--password", default=password,
help="Password to connect to database [default: %default]")
self.add_option("--port", type="int",
help="Specify port number [default: %default]") | [
"def",
"set_db_opts",
"(",
"self",
",",
"dbname",
"=",
"\"mta4\"",
",",
"credentials",
"=",
"True",
")",
":",
"from",
"jcvi",
".",
"utils",
".",
"db",
"import",
"valid_dbconn",
",",
"get_profile",
"self",
".",
"add_option",
"(",
"\"--db\"",
",",
"default",... | Add db connection specific attributes | [
"Add",
"db",
"connection",
"specific",
"attributes"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/apps/base.py#L238-L257 | train | 200,907 |
tanghaibao/jcvi | jcvi/apps/base.py | OptionParser.set_image_options | def set_image_options(self, args=None, figsize="6x6", dpi=300,
format="pdf", font="Helvetica", palette="deep",
style="darkgrid", cmap="jet"):
"""
Add image format options for given command line programs.
"""
from jcvi.graphics.base import ImageOptions, setup_theme
allowed_format = ("emf", "eps", "pdf", "png", "ps", \
"raw", "rgba", "svg", "svgz")
allowed_fonts = ("Helvetica", "Palatino", "Schoolbook", "Arial")
allowed_styles = ("darkgrid", "whitegrid", "dark", "white", "ticks")
allowed_diverge = ("BrBG", "PiYG", "PRGn", "PuOr", "RdBu", \
"RdGy", "RdYlBu", "RdYlGn", "Spectral")
group = OptionGroup(self, "Image options")
self.add_option_group(group)
group.add_option("--figsize", default=figsize,
help="Figure size `width`x`height` in inches [default: %default]")
group.add_option("--dpi", default=dpi, type="int",
help="Physical dot density (dots per inch) [default: %default]")
group.add_option("--format", default=format, choices=allowed_format,
help="Generate image of format [default: %default]")
group.add_option("--font", default=font, choices=allowed_fonts,
help="Font name")
group.add_option("--style", default=style, choices=allowed_styles,
help="Axes background")
group.add_option("--diverge", default="PiYG", choices=allowed_diverge,
help="Contrasting color scheme")
group.add_option("--cmap", default=cmap, help="Use this color map")
group.add_option("--notex", default=False, action="store_true",
help="Do not use tex")
if args is None:
args = sys.argv[1:]
opts, args = self.parse_args(args)
assert opts.dpi > 0
assert "x" in opts.figsize
setup_theme(style=opts.style, font=opts.font, usetex=(not opts.notex))
return opts, args, ImageOptions(opts) | python | def set_image_options(self, args=None, figsize="6x6", dpi=300,
format="pdf", font="Helvetica", palette="deep",
style="darkgrid", cmap="jet"):
"""
Add image format options for given command line programs.
"""
from jcvi.graphics.base import ImageOptions, setup_theme
allowed_format = ("emf", "eps", "pdf", "png", "ps", \
"raw", "rgba", "svg", "svgz")
allowed_fonts = ("Helvetica", "Palatino", "Schoolbook", "Arial")
allowed_styles = ("darkgrid", "whitegrid", "dark", "white", "ticks")
allowed_diverge = ("BrBG", "PiYG", "PRGn", "PuOr", "RdBu", \
"RdGy", "RdYlBu", "RdYlGn", "Spectral")
group = OptionGroup(self, "Image options")
self.add_option_group(group)
group.add_option("--figsize", default=figsize,
help="Figure size `width`x`height` in inches [default: %default]")
group.add_option("--dpi", default=dpi, type="int",
help="Physical dot density (dots per inch) [default: %default]")
group.add_option("--format", default=format, choices=allowed_format,
help="Generate image of format [default: %default]")
group.add_option("--font", default=font, choices=allowed_fonts,
help="Font name")
group.add_option("--style", default=style, choices=allowed_styles,
help="Axes background")
group.add_option("--diverge", default="PiYG", choices=allowed_diverge,
help="Contrasting color scheme")
group.add_option("--cmap", default=cmap, help="Use this color map")
group.add_option("--notex", default=False, action="store_true",
help="Do not use tex")
if args is None:
args = sys.argv[1:]
opts, args = self.parse_args(args)
assert opts.dpi > 0
assert "x" in opts.figsize
setup_theme(style=opts.style, font=opts.font, usetex=(not opts.notex))
return opts, args, ImageOptions(opts) | [
"def",
"set_image_options",
"(",
"self",
",",
"args",
"=",
"None",
",",
"figsize",
"=",
"\"6x6\"",
",",
"dpi",
"=",
"300",
",",
"format",
"=",
"\"pdf\"",
",",
"font",
"=",
"\"Helvetica\"",
",",
"palette",
"=",
"\"deep\"",
",",
"style",
"=",
"\"darkgrid\"... | Add image format options for given command line programs. | [
"Add",
"image",
"format",
"options",
"for",
"given",
"command",
"line",
"programs",
"."
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/apps/base.py#L362-L406 | train | 200,908 |
tanghaibao/jcvi | jcvi/assembly/goldenpath.py | dedup | def dedup(args):
"""
%prog dedup scaffolds.fasta
Remove redundant contigs with CD-HIT. This is run prior to
assembly.sspace.embed().
"""
from jcvi.formats.fasta import gaps
from jcvi.apps.cdhit import deduplicate, ids
p = OptionParser(dedup.__doc__)
p.set_align(pctid=GoodPct)
p.set_mingap(default=10)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
scaffolds, = args
mingap = opts.mingap
splitfile, oagpfile, cagpfile = gaps([scaffolds, "--split", "--mingap={0}".format(mingap)])
dd = splitfile + ".cdhit"
clstrfile = dd + ".clstr"
idsfile = dd + ".ids"
if need_update(splitfile, clstrfile):
deduplicate([splitfile, "--pctid={0}".format(opts.pctid)])
if need_update(clstrfile, idsfile):
ids([clstrfile])
agp = AGP(cagpfile)
reps = set(x.split()[-1] for x in open(idsfile))
pf = scaffolds.rsplit(".", 1)[0]
dedupagp = pf + ".dedup.agp"
fw = open(dedupagp, "w")
ndropped = ndroppedbases = 0
for a in agp:
if not a.is_gap and a.component_id not in reps:
span = a.component_span
logging.debug("Drop component {0} ({1})".\
format(a.component_id, span))
ndropped += 1
ndroppedbases += span
continue
print(a, file=fw)
fw.close()
logging.debug("Dropped components: {0}, Dropped bases: {1}".\
format(ndropped, ndroppedbases))
logging.debug("Deduplicated file written to `{0}`.".format(dedupagp))
tidyagp = tidy([dedupagp, splitfile])
dedupfasta = pf + ".dedup.fasta"
build([tidyagp, dd, dedupfasta])
return dedupfasta | python | def dedup(args):
"""
%prog dedup scaffolds.fasta
Remove redundant contigs with CD-HIT. This is run prior to
assembly.sspace.embed().
"""
from jcvi.formats.fasta import gaps
from jcvi.apps.cdhit import deduplicate, ids
p = OptionParser(dedup.__doc__)
p.set_align(pctid=GoodPct)
p.set_mingap(default=10)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
scaffolds, = args
mingap = opts.mingap
splitfile, oagpfile, cagpfile = gaps([scaffolds, "--split", "--mingap={0}".format(mingap)])
dd = splitfile + ".cdhit"
clstrfile = dd + ".clstr"
idsfile = dd + ".ids"
if need_update(splitfile, clstrfile):
deduplicate([splitfile, "--pctid={0}".format(opts.pctid)])
if need_update(clstrfile, idsfile):
ids([clstrfile])
agp = AGP(cagpfile)
reps = set(x.split()[-1] for x in open(idsfile))
pf = scaffolds.rsplit(".", 1)[0]
dedupagp = pf + ".dedup.agp"
fw = open(dedupagp, "w")
ndropped = ndroppedbases = 0
for a in agp:
if not a.is_gap and a.component_id not in reps:
span = a.component_span
logging.debug("Drop component {0} ({1})".\
format(a.component_id, span))
ndropped += 1
ndroppedbases += span
continue
print(a, file=fw)
fw.close()
logging.debug("Dropped components: {0}, Dropped bases: {1}".\
format(ndropped, ndroppedbases))
logging.debug("Deduplicated file written to `{0}`.".format(dedupagp))
tidyagp = tidy([dedupagp, splitfile])
dedupfasta = pf + ".dedup.fasta"
build([tidyagp, dd, dedupfasta])
return dedupfasta | [
"def",
"dedup",
"(",
"args",
")",
":",
"from",
"jcvi",
".",
"formats",
".",
"fasta",
"import",
"gaps",
"from",
"jcvi",
".",
"apps",
".",
"cdhit",
"import",
"deduplicate",
",",
"ids",
"p",
"=",
"OptionParser",
"(",
"dedup",
".",
"__doc__",
")",
"p",
"... | %prog dedup scaffolds.fasta
Remove redundant contigs with CD-HIT. This is run prior to
assembly.sspace.embed(). | [
"%prog",
"dedup",
"scaffolds",
".",
"fasta"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/assembly/goldenpath.py#L494-L550 | train | 200,909 |
tanghaibao/jcvi | jcvi/assembly/goldenpath.py | blast | def blast(args):
"""
%prog blast allfasta clonename
Insert a component into agpfile by aligning to the best hit in pool and see
if they have good overlaps.
"""
from jcvi.apps.align import run_megablast
p = OptionParser(blast.__doc__)
p.add_option("-n", type="int", default=2,
help="Take best N hits [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
allfasta, clonename = args
fastadir = "fasta"
infile = op.join(fastadir, clonename + ".fasta")
if not op.exists(infile):
entrez([clonename, "--skipcheck", "--outdir=" + fastadir])
outfile = "{0}.{1}.blast".format(clonename, allfasta.split(".")[0])
run_megablast(infile=infile, outfile=outfile, db=allfasta, \
pctid=GoodPct, hitlen=GoodOverlap)
blasts = [BlastLine(x) for x in open(outfile)]
besthits = []
for b in blasts:
if b.query.count("|") >= 3:
b.query = b.query.split("|")[3]
if b.subject.count("|") >= 3:
b.subject = b.subject.split("|")[3]
b.query = b.query.rsplit(".", 1)[0]
b.subject = b.subject.rsplit(".", 1)[0]
if b.query == b.subject:
continue
if b.subject not in besthits:
besthits.append(b.subject)
if len(besthits) == opts.n:
break
for b in besthits:
overlap([clonename, b, "--dir=" + fastadir]) | python | def blast(args):
"""
%prog blast allfasta clonename
Insert a component into agpfile by aligning to the best hit in pool and see
if they have good overlaps.
"""
from jcvi.apps.align import run_megablast
p = OptionParser(blast.__doc__)
p.add_option("-n", type="int", default=2,
help="Take best N hits [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
allfasta, clonename = args
fastadir = "fasta"
infile = op.join(fastadir, clonename + ".fasta")
if not op.exists(infile):
entrez([clonename, "--skipcheck", "--outdir=" + fastadir])
outfile = "{0}.{1}.blast".format(clonename, allfasta.split(".")[0])
run_megablast(infile=infile, outfile=outfile, db=allfasta, \
pctid=GoodPct, hitlen=GoodOverlap)
blasts = [BlastLine(x) for x in open(outfile)]
besthits = []
for b in blasts:
if b.query.count("|") >= 3:
b.query = b.query.split("|")[3]
if b.subject.count("|") >= 3:
b.subject = b.subject.split("|")[3]
b.query = b.query.rsplit(".", 1)[0]
b.subject = b.subject.rsplit(".", 1)[0]
if b.query == b.subject:
continue
if b.subject not in besthits:
besthits.append(b.subject)
if len(besthits) == opts.n:
break
for b in besthits:
overlap([clonename, b, "--dir=" + fastadir]) | [
"def",
"blast",
"(",
"args",
")",
":",
"from",
"jcvi",
".",
"apps",
".",
"align",
"import",
"run_megablast",
"p",
"=",
"OptionParser",
"(",
"blast",
".",
"__doc__",
")",
"p",
".",
"add_option",
"(",
"\"-n\"",
",",
"type",
"=",
"\"int\"",
",",
"default"... | %prog blast allfasta clonename
Insert a component into agpfile by aligning to the best hit in pool and see
if they have good overlaps. | [
"%prog",
"blast",
"allfasta",
"clonename"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/assembly/goldenpath.py#L724-L772 | train | 200,910 |
tanghaibao/jcvi | jcvi/assembly/goldenpath.py | bes | def bes(args):
"""
%prog bes bacfasta clonename
Use the clone name to download BES gss sequences from Genbank, map and then
visualize.
"""
from jcvi.apps.align import run_blat
p = OptionParser(bes.__doc__)
p.set_cpus()
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
bacfasta, clonename = args
entrez([clonename, "--database=nucgss", "--skipcheck"])
besfasta = clonename + ".fasta"
blatfile = clonename + ".bes.blat"
run_blat(infile=besfasta, outfile=blatfile, db=bacfasta, \
pctid=95, hitlen=100, cpus=opts.cpus)
aid, asize = next(Fasta(bacfasta).itersizes())
width = 50
msg = "=" * width
msg += " " + aid
print(msg, file=sys.stderr)
ratio = width * 1. / asize
_ = lambda x: int(round(x * ratio, 0))
blasts = [BlastLine(x) for x in open(blatfile)]
for b in blasts:
if b.orientation == '+':
msg = " " * _(b.sstart) + "->"
else:
msg = " " * (_(b.sstop) - 2) + "<-"
msg += " " * (width - len(msg) + 2)
msg += b.query
if b.orientation == '+':
msg += " (hang={0})".format(b.sstart - 1)
else:
msg += " (hang={0})".format(asize - b.sstop)
print(msg, file=sys.stderr) | python | def bes(args):
"""
%prog bes bacfasta clonename
Use the clone name to download BES gss sequences from Genbank, map and then
visualize.
"""
from jcvi.apps.align import run_blat
p = OptionParser(bes.__doc__)
p.set_cpus()
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
bacfasta, clonename = args
entrez([clonename, "--database=nucgss", "--skipcheck"])
besfasta = clonename + ".fasta"
blatfile = clonename + ".bes.blat"
run_blat(infile=besfasta, outfile=blatfile, db=bacfasta, \
pctid=95, hitlen=100, cpus=opts.cpus)
aid, asize = next(Fasta(bacfasta).itersizes())
width = 50
msg = "=" * width
msg += " " + aid
print(msg, file=sys.stderr)
ratio = width * 1. / asize
_ = lambda x: int(round(x * ratio, 0))
blasts = [BlastLine(x) for x in open(blatfile)]
for b in blasts:
if b.orientation == '+':
msg = " " * _(b.sstart) + "->"
else:
msg = " " * (_(b.sstop) - 2) + "<-"
msg += " " * (width - len(msg) + 2)
msg += b.query
if b.orientation == '+':
msg += " (hang={0})".format(b.sstart - 1)
else:
msg += " (hang={0})".format(asize - b.sstop)
print(msg, file=sys.stderr) | [
"def",
"bes",
"(",
"args",
")",
":",
"from",
"jcvi",
".",
"apps",
".",
"align",
"import",
"run_blat",
"p",
"=",
"OptionParser",
"(",
"bes",
".",
"__doc__",
")",
"p",
".",
"set_cpus",
"(",
")",
"opts",
",",
"args",
"=",
"p",
".",
"parse_args",
"(",
... | %prog bes bacfasta clonename
Use the clone name to download BES gss sequences from Genbank, map and then
visualize. | [
"%prog",
"bes",
"bacfasta",
"clonename"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/assembly/goldenpath.py#L775-L821 | train | 200,911 |
tanghaibao/jcvi | jcvi/assembly/goldenpath.py | flip | def flip(args):
"""
%prog flip fastafile
Go through each FASTA record, check against Genbank file and determines
whether or not to flip the sequence. This is useful before updates of the
sequences to make sure the same orientation is used.
"""
p = OptionParser(flip.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
fastafile, = args
outfastafile = fastafile.rsplit(".", 1)[0] + ".flipped.fasta"
fo = open(outfastafile, "w")
f = Fasta(fastafile, lazy=True)
for name, rec in f.iteritems_ordered():
tmpfasta = "a.fasta"
fw = open(tmpfasta, "w")
SeqIO.write([rec], fw, "fasta")
fw.close()
o = overlap([tmpfasta, name])
if o.orientation == '-':
rec.seq = rec.seq.reverse_complement()
SeqIO.write([rec], fo, "fasta")
os.remove(tmpfasta) | python | def flip(args):
"""
%prog flip fastafile
Go through each FASTA record, check against Genbank file and determines
whether or not to flip the sequence. This is useful before updates of the
sequences to make sure the same orientation is used.
"""
p = OptionParser(flip.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
fastafile, = args
outfastafile = fastafile.rsplit(".", 1)[0] + ".flipped.fasta"
fo = open(outfastafile, "w")
f = Fasta(fastafile, lazy=True)
for name, rec in f.iteritems_ordered():
tmpfasta = "a.fasta"
fw = open(tmpfasta, "w")
SeqIO.write([rec], fw, "fasta")
fw.close()
o = overlap([tmpfasta, name])
if o.orientation == '-':
rec.seq = rec.seq.reverse_complement()
SeqIO.write([rec], fo, "fasta")
os.remove(tmpfasta) | [
"def",
"flip",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"flip",
".",
"__doc__",
")",
"opts",
",",
"args",
"=",
"p",
".",
"parse_args",
"(",
"args",
")",
"if",
"len",
"(",
"args",
")",
"!=",
"1",
":",
"sys",
".",
"exit",
"(",
"not",... | %prog flip fastafile
Go through each FASTA record, check against Genbank file and determines
whether or not to flip the sequence. This is useful before updates of the
sequences to make sure the same orientation is used. | [
"%prog",
"flip",
"fastafile"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/assembly/goldenpath.py#L824-L853 | train | 200,912 |
tanghaibao/jcvi | jcvi/assembly/goldenpath.py | batchoverlap | def batchoverlap(args):
"""
%prog batchoverlap pairs.txt outdir
Check overlaps between pairs of sequences.
"""
p = OptionParser(batchoverlap.__doc__)
p.set_cpus()
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
pairsfile, outdir = args
fp = open(pairsfile)
cmds = []
mkdir("overlaps")
for row in fp:
a, b = row.split()[:2]
oa = op.join(outdir, a + ".fa")
ob = op.join(outdir, b + ".fa")
cmd = "python -m jcvi.assembly.goldenpath overlap {0} {1}".format(oa, ob)
cmd += " -o overlaps/{0}_{1}.ov".format(a, b)
cmds.append(cmd)
print("\n".join(cmds)) | python | def batchoverlap(args):
"""
%prog batchoverlap pairs.txt outdir
Check overlaps between pairs of sequences.
"""
p = OptionParser(batchoverlap.__doc__)
p.set_cpus()
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
pairsfile, outdir = args
fp = open(pairsfile)
cmds = []
mkdir("overlaps")
for row in fp:
a, b = row.split()[:2]
oa = op.join(outdir, a + ".fa")
ob = op.join(outdir, b + ".fa")
cmd = "python -m jcvi.assembly.goldenpath overlap {0} {1}".format(oa, ob)
cmd += " -o overlaps/{0}_{1}.ov".format(a, b)
cmds.append(cmd)
print("\n".join(cmds)) | [
"def",
"batchoverlap",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"batchoverlap",
".",
"__doc__",
")",
"p",
".",
"set_cpus",
"(",
")",
"opts",
",",
"args",
"=",
"p",
".",
"parse_args",
"(",
"args",
")",
"if",
"len",
"(",
"args",
")",
"!=... | %prog batchoverlap pairs.txt outdir
Check overlaps between pairs of sequences. | [
"%prog",
"batchoverlap",
"pairs",
".",
"txt",
"outdir"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/assembly/goldenpath.py#L856-L881 | train | 200,913 |
tanghaibao/jcvi | jcvi/assembly/goldenpath.py | certificate | def certificate(args):
"""
%prog certificate tpffile certificatefile
Generate certificate file for all overlaps in tpffile. tpffile can be
generated by jcvi.formats.agp.tpf().
North chr1 2 0 AC229737.8 telomere 58443
South chr1 2 1 AC229737.8 AC202463.29 58443 37835 58443 + Non-terminal
Each line describes a relationship between the current BAC and the
north/south BAC. First, "North/South" tag, then the chromosome, phases of
the two BACs, ids of the two BACs, the size and the overlap start-stop of
the CURRENT BAC, and orientation. Each BAC will have two lines in the
certificate file.
"""
p = OptionParser(certificate.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
tpffile, certificatefile = args
fastadir = "fasta"
tpf = TPF(tpffile)
data = check_certificate(certificatefile)
fw = must_open(certificatefile, "w")
for i, a in enumerate(tpf):
if a.is_gap:
continue
aid = a.component_id
af = op.join(fastadir, aid + ".fasta")
if not op.exists(af): # Check to avoid redownload
entrez([aid, "--skipcheck", "--outdir=" + fastadir])
north, south = tpf.getNorthSouthClone(i)
aphase, asize = phase(aid)
for tag, p in (("North", north), ("South", south)):
if not p: # end of the chromosome
ov = "telomere\t{0}".format(asize)
elif p.isCloneGap:
bphase = "0"
ov = "{0}\t{1}".format(p.gap_type, asize)
else:
bid = p.component_id
bphase, bsize = phase(bid)
key = (tag, aid, bid)
if key in data:
print(data[key], file=fw)
continue
ar = [aid, bid, "--dir=" + fastadir]
o = overlap(ar)
ov = o.certificateline if o \
else "{0}\t{1}\tNone".format(bid, asize)
print("\t".join(str(x) for x in \
(tag, a.object, aphase, bphase, aid, ov)), file=fw)
fw.flush() | python | def certificate(args):
"""
%prog certificate tpffile certificatefile
Generate certificate file for all overlaps in tpffile. tpffile can be
generated by jcvi.formats.agp.tpf().
North chr1 2 0 AC229737.8 telomere 58443
South chr1 2 1 AC229737.8 AC202463.29 58443 37835 58443 + Non-terminal
Each line describes a relationship between the current BAC and the
north/south BAC. First, "North/South" tag, then the chromosome, phases of
the two BACs, ids of the two BACs, the size and the overlap start-stop of
the CURRENT BAC, and orientation. Each BAC will have two lines in the
certificate file.
"""
p = OptionParser(certificate.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
tpffile, certificatefile = args
fastadir = "fasta"
tpf = TPF(tpffile)
data = check_certificate(certificatefile)
fw = must_open(certificatefile, "w")
for i, a in enumerate(tpf):
if a.is_gap:
continue
aid = a.component_id
af = op.join(fastadir, aid + ".fasta")
if not op.exists(af): # Check to avoid redownload
entrez([aid, "--skipcheck", "--outdir=" + fastadir])
north, south = tpf.getNorthSouthClone(i)
aphase, asize = phase(aid)
for tag, p in (("North", north), ("South", south)):
if not p: # end of the chromosome
ov = "telomere\t{0}".format(asize)
elif p.isCloneGap:
bphase = "0"
ov = "{0}\t{1}".format(p.gap_type, asize)
else:
bid = p.component_id
bphase, bsize = phase(bid)
key = (tag, aid, bid)
if key in data:
print(data[key], file=fw)
continue
ar = [aid, bid, "--dir=" + fastadir]
o = overlap(ar)
ov = o.certificateline if o \
else "{0}\t{1}\tNone".format(bid, asize)
print("\t".join(str(x) for x in \
(tag, a.object, aphase, bphase, aid, ov)), file=fw)
fw.flush() | [
"def",
"certificate",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"certificate",
".",
"__doc__",
")",
"opts",
",",
"args",
"=",
"p",
".",
"parse_args",
"(",
"args",
")",
"if",
"len",
"(",
"args",
")",
"!=",
"2",
":",
"sys",
".",
"exit",
... | %prog certificate tpffile certificatefile
Generate certificate file for all overlaps in tpffile. tpffile can be
generated by jcvi.formats.agp.tpf().
North chr1 2 0 AC229737.8 telomere 58443
South chr1 2 1 AC229737.8 AC202463.29 58443 37835 58443 + Non-terminal
Each line describes a relationship between the current BAC and the
north/south BAC. First, "North/South" tag, then the chromosome, phases of
the two BACs, ids of the two BACs, the size and the overlap start-stop of
the CURRENT BAC, and orientation. Each BAC will have two lines in the
certificate file. | [
"%prog",
"certificate",
"tpffile",
"certificatefile"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/assembly/goldenpath.py#L995-L1058 | train | 200,914 |
tanghaibao/jcvi | jcvi/assembly/goldenpath.py | neighbor | def neighbor(args):
"""
%prog neighbor agpfile componentID
Check overlaps of a particular component in agpfile.
"""
p = OptionParser(neighbor.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
agpfile, componentID = args
fastadir = "fasta"
cmd = "grep"
cmd += " --color -C2 {0} {1}".format(componentID, agpfile)
sh(cmd)
agp = AGP(agpfile)
aorder = agp.order
if not componentID in aorder:
print("Record {0} not present in `{1}`."\
.format(componentID, agpfile), file=sys.stderr)
return
i, c = aorder[componentID]
north, south = agp.getNorthSouthClone(i)
if not north.isCloneGap:
ar = [north.component_id, componentID, "--dir=" + fastadir]
if north.orientation == '-':
ar += ["--qreverse"]
overlap(ar)
if not south.isCloneGap:
ar = [componentID, south.component_id, "--dir=" + fastadir]
if c.orientation == '-':
ar += ["--qreverse"]
overlap(ar) | python | def neighbor(args):
"""
%prog neighbor agpfile componentID
Check overlaps of a particular component in agpfile.
"""
p = OptionParser(neighbor.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
agpfile, componentID = args
fastadir = "fasta"
cmd = "grep"
cmd += " --color -C2 {0} {1}".format(componentID, agpfile)
sh(cmd)
agp = AGP(agpfile)
aorder = agp.order
if not componentID in aorder:
print("Record {0} not present in `{1}`."\
.format(componentID, agpfile), file=sys.stderr)
return
i, c = aorder[componentID]
north, south = agp.getNorthSouthClone(i)
if not north.isCloneGap:
ar = [north.component_id, componentID, "--dir=" + fastadir]
if north.orientation == '-':
ar += ["--qreverse"]
overlap(ar)
if not south.isCloneGap:
ar = [componentID, south.component_id, "--dir=" + fastadir]
if c.orientation == '-':
ar += ["--qreverse"]
overlap(ar) | [
"def",
"neighbor",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"neighbor",
".",
"__doc__",
")",
"opts",
",",
"args",
"=",
"p",
".",
"parse_args",
"(",
"args",
")",
"if",
"len",
"(",
"args",
")",
"!=",
"2",
":",
"sys",
".",
"exit",
"(",
... | %prog neighbor agpfile componentID
Check overlaps of a particular component in agpfile. | [
"%prog",
"neighbor",
"agpfile",
"componentID"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/assembly/goldenpath.py#L1061-L1100 | train | 200,915 |
tanghaibao/jcvi | jcvi/assembly/goldenpath.py | agp | def agp(args):
"""
%prog agp tpffile certificatefile agpfile
Build agpfile from overlap certificates.
Tiling Path File (tpf) is a file that lists the component and the gaps.
It is a three-column file similar to below, also see jcvi.formats.agp.tpf():
telomere chr1 na
AC229737.8 chr1 +
AC202463.29 chr1 +
Note: the orientation of the component is only used as a guide. If the
orientation is derivable from a terminal overlap, it will use it regardless
of what the tpf says.
See jcvi.assembly.goldenpath.certificate() which generates a list of
certificates based on agpfile. At first, it seems counter-productive to
convert first agp to certificates then certificates back to agp.
The certificates provide a way to edit the overlap information, so that the
agpfile can be corrected (without changing agpfile directly).
"""
from jcvi.formats.base import DictFile
p = OptionParser(agp.__doc__)
opts, args = p.parse_args(args)
if len(args) != 3:
sys.exit(not p.print_help())
tpffile, certificatefile, agpfile = args
orientationguide = DictFile(tpffile, valuepos=2)
cert = Certificate(certificatefile)
cert.write_AGP(agpfile, orientationguide=orientationguide) | python | def agp(args):
"""
%prog agp tpffile certificatefile agpfile
Build agpfile from overlap certificates.
Tiling Path File (tpf) is a file that lists the component and the gaps.
It is a three-column file similar to below, also see jcvi.formats.agp.tpf():
telomere chr1 na
AC229737.8 chr1 +
AC202463.29 chr1 +
Note: the orientation of the component is only used as a guide. If the
orientation is derivable from a terminal overlap, it will use it regardless
of what the tpf says.
See jcvi.assembly.goldenpath.certificate() which generates a list of
certificates based on agpfile. At first, it seems counter-productive to
convert first agp to certificates then certificates back to agp.
The certificates provide a way to edit the overlap information, so that the
agpfile can be corrected (without changing agpfile directly).
"""
from jcvi.formats.base import DictFile
p = OptionParser(agp.__doc__)
opts, args = p.parse_args(args)
if len(args) != 3:
sys.exit(not p.print_help())
tpffile, certificatefile, agpfile = args
orientationguide = DictFile(tpffile, valuepos=2)
cert = Certificate(certificatefile)
cert.write_AGP(agpfile, orientationguide=orientationguide) | [
"def",
"agp",
"(",
"args",
")",
":",
"from",
"jcvi",
".",
"formats",
".",
"base",
"import",
"DictFile",
"p",
"=",
"OptionParser",
"(",
"agp",
".",
"__doc__",
")",
"opts",
",",
"args",
"=",
"p",
".",
"parse_args",
"(",
"args",
")",
"if",
"len",
"(",... | %prog agp tpffile certificatefile agpfile
Build agpfile from overlap certificates.
Tiling Path File (tpf) is a file that lists the component and the gaps.
It is a three-column file similar to below, also see jcvi.formats.agp.tpf():
telomere chr1 na
AC229737.8 chr1 +
AC202463.29 chr1 +
Note: the orientation of the component is only used as a guide. If the
orientation is derivable from a terminal overlap, it will use it regardless
of what the tpf says.
See jcvi.assembly.goldenpath.certificate() which generates a list of
certificates based on agpfile. At first, it seems counter-productive to
convert first agp to certificates then certificates back to agp.
The certificates provide a way to edit the overlap information, so that the
agpfile can be corrected (without changing agpfile directly). | [
"%prog",
"agp",
"tpffile",
"certificatefile",
"agpfile"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/assembly/goldenpath.py#L1103-L1138 | train | 200,916 |
tanghaibao/jcvi | jcvi/assembly/goldenpath.py | Overlap.update_clr | def update_clr(self, aclr, bclr):
"""
Zip the two sequences together, using "left-greedy" rule
============= seqA
||||
====(===============) seqB
"""
print(aclr, bclr, file=sys.stderr)
otype = self.otype
if otype == 1:
if aclr.orientation == '+':
aclr.end = self.qstop
else:
aclr.start = self.qstart
if bclr.orientation == '+':
bclr.start = self.sstop + 1
else:
bclr.end = self.sstart - 1
elif otype == 3:
aclr.start = aclr.end
elif otype == 4:
bclr.start = bclr.end
print(aclr, bclr, file=sys.stderr) | python | def update_clr(self, aclr, bclr):
"""
Zip the two sequences together, using "left-greedy" rule
============= seqA
||||
====(===============) seqB
"""
print(aclr, bclr, file=sys.stderr)
otype = self.otype
if otype == 1:
if aclr.orientation == '+':
aclr.end = self.qstop
else:
aclr.start = self.qstart
if bclr.orientation == '+':
bclr.start = self.sstop + 1
else:
bclr.end = self.sstart - 1
elif otype == 3:
aclr.start = aclr.end
elif otype == 4:
bclr.start = bclr.end
print(aclr, bclr, file=sys.stderr) | [
"def",
"update_clr",
"(",
"self",
",",
"aclr",
",",
"bclr",
")",
":",
"print",
"(",
"aclr",
",",
"bclr",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"otype",
"=",
"self",
".",
"otype",
"if",
"otype",
"==",
"1",
":",
"if",
"aclr",
".",
"orientati... | Zip the two sequences together, using "left-greedy" rule
============= seqA
||||
====(===============) seqB | [
"Zip",
"the",
"two",
"sequences",
"together",
"using",
"left",
"-",
"greedy",
"rule"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/assembly/goldenpath.py#L156-L183 | train | 200,917 |
tanghaibao/jcvi | jcvi/variation/cnv.py | gcdepth | def gcdepth(args):
"""
%prog gcdepth sample_name tag
Plot GC content vs depth vs genomnic bins. Inputs are mosdepth output:
- NA12878_S1.mosdepth.global.dist.txt
- NA12878_S1.mosdepth.region.dist.txt
- NA12878_S1.regions.bed.gz
- NA12878_S1.regions.bed.gz.csi
- NA12878_S1.regions.gc.bed.gz
A sample mosdepth.sh script might look like:
```
#!/bin/bash
LD_LIBRARY_PATH=mosdepth/htslib/ mosdepth/mosdepth $1 \\
bams/$1.bam -t 4 -c chr1 -n --by 1000
bedtools nuc -fi GRCh38/WholeGenomeFasta/genome.fa \\
-bed $1.regions.bed.gz \\
| pigz -c > $1.regions.gc.bed.gz
```
"""
import hashlib
from jcvi.algorithms.formula import MAD_interval as confidence_interval
from jcvi.graphics.base import latex, plt, savefig, set2
p = OptionParser(gcdepth.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
sample_name, tag = args
# The tag is used to add to title, also provide a random (hashed) color
coloridx = int(hashlib.sha1(tag).hexdigest(), 16) % len(set2)
color = set2[coloridx]
# mosdepth outputs a table that we can use to plot relationship
gcbedgz = sample_name + ".regions.gc.bed.gz"
df = pd.read_csv(gcbedgz, delimiter="\t")
mf = df.loc[:, ("4_usercol", "6_pct_gc")]
mf.columns = ["depth", "gc"]
# We discard any bins that are gaps
mf = mf[(mf["depth"] > .001) | (mf["gc"] > .001)]
# Create GC bins
gcbins = defaultdict(list)
for i, row in mf.iterrows():
gcp = int(round(row["gc"] * 100))
gcbins[gcp].append(row["depth"])
gcd = sorted((k * .01, confidence_interval(v))
for (k, v) in gcbins.items())
gcd_x, gcd_y = zip(*gcd)
m, lo, hi = zip(*gcd_y)
# Plot
plt.plot(mf["gc"], mf["depth"], ".", color="lightslategray", ms=2,
mec="lightslategray", alpha=.1)
patch = plt.fill_between(gcd_x, lo, hi,
facecolor=color, alpha=.25, zorder=10,
linewidth=0.0, label="Median +/- MAD band")
plt.plot(gcd_x, m, "-", color=color, lw=2, zorder=20)
ax = plt.gca()
ax.legend(handles=[patch], loc="best")
ax.set_xlim(0, 1)
ax.set_ylim(0, 100)
ax.set_title("{} ({})".format(latex(sample_name), tag))
ax.set_xlabel("GC content")
ax.set_ylabel("Depth")
savefig(sample_name + ".gcdepth.png") | python | def gcdepth(args):
"""
%prog gcdepth sample_name tag
Plot GC content vs depth vs genomnic bins. Inputs are mosdepth output:
- NA12878_S1.mosdepth.global.dist.txt
- NA12878_S1.mosdepth.region.dist.txt
- NA12878_S1.regions.bed.gz
- NA12878_S1.regions.bed.gz.csi
- NA12878_S1.regions.gc.bed.gz
A sample mosdepth.sh script might look like:
```
#!/bin/bash
LD_LIBRARY_PATH=mosdepth/htslib/ mosdepth/mosdepth $1 \\
bams/$1.bam -t 4 -c chr1 -n --by 1000
bedtools nuc -fi GRCh38/WholeGenomeFasta/genome.fa \\
-bed $1.regions.bed.gz \\
| pigz -c > $1.regions.gc.bed.gz
```
"""
import hashlib
from jcvi.algorithms.formula import MAD_interval as confidence_interval
from jcvi.graphics.base import latex, plt, savefig, set2
p = OptionParser(gcdepth.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
sample_name, tag = args
# The tag is used to add to title, also provide a random (hashed) color
coloridx = int(hashlib.sha1(tag).hexdigest(), 16) % len(set2)
color = set2[coloridx]
# mosdepth outputs a table that we can use to plot relationship
gcbedgz = sample_name + ".regions.gc.bed.gz"
df = pd.read_csv(gcbedgz, delimiter="\t")
mf = df.loc[:, ("4_usercol", "6_pct_gc")]
mf.columns = ["depth", "gc"]
# We discard any bins that are gaps
mf = mf[(mf["depth"] > .001) | (mf["gc"] > .001)]
# Create GC bins
gcbins = defaultdict(list)
for i, row in mf.iterrows():
gcp = int(round(row["gc"] * 100))
gcbins[gcp].append(row["depth"])
gcd = sorted((k * .01, confidence_interval(v))
for (k, v) in gcbins.items())
gcd_x, gcd_y = zip(*gcd)
m, lo, hi = zip(*gcd_y)
# Plot
plt.plot(mf["gc"], mf["depth"], ".", color="lightslategray", ms=2,
mec="lightslategray", alpha=.1)
patch = plt.fill_between(gcd_x, lo, hi,
facecolor=color, alpha=.25, zorder=10,
linewidth=0.0, label="Median +/- MAD band")
plt.plot(gcd_x, m, "-", color=color, lw=2, zorder=20)
ax = plt.gca()
ax.legend(handles=[patch], loc="best")
ax.set_xlim(0, 1)
ax.set_ylim(0, 100)
ax.set_title("{} ({})".format(latex(sample_name), tag))
ax.set_xlabel("GC content")
ax.set_ylabel("Depth")
savefig(sample_name + ".gcdepth.png") | [
"def",
"gcdepth",
"(",
"args",
")",
":",
"import",
"hashlib",
"from",
"jcvi",
".",
"algorithms",
".",
"formula",
"import",
"MAD_interval",
"as",
"confidence_interval",
"from",
"jcvi",
".",
"graphics",
".",
"base",
"import",
"latex",
",",
"plt",
",",
"savefig... | %prog gcdepth sample_name tag
Plot GC content vs depth vs genomnic bins. Inputs are mosdepth output:
- NA12878_S1.mosdepth.global.dist.txt
- NA12878_S1.mosdepth.region.dist.txt
- NA12878_S1.regions.bed.gz
- NA12878_S1.regions.bed.gz.csi
- NA12878_S1.regions.gc.bed.gz
A sample mosdepth.sh script might look like:
```
#!/bin/bash
LD_LIBRARY_PATH=mosdepth/htslib/ mosdepth/mosdepth $1 \\
bams/$1.bam -t 4 -c chr1 -n --by 1000
bedtools nuc -fi GRCh38/WholeGenomeFasta/genome.fa \\
-bed $1.regions.bed.gz \\
| pigz -c > $1.regions.gc.bed.gz
``` | [
"%prog",
"gcdepth",
"sample_name",
"tag"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/variation/cnv.py#L366-L437 | train | 200,918 |
tanghaibao/jcvi | jcvi/variation/cnv.py | exonunion | def exonunion(args):
"""
%prog exonunion gencode.v26.annotation.exon.bed
Collapse overlapping exons within the same gene. File
`gencode.v26.annotation.exon.bed` can be generated by:
$ zcat gencode.v26.annotation.gtf.gz | awk 'OFS="\t" {if ($3=="exon")
{print $1,$4-1,$5,$10,$12,$14,$16,$7}}' | tr -d '";'
"""
p = OptionParser(exonunion.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
gencodebed, = args
beds = BedTool(gencodebed)
# fields[3] is gene_id; fields[6] is gene_name
for g, gb in groupby(beds, key=lambda x: x.fields[3]):
gb = BedTool(gb)
sys.stdout.write(str(gb.sort().merge(c="4,5,6,7",
o=','.join(['first'] * 4)))) | python | def exonunion(args):
"""
%prog exonunion gencode.v26.annotation.exon.bed
Collapse overlapping exons within the same gene. File
`gencode.v26.annotation.exon.bed` can be generated by:
$ zcat gencode.v26.annotation.gtf.gz | awk 'OFS="\t" {if ($3=="exon")
{print $1,$4-1,$5,$10,$12,$14,$16,$7}}' | tr -d '";'
"""
p = OptionParser(exonunion.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
gencodebed, = args
beds = BedTool(gencodebed)
# fields[3] is gene_id; fields[6] is gene_name
for g, gb in groupby(beds, key=lambda x: x.fields[3]):
gb = BedTool(gb)
sys.stdout.write(str(gb.sort().merge(c="4,5,6,7",
o=','.join(['first'] * 4)))) | [
"def",
"exonunion",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"exonunion",
".",
"__doc__",
")",
"opts",
",",
"args",
"=",
"p",
".",
"parse_args",
"(",
"args",
")",
"if",
"len",
"(",
"args",
")",
"!=",
"1",
":",
"sys",
".",
"exit",
"("... | %prog exonunion gencode.v26.annotation.exon.bed
Collapse overlapping exons within the same gene. File
`gencode.v26.annotation.exon.bed` can be generated by:
$ zcat gencode.v26.annotation.gtf.gz | awk 'OFS="\t" {if ($3=="exon")
{print $1,$4-1,$5,$10,$12,$14,$16,$7}}' | tr -d '";' | [
"%prog",
"exonunion",
"gencode",
".",
"v26",
".",
"annotation",
".",
"exon",
".",
"bed"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/variation/cnv.py#L440-L462 | train | 200,919 |
tanghaibao/jcvi | jcvi/variation/cnv.py | summarycanvas | def summarycanvas(args):
"""
%prog summarycanvas output.vcf.gz
Generate tag counts (GAIN/LOSS/REF/LOH) of segments in Canvas output.
"""
p = OptionParser(summarycanvas.__doc__)
opts, args = p.parse_args(args)
if len(args) < 1:
sys.exit(not p.print_help())
for vcffile in args:
counter = get_gain_loss_summary(vcffile)
pf = op.basename(vcffile).split(".")[0]
print(pf + " " +
" ".join("{}:{}".format(k, v)
for k, v in sorted(counter.items()))) | python | def summarycanvas(args):
"""
%prog summarycanvas output.vcf.gz
Generate tag counts (GAIN/LOSS/REF/LOH) of segments in Canvas output.
"""
p = OptionParser(summarycanvas.__doc__)
opts, args = p.parse_args(args)
if len(args) < 1:
sys.exit(not p.print_help())
for vcffile in args:
counter = get_gain_loss_summary(vcffile)
pf = op.basename(vcffile).split(".")[0]
print(pf + " " +
" ".join("{}:{}".format(k, v)
for k, v in sorted(counter.items()))) | [
"def",
"summarycanvas",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"summarycanvas",
".",
"__doc__",
")",
"opts",
",",
"args",
"=",
"p",
".",
"parse_args",
"(",
"args",
")",
"if",
"len",
"(",
"args",
")",
"<",
"1",
":",
"sys",
".",
"exit"... | %prog summarycanvas output.vcf.gz
Generate tag counts (GAIN/LOSS/REF/LOH) of segments in Canvas output. | [
"%prog",
"summarycanvas",
"output",
".",
"vcf",
".",
"gz"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/variation/cnv.py#L478-L495 | train | 200,920 |
tanghaibao/jcvi | jcvi/variation/cnv.py | parse_segments | def parse_segments(vcffile):
""" Extract all copy number segments from a CANVAS file
VCF line looks like:
chr1 788879 Canvas:GAIN:chr1:788880-821005 N <CNV> 2 q10
SVTYPE=CNV;END=821005;CNVLEN=32126 RC:BC:CN:MCC 157:4:3:2
"""
from cStringIO import StringIO
from cyvcf2 import VCF
output = StringIO()
for v in VCF(vcffile):
chrom = v.CHROM
start = v.start
end = v.INFO.get('END') - 1
cn, = v.format('CN')[0]
print("\t".join(str(x) for x in (chrom, start, end, cn)), file=output)
beds = BedTool(output.getvalue(), from_string=True)
def parse_segments(vcffile):
    """ Extract all copy number segments from a CANVAS file
    VCF line looks like:
    chr1    788879  Canvas:GAIN:chr1:788880-821005  N       <CNV>   2       q10
    SVTYPE=CNV;END=821005;CNVLEN=32126      RC:BC:CN:MCC    157:4:3:2

    Returns a BedTool of (chrom, start, end, copy_number) intervals.
    """
    # cStringIO only exists on Python 2; fall back to io.StringIO on Python 3
    try:
        from cStringIO import StringIO
    except ImportError:
        from io import StringIO
    from cyvcf2 import VCF

    output = StringIO()
    for v in VCF(vcffile):
        chrom = v.CHROM
        start = v.start
        # INFO END is 1-based inclusive; convert to BED-style end
        end = v.INFO.get('END') - 1
        cn, = v.format('CN')[0]
        print("\t".join(str(x) for x in (chrom, start, end, cn)), file=output)

    beds = BedTool(output.getvalue(), from_string=True)
    return beds
"def",
"parse_segments",
"(",
"vcffile",
")",
":",
"from",
"cStringIO",
"import",
"StringIO",
"from",
"cyvcf2",
"import",
"VCF",
"output",
"=",
"StringIO",
"(",
")",
"for",
"v",
"in",
"VCF",
"(",
"vcffile",
")",
":",
"chrom",
"=",
"v",
".",
"CHROM",
"s... | Extract all copy number segments from a CANVAS file
VCF line looks like:
chr1 788879 Canvas:GAIN:chr1:788880-821005 N <CNV> 2 q10
SVTYPE=CNV;END=821005;CNVLEN=32126 RC:BC:CN:MCC 157:4:3:2 | [
"Extract",
"all",
"copy",
"number",
"segments",
"from",
"a",
"CANVAS",
"file"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/variation/cnv.py#L498-L517 | train | 200,921 |
tanghaibao/jcvi | jcvi/variation/cnv.py | counter_mean_and_median | def counter_mean_and_median(counter):
""" Calculate the mean and median value of a counter
"""
if not counter:
return np.nan, np.nan
total = sum(v for k, v in counter.items())
mid = total / 2
weighted_sum = 0
items_seen = 0
median_found = False
for k, v in sorted(counter.items()):
weighted_sum += k * v
items_seen += v
if not median_found and items_seen >= mid:
median = k
median_found = True
mean = weighted_sum * 1. / total
def counter_mean_and_median(counter):
    """ Calculate the mean and median value of a counter
    """
    # An empty counter has no defined statistics
    if not counter:
        return np.nan, np.nan

    total = sum(counter.values())
    mid = total / 2
    weighted_sum = 0
    seen = 0
    median = None
    median_found = False
    for value, count in sorted(counter.items()):
        weighted_sum += value * count
        seen += count
        # The median is the first value at which we reach the halfway mark
        if not median_found and seen >= mid:
            median = value
            median_found = True
    mean = weighted_sum * 1. / total
    return mean, median
"def",
"counter_mean_and_median",
"(",
"counter",
")",
":",
"if",
"not",
"counter",
":",
"return",
"np",
".",
"nan",
",",
"np",
".",
"nan",
"total",
"=",
"sum",
"(",
"v",
"for",
"k",
",",
"v",
"in",
"counter",
".",
"items",
"(",
")",
")",
"mid",
... | Calculate the mean and median value of a counter | [
"Calculate",
"the",
"mean",
"and",
"median",
"value",
"of",
"a",
"counter"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/variation/cnv.py#L520-L538 | train | 200,922 |
tanghaibao/jcvi | jcvi/variation/cnv.py | vcf_to_df_worker | def vcf_to_df_worker(arg):
""" Convert CANVAS vcf to a dict, single thread
"""
canvasvcf, exonbed, i = arg
logging.debug("Working on job {}: {}".format(i, canvasvcf))
samplekey = op.basename(canvasvcf).split(".")[0].rsplit('_', 1)[0]
d = {'SampleKey': samplekey}
exons = BedTool(exonbed)
cn = parse_segments(canvasvcf)
overlaps = exons.intersect(cn, wao=True)
gcn_store = {}
for ov in overlaps:
# Example of ov.fields:
# [u'chr1', u'11868', u'12227', u'ENSG00000223972.5',
# u'ENST00000456328.2', u'transcribed_unprocessed_pseudogene',
# u'DDX11L1', u'.', u'-1', u'-1', u'.', u'0']
gene_name = "|".join((ov.fields[6], ov.fields[3], ov.fields[5]))
if gene_name not in gcn_store:
gcn_store[gene_name] = defaultdict(int)
cn = ov.fields[-2]
if cn == ".":
continue
cn = int(cn)
if cn > 10:
cn = 10
amt = int(ov.fields[-1])
gcn_store[gene_name][cn] += amt
for k, v in sorted(gcn_store.items()):
v_mean, v_median = counter_mean_and_median(v)
d[k + ".avgcn"] = v_mean
d[k + ".medcn"] = v_median
cleanup()
def vcf_to_df_worker(arg):
    """ Convert CANVAS vcf to a dict, single thread
    """
    canvasvcf, exonbed, i = arg
    logging.debug("Working on job {}: {}".format(i, canvasvcf))
    samplekey = op.basename(canvasvcf).split(".")[0].rsplit('_', 1)[0]
    d = {'SampleKey': samplekey}

    exons = BedTool(exonbed)
    segments = parse_segments(canvasvcf)
    # Per-gene counter of {copy number: overlapped bases}
    gcn_store = {}
    for ov in exons.intersect(segments, wao=True):
        # Example of ov.fields:
        # [u'chr1', u'11868', u'12227', u'ENSG00000223972.5',
        #  u'ENST00000456328.2', u'transcribed_unprocessed_pseudogene',
        #  u'DDX11L1', u'.', u'-1', u'-1', u'.', u'0']
        gene_name = "|".join((ov.fields[6], ov.fields[3], ov.fields[5]))
        counter = gcn_store.setdefault(gene_name, defaultdict(int))
        cn_field = ov.fields[-2]
        if cn_field == ".":
            continue
        # Cap copy number at 10 so extreme amplifications bin together
        cn = min(int(cn_field), 10)
        counter[cn] += int(ov.fields[-1])

    for gene, counter in sorted(gcn_store.items()):
        v_mean, v_median = counter_mean_and_median(counter)
        d[gene + ".avgcn"] = v_mean
        d[gene + ".medcn"] = v_median
    cleanup()
    return d
"def",
"vcf_to_df_worker",
"(",
"arg",
")",
":",
"canvasvcf",
",",
"exonbed",
",",
"i",
"=",
"arg",
"logging",
".",
"debug",
"(",
"\"Working on job {}: {}\"",
".",
"format",
"(",
"i",
",",
"canvasvcf",
")",
")",
"samplekey",
"=",
"op",
".",
"basename",
"... | Convert CANVAS vcf to a dict, single thread | [
"Convert",
"CANVAS",
"vcf",
"to",
"a",
"dict",
"single",
"thread"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/variation/cnv.py#L578-L613 | train | 200,923 |
tanghaibao/jcvi | jcvi/variation/cnv.py | vcf_to_df | def vcf_to_df(canvasvcfs, exonbed, cpus):
""" Compile a number of vcf files into tsv file for easy manipulation
"""
df = pd.DataFrame()
p = Pool(processes=cpus)
results = []
args = [(x, exonbed, i) for (i, x) in enumerate(canvasvcfs)]
r = p.map_async(vcf_to_df_worker, args,
callback=results.append)
r.wait()
for res in results:
df = df.append(res, ignore_index=True)
def vcf_to_df(canvasvcfs, exonbed, cpus):
    """ Compile a number of vcf files into tsv file for easy manipulation

    Each vcf is handed to vcf_to_df_worker() in a worker process; the
    per-sample row dicts are assembled into a single DataFrame.
    """
    p = Pool(processes=cpus)
    results = []
    args = [(x, exonbed, i) for (i, x) in enumerate(canvasvcfs)]
    r = p.map_async(vcf_to_df_worker, args,
                    callback=results.append)
    r.wait()
    # `results` holds one list of row dicts per map_async callback. Flatten
    # and build the frame in one shot: DataFrame.append() was deprecated and
    # removed in pandas 2.0, and appending inside a loop is quadratic.
    rows = [row for batch in results for row in batch]
    df = pd.DataFrame(rows)
    return df
"def",
"vcf_to_df",
"(",
"canvasvcfs",
",",
"exonbed",
",",
"cpus",
")",
":",
"df",
"=",
"pd",
".",
"DataFrame",
"(",
")",
"p",
"=",
"Pool",
"(",
"processes",
"=",
"cpus",
")",
"results",
"=",
"[",
"]",
"args",
"=",
"[",
"(",
"x",
",",
"exonbed",... | Compile a number of vcf files into tsv file for easy manipulation | [
"Compile",
"a",
"number",
"of",
"vcf",
"files",
"into",
"tsv",
"file",
"for",
"easy",
"manipulation"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/variation/cnv.py#L616-L629 | train | 200,924 |
tanghaibao/jcvi | jcvi/variation/cnv.py | df_to_tsv | def df_to_tsv(df, tsvfile, suffix):
""" Serialize the dataframe as a tsv
"""
tsvfile += suffix
columns = ["SampleKey"] + sorted(x for x in df.columns
if x.endswith(suffix))
tf = df.reindex_axis(columns, axis='columns')
tf.sort_values("SampleKey")
tf.to_csv(tsvfile, sep='\t', index=False, float_format='%.4g', na_rep="na")
print("TSV output written to `{}` (# samples={})"\
def df_to_tsv(df, tsvfile, suffix):
    """ Serialize the dataframe as a tsv

    Writes the SampleKey column plus every column ending in `suffix`,
    sorted by SampleKey, to the file `tsvfile + suffix`.
    """
    tsvfile += suffix
    columns = ["SampleKey"] + sorted(x for x in df.columns
                                     if x.endswith(suffix))
    # reindex_axis() was removed from pandas; reindex(columns=...) is the
    # supported spelling
    tf = df.reindex(columns=columns)
    # sort_values() returns a new frame - the original discarded the result,
    # so the output was never actually sorted
    tf = tf.sort_values("SampleKey")
    tf.to_csv(tsvfile, sep='\t', index=False, float_format='%.4g', na_rep="na")
    print("TSV output written to `{}` (# samples={})"\
            .format(tsvfile, tf.shape[0]), file=sys.stderr)
"def",
"df_to_tsv",
"(",
"df",
",",
"tsvfile",
",",
"suffix",
")",
":",
"tsvfile",
"+=",
"suffix",
"columns",
"=",
"[",
"\"SampleKey\"",
"]",
"+",
"sorted",
"(",
"x",
"for",
"x",
"in",
"df",
".",
"columns",
"if",
"x",
".",
"endswith",
"(",
"suffix",
... | Serialize the dataframe as a tsv | [
"Serialize",
"the",
"dataframe",
"as",
"a",
"tsv"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/variation/cnv.py#L632-L642 | train | 200,925 |
tanghaibao/jcvi | jcvi/variation/cnv.py | plot | def plot(args):
"""
%prog plot workdir sample chr1,chr2
Plot some chromosomes for visual proof. Separate multiple chromosomes with
comma. Must contain folder workdir/sample-cn/.
"""
from jcvi.graphics.base import savefig
p = OptionParser(plot.__doc__)
opts, args, iopts = p.set_image_options(args, figsize="8x7", format="png")
if len(args) != 3:
sys.exit(not p.print_help())
workdir, sample_key, chrs = args
chrs = chrs.split(",")
hmm = CopyNumberHMM(workdir=workdir)
hmm.plot(sample_key, chrs=chrs)
image_name = sample_key + "_cn." + iopts.format
def plot(args):
    """
    %prog plot workdir sample chr1,chr2
    Plot some chromosomes for visual proof. Separate multiple chromosomes with
    comma. Must contain folder workdir/sample-cn/.
    """
    from jcvi.graphics.base import savefig

    p = OptionParser(plot.__doc__)
    opts, args, iopts = p.set_image_options(args, figsize="8x7", format="png")

    if len(args) != 3:
        sys.exit(not p.print_help())

    workdir, sample_key, chrs = args
    # Accept a comma-separated list of chromosome names
    chr_list = chrs.split(",")
    hmm = CopyNumberHMM(workdir=workdir)
    hmm.plot(sample_key, chrs=chr_list)

    image_name = "{}_cn.{}".format(sample_key, iopts.format)
    savefig(image_name, dpi=iopts.dpi, iopts=iopts)
"def",
"plot",
"(",
"args",
")",
":",
"from",
"jcvi",
".",
"graphics",
".",
"base",
"import",
"savefig",
"p",
"=",
"OptionParser",
"(",
"plot",
".",
"__doc__",
")",
"opts",
",",
"args",
",",
"iopts",
"=",
"p",
".",
"set_image_options",
"(",
"args",
"... | %prog plot workdir sample chr1,chr2
Plot some chromosomes for visual proof. Separate multiple chromosomes with
comma. Must contain folder workdir/sample-cn/. | [
"%prog",
"plot",
"workdir",
"sample",
"chr1",
"chr2"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/variation/cnv.py#L672-L693 | train | 200,926 |
tanghaibao/jcvi | jcvi/variation/cnv.py | sweep | def sweep(args):
"""
%prog sweep workdir 102340_NA12878
Write a number of commands to sweep parameter space.
"""
p = OptionParser(sweep.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
workdir, sample_key = args
golden_ratio = (1 + 5 ** .5) / 2
cmd = "python -m jcvi.variation.cnv hmm {} {}".format(workdir, sample_key)
cmd += " --mu {:.5f} --sigma {:.3f} --threshold {:.3f}"
mus = [.00012 * golden_ratio ** x for x in range(10)]
sigmas = [.0012 * golden_ratio ** x for x in range(20)]
thresholds = [.1 * golden_ratio ** x for x in range(10)]
print(mus, file=sys.stderr)
print(sigmas, file=sys.stderr)
print(thresholds, file=sys.stderr)
for mu in mus:
for sigma in sigmas:
for threshold in thresholds:
tcmd = cmd.format(mu, sigma, threshold)
def sweep(args):
    """
    %prog sweep workdir 102340_NA12878
    Write a number of commands to sweep parameter space.
    """
    p = OptionParser(sweep.__doc__)
    opts, args = p.parse_args(args)

    if len(args) != 2:
        sys.exit(not p.print_help())

    workdir, sample_key = args
    # Sample each parameter on a geometric grid with ratio phi
    golden_ratio = (1 + 5 ** .5) / 2
    cmd = "python -m jcvi.variation.cnv hmm {} {}".format(workdir, sample_key)
    cmd += " --mu {:.5f} --sigma {:.3f} --threshold {:.3f}"
    mus = [.00012 * golden_ratio ** x for x in range(10)]
    sigmas = [.0012 * golden_ratio ** x for x in range(20)]
    thresholds = [.1 * golden_ratio ** x for x in range(10)]
    # Record the grids on stderr for reference
    print(mus, file=sys.stderr)
    print(sigmas, file=sys.stderr)
    print(thresholds, file=sys.stderr)

    # One hmm command per (mu, sigma, threshold) combination
    for mu in mus:
        for sigma in sigmas:
            for threshold in thresholds:
                print(cmd.format(mu, sigma, threshold))
"def",
"sweep",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"sweep",
".",
"__doc__",
")",
"opts",
",",
"args",
"=",
"p",
".",
"parse_args",
"(",
"args",
")",
"if",
"len",
"(",
"args",
")",
"!=",
"2",
":",
"sys",
".",
"exit",
"(",
"not... | %prog sweep workdir 102340_NA12878
Write a number of commands to sweep parameter space. | [
"%prog",
"sweep",
"workdir",
"102340_NA12878"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/variation/cnv.py#L696-L722 | train | 200,927 |
tanghaibao/jcvi | jcvi/variation/cnv.py | cib | def cib(args):
"""
%prog cib bamfile samplekey
Convert BAM to CIB (a binary storage of int8 per base).
"""
p = OptionParser(cib.__doc__)
p.add_option("--prefix", help="Report seqids with this prefix only")
p.set_cpus()
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
bamfile, samplekey = args
mkdir(samplekey)
bam = pysam.AlignmentFile(bamfile, "rb")
refs = [x for x in bam.header["SQ"]]
prefix = opts.prefix
if prefix:
refs = [x for x in refs if x["SN"].startswith(prefix)]
task_args = []
for r in refs:
task_args.append((bamfile, r, samplekey))
cpus = min(opts.cpus, len(task_args))
logging.debug("Use {} cpus".format(cpus))
p = Pool(processes=cpus)
for res in p.imap(bam_to_cib, task_args):
def cib(args):
    """
    %prog cib bamfile samplekey
    Convert BAM to CIB (a binary storage of int8 per base).
    """
    p = OptionParser(cib.__doc__)
    p.add_option("--prefix", help="Report seqids with this prefix only")
    p.set_cpus()
    opts, args = p.parse_args(args)

    if len(args) != 2:
        sys.exit(not p.print_help())

    bamfile, samplekey = args
    mkdir(samplekey)
    bam = pysam.AlignmentFile(bamfile, "rb")
    # One task per reference sequence, optionally filtered by --prefix
    refs = [x for x in bam.header["SQ"]]
    prefix = opts.prefix
    if prefix:
        refs = [x for x in refs if x["SN"].startswith(prefix)]

    task_args = [(bamfile, r, samplekey) for r in refs]
    cpus = min(opts.cpus, len(task_args))
    logging.debug("Use {} cpus".format(cpus))

    pool = Pool(processes=cpus)
    # Drain the iterator; bam_to_cib writes its output as a side effect
    for _ in pool.imap(bam_to_cib, task_args):
        continue
"def",
"cib",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"cib",
".",
"__doc__",
")",
"p",
".",
"add_option",
"(",
"\"--prefix\"",
",",
"help",
"=",
"\"Report seqids with this prefix only\"",
")",
"p",
".",
"set_cpus",
"(",
")",
"opts",
",",
"a... | %prog cib bamfile samplekey
Convert BAM to CIB (a binary storage of int8 per base). | [
"%prog",
"cib",
"bamfile",
"samplekey"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/variation/cnv.py#L781-L811 | train | 200,928 |
tanghaibao/jcvi | jcvi/variation/cnv.py | batchcn | def batchcn(args):
"""
%prog batchcn workdir samples.csv
Run CNV segmentation caller in batch mode. Scans a workdir.
"""
p = OptionParser(batchcn.__doc__)
p.add_option("--upload", default="s3://hli-mv-data-science/htang/ccn",
help="Upload cn and seg results to s3")
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
workdir, samples = args
upload = opts.upload
store = upload + "/{}/*.seg".format(workdir)
computed = [op.basename(x).split(".")[0] for x in glob_s3(store)]
computed = set(computed)
# Generate a bunch of cn commands
fp = open(samples)
nskipped = ntotal = 0
cmd = "python -m jcvi.variation.cnv cn --hmm --cleanup {}".format(workdir)
for row in fp:
samplekey, path = row.strip().split(",")
ntotal += 1
if samplekey in computed:
nskipped += 1
continue
print(" ".join((cmd, samplekey, path)))
def batchcn(args):
    """
    %prog batchcn workdir samples.csv
    Run CNV segmentation caller in batch mode. Scans a workdir.
    """
    p = OptionParser(batchcn.__doc__)
    p.add_option("--upload", default="s3://hli-mv-data-science/htang/ccn",
                 help="Upload cn and seg results to s3")
    opts, args = p.parse_args(args)

    if len(args) != 2:
        sys.exit(not p.print_help())

    workdir, samples = args
    upload = opts.upload
    # Samples that already have a .seg result on S3 can be skipped
    store = upload + "/{}/*.seg".format(workdir)
    computed = set(op.basename(x).split(".")[0] for x in glob_s3(store))

    # Emit one cn command per sample that still needs processing
    fp = open(samples)
    nskipped = ntotal = 0
    cmd = "python -m jcvi.variation.cnv cn --hmm --cleanup {}".format(workdir)
    for row in fp:
        samplekey, path = row.strip().split(",")
        ntotal += 1
        if samplekey in computed:
            nskipped += 1
            continue
        print(" ".join((cmd, samplekey, path)))

    logging.debug("Skipped: {}".format(percentage(nskipped, ntotal)))
"def",
"batchcn",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"batchcn",
".",
"__doc__",
")",
"p",
".",
"add_option",
"(",
"\"--upload\"",
",",
"default",
"=",
"\"s3://hli-mv-data-science/htang/ccn\"",
",",
"help",
"=",
"\"Upload cn and seg results to s3... | %prog batchcn workdir samples.csv
Run CNV segmentation caller in batch mode. Scans a workdir. | [
"%prog",
"batchcn",
"workdir",
"samples",
".",
"csv"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/variation/cnv.py#L814-L846 | train | 200,929 |
tanghaibao/jcvi | jcvi/variation/cnv.py | hmm | def hmm(args):
"""
%prog hmm workdir sample_key
Run CNV segmentation caller. The workdir must contain a subfolder called
`sample_key-cn` that contains CN for each chromosome. A `beta` directory
that contains scaler for each bin must also be present in the current
directory.
"""
p = OptionParser(hmm.__doc__)
p.add_option("--mu", default=.003, type="float",
help="Transition probability")
p.add_option("--sigma", default=.1, type="float",
help="Standard deviation of Gaussian emission distribution")
p.add_option("--threshold", default=1, type="float",
help="Standard deviation must be < this "
"in the baseline population")
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
workdir, sample_key = args
model = CopyNumberHMM(workdir=workdir, mu=opts.mu, sigma=opts.sigma,
threshold=opts.threshold)
events = model.run(sample_key)
params = ".mu-{}.sigma-{}.threshold-{}"\
.format(opts.mu, opts.sigma, opts.threshold)
hmmfile = op.join(workdir, sample_key + params + ".seg")
fw = open(hmmfile, "w")
nevents = 0
for mean_cn, rr, event in events:
if event is None:
continue
print(" ".join((event.bedline, sample_key)), file=fw)
nevents += 1
fw.close()
logging.debug("A total of {} aberrant events written to `{}`"
.format(nevents, hmmfile))
def hmm(args):
    """
    %prog hmm workdir sample_key
    Run CNV segmentation caller. The workdir must contain a subfolder called
    `sample_key-cn` that contains CN for each chromosome. A `beta` directory
    that contains scaler for each bin must also be present in the current
    directory.
    """
    p = OptionParser(hmm.__doc__)
    p.add_option("--mu", default=.003, type="float",
                 help="Transition probability")
    p.add_option("--sigma", default=.1, type="float",
                 help="Standard deviation of Gaussian emission distribution")
    p.add_option("--threshold", default=1, type="float",
                 help="Standard deviation must be < this "
                      "in the baseline population")
    opts, args = p.parse_args(args)

    if len(args) != 2:
        sys.exit(not p.print_help())

    workdir, sample_key = args
    model = CopyNumberHMM(workdir=workdir, mu=opts.mu, sigma=opts.sigma,
                          threshold=opts.threshold)
    events = model.run(sample_key)

    # Encode the parameter set in the output filename so sweeps don't clash
    params = ".mu-{}.sigma-{}.threshold-{}"\
                .format(opts.mu, opts.sigma, opts.threshold)
    hmmfile = op.join(workdir, sample_key + params + ".seg")

    nevents = 0
    with open(hmmfile, "w") as fw:
        for mean_cn, rr, event in events:
            if event is None:
                continue
            print(" ".join((event.bedline, sample_key)), file=fw)
            nevents += 1

    logging.debug("A total of {} aberrant events written to `{}`"
                  .format(nevents, hmmfile))
    return hmmfile
"def",
"hmm",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"hmm",
".",
"__doc__",
")",
"p",
".",
"add_option",
"(",
"\"--mu\"",
",",
"default",
"=",
".003",
",",
"type",
"=",
"\"float\"",
",",
"help",
"=",
"\"Transition probability\"",
")",
"p... | %prog hmm workdir sample_key
Run CNV segmentation caller. The workdir must contain a subfolder called
`sample_key-cn` that contains CN for each chromosome. A `beta` directory
that contains scaler for each bin must also be present in the current
directory. | [
"%prog",
"hmm",
"workdir",
"sample_key"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/variation/cnv.py#L849-L888 | train | 200,930 |
tanghaibao/jcvi | jcvi/variation/cnv.py | batchccn | def batchccn(args):
"""
%prog batchccn test.csv
Run CCN script in batch. Write makefile.
"""
p = OptionParser(batchccn.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
csvfile, = args
mm = MakeManager()
pf = op.basename(csvfile).split(".")[0]
mkdir(pf)
header = next(open(csvfile))
header = None if header.strip().endswith(".bam") else "infer"
logging.debug("Header={}".format(header))
df = pd.read_csv(csvfile, header=header)
cmd = "perl /mnt/software/ccn_gcn_hg38_script/ccn_gcn_hg38.pl"
cmd += " -n {} -b {}"
cmd += " -o {} -r hg38".format(pf)
for i, (sample_key, bam) in df.iterrows():
cmdi = cmd.format(sample_key, bam)
outfile = "{}/{}/{}.ccn".format(pf, sample_key, sample_key)
mm.add(csvfile, outfile, cmdi)
def batchccn(args):
    """
    %prog batchccn test.csv
    Run CCN script in batch. Write makefile.
    """
    p = OptionParser(batchccn.__doc__)
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    csvfile, = args
    mm = MakeManager()
    pf = op.basename(csvfile).split(".")[0]
    mkdir(pf)

    # A header line is assumed unless the first line already looks like a
    # data row (i.e. ends with a .bam path)
    first_line = next(open(csvfile))
    header = None if first_line.strip().endswith(".bam") else "infer"
    logging.debug("Header={}".format(header))
    df = pd.read_csv(csvfile, header=header)

    cmd = "perl /mnt/software/ccn_gcn_hg38_script/ccn_gcn_hg38.pl"
    cmd += " -n {} -b {}"
    cmd += " -o {} -r hg38".format(pf)
    # One makefile rule per (sample, bam) pair
    for i, (sample_key, bam) in df.iterrows():
        outfile = "{}/{}/{}.ccn".format(pf, sample_key, sample_key)
        mm.add(csvfile, outfile, cmd.format(sample_key, bam))
    mm.write()
"def",
"batchccn",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"batchccn",
".",
"__doc__",
")",
"opts",
",",
"args",
"=",
"p",
".",
"parse_args",
"(",
"args",
")",
"if",
"len",
"(",
"args",
")",
"!=",
"1",
":",
"sys",
".",
"exit",
"(",
... | %prog batchccn test.csv
Run CCN script in batch. Write makefile. | [
"%prog",
"batchccn",
"test",
".",
"csv"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/variation/cnv.py#L891-L919 | train | 200,931 |
tanghaibao/jcvi | jcvi/variation/cnv.py | mergecn | def mergecn(args):
"""
%prog mergecn FACE.csv
Compile matrix of GC-corrected copy numbers. Place a bunch of folders in
csv file. Each folder will be scanned, one chromosomes after another.
"""
p = OptionParser(mergecn.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
csvfile, = args
samples = [x.replace("-cn", "").strip().strip("/") for x in open(csvfile)]
betadir = "beta"
mkdir(betadir)
for seqid in allsomes:
names = [op.join(s + "-cn", "{}.{}.cn".
format(op.basename(s), seqid)) for s in samples]
arrays = [np.fromfile(name, dtype=np.float) for name in names]
shapes = [x.shape[0] for x in arrays]
med_shape = np.median(shapes)
arrays = [x for x in arrays if x.shape[0] == med_shape]
ploidy = 2 if seqid not in ("chrY", "chrM") else 1
if seqid in sexsomes:
chr_med = [np.median([x for x in a if x > 0]) for a in arrays]
chr_med = np.array(chr_med)
idx = get_kmeans(chr_med, k=2)
zero_med = np.median(chr_med[idx == 0])
one_med = np.median(chr_med[idx == 1])
logging.debug("K-means with {} c0:{} c1:{}"
.format(seqid, zero_med, one_med))
higher_idx = 1 if one_med > zero_med else 0
# Use the higher mean coverage componen
arrays = np.array(arrays)[idx == higher_idx]
arrays = [[x] for x in arrays]
ar = np.concatenate(arrays)
print(seqid, ar.shape)
rows, columns = ar.shape
beta = []
std = []
for j in xrange(columns):
a = ar[:, j]
beta.append(np.median(a))
std.append(np.std(a) / np.mean(a))
beta = np.array(beta) / ploidy
betafile = op.join(betadir, "{}.beta".format(seqid))
beta.tofile(betafile)
stdfile = op.join(betadir, "{}.std".format(seqid))
std = np.array(std)
std.tofile(stdfile)
logging.debug("Written to `{}`".format(betafile))
def mergecn(args):
    """
    %prog mergecn FACE.csv
    Compile matrix of GC-corrected copy numbers. Place a bunch of folders in
    csv file. Each folder will be scanned, one chromosomes after another.
    """
    p = OptionParser(mergecn.__doc__)
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    csvfile, = args
    samples = [x.replace("-cn", "").strip().strip("/") for x in open(csvfile)]
    betadir = "beta"
    mkdir(betadir)
    for seqid in allsomes:
        names = [op.join(s + "-cn", "{}.{}.cn".
                         format(op.basename(s), seqid)) for s in samples]
        arrays = [np.fromfile(name, dtype=np.float) for name in names]
        # Drop samples whose bin count deviates from the majority
        shapes = [x.shape[0] for x in arrays]
        med_shape = np.median(shapes)
        arrays = [x for x in arrays if x.shape[0] == med_shape]
        ploidy = 2 if seqid not in ("chrY", "chrM") else 1
        if seqid in sexsomes:
            # Cluster samples into two coverage groups (e.g. XX vs XY) and
            # keep only the higher-coverage component
            chr_med = [np.median([x for x in a if x > 0]) for a in arrays]
            chr_med = np.array(chr_med)
            idx = get_kmeans(chr_med, k=2)
            zero_med = np.median(chr_med[idx == 0])
            one_med = np.median(chr_med[idx == 1])
            logging.debug("K-means with {} c0:{} c1:{}"
                          .format(seqid, zero_med, one_med))
            higher_idx = 1 if one_med > zero_med else 0
            # Use the higher mean coverage component
            arrays = np.array(arrays)[idx == higher_idx]
            arrays = [[x] for x in arrays]
        # NOTE(review): for non-sex chromosomes np.concatenate on 1-D arrays
        # yields a 1-D result, while the rows/columns unpack below assumes a
        # 2-D matrix - confirm the upstream array shapes
        ar = np.concatenate(arrays)
        print(seqid, ar.shape)
        rows, columns = ar.shape
        # Per-bin scaler (median CN / ploidy) and dispersion (CV)
        beta = []
        std = []
        for j in xrange(columns):
            a = ar[:, j]
            beta.append(np.median(a))
            std.append(np.std(a) / np.mean(a))
        beta = np.array(beta) / ploidy
        betafile = op.join(betadir, "{}.beta".format(seqid))
        beta.tofile(betafile)
        stdfile = op.join(betadir, "{}.std".format(seqid))
        np.array(std).tofile(stdfile)
        logging.debug("Written to `{}`".format(betafile))
        ar.tofile("{}.bin".format(seqid))
"def",
"mergecn",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"mergecn",
".",
"__doc__",
")",
"opts",
",",
"args",
"=",
"p",
".",
"parse_args",
"(",
"args",
")",
"if",
"len",
"(",
"args",
")",
"!=",
"1",
":",
"sys",
".",
"exit",
"(",
... | %prog mergecn FACE.csv
Compile matrix of GC-corrected copy numbers. Place a bunch of folders in
csv file. Each folder will be scanned, one chromosomes after another. | [
"%prog",
"mergecn",
"FACE",
".",
"csv"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/variation/cnv.py#L922-L975 | train | 200,932 |
tanghaibao/jcvi | jcvi/variation/cnv.py | CopyNumberHMM.annotate_segments | def annotate_segments(self, Z):
""" Report the copy number and start-end segment
"""
# We need a way to go from compressed idices to original indices
P = Z.copy()
P[~np.isfinite(P)] = -1
_, mapping = np.unique(np.cumsum(P >= 0), return_index=True)
dZ = Z.compressed()
uniq, idx = np.unique(dZ, return_inverse=True)
segments = []
for i, mean_cn in enumerate(uniq):
if not np.isfinite(mean_cn):
continue
for rr in contiguous_regions(idx == i):
segments.append((mean_cn, mapping[rr]))
def annotate_segments(self, Z):
    """ Report the copy number and start-end segment
    """
    # compressed() drops masked entries, so build a mapping from compressed
    # indices back to the original coordinates first
    P = Z.copy()
    P[~np.isfinite(P)] = -1
    _, mapping = np.unique(np.cumsum(P >= 0), return_index=True)

    dZ = Z.compressed()
    uniq, idx = np.unique(dZ, return_inverse=True)
    # Each contiguous run of the same finite value is one segment
    segments = [(mean_cn, mapping[run])
                for i, mean_cn in enumerate(uniq)
                if np.isfinite(mean_cn)
                for run in contiguous_regions(idx == i)]
    return segments
"def",
"annotate_segments",
"(",
"self",
",",
"Z",
")",
":",
"# We need a way to go from compressed idices to original indices",
"P",
"=",
"Z",
".",
"copy",
"(",
")",
"P",
"[",
"~",
"np",
".",
"isfinite",
"(",
"P",
")",
"]",
"=",
"-",
"1",
"_",
",",
"map... | Report the copy number and start-end segment | [
"Report",
"the",
"copy",
"number",
"and",
"start",
"-",
"end",
"segment"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/variation/cnv.py#L217-L234 | train | 200,933 |
tanghaibao/jcvi | jcvi/utils/aws.py | role | def role(args):
"""
%prog role htang
Change aws role.
"""
src_acct, src_username, dst_acct, dst_role = \
"205134639408 htang 114692162163 mvrad-datasci-role".split()
p = OptionParser(role.__doc__)
p.add_option("--profile", default="mvrad-datasci-role", help="Profile name")
p.add_option('--device',
default="arn:aws:iam::" + src_acct + ":mfa/" + src_username,
metavar='arn:aws:iam::123456788990:mfa/dudeman',
help="The MFA Device ARN. This value can also be "
"provided via the environment variable 'MFA_DEVICE' or"
" the ~/.aws/credentials variable 'aws_mfa_device'.")
p.add_option('--duration',
type=int, default=3600,
help="The duration, in seconds, that the temporary "
"credentials should remain valid. Minimum value: "
"900 (15 minutes). Maximum: 129600 (36 hours). "
"Defaults to 43200 (12 hours), or 3600 (one "
"hour) when using '--assume-role'. This value "
"can also be provided via the environment "
"variable 'MFA_STS_DURATION'. ")
p.add_option('--assume-role', '--assume',
default="arn:aws:iam::" + dst_acct + ":role/" + dst_role,
metavar='arn:aws:iam::123456788990:role/RoleName',
help="The ARN of the AWS IAM Role you would like to "
"assume, if specified. This value can also be provided"
" via the environment variable 'MFA_ASSUME_ROLE'")
p.add_option('--role-session-name',
help="Friendly session name required when using "
"--assume-role",
default=getpass.getuser())
p.add_option('--force',
help="Refresh credentials even if currently valid.",
action="store_true")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
# Use a config to check the expiration of session token
config = get_config(AWS_CREDS_PATH)
def role(args):
    """
    %prog role htang
    Change aws role.
    """
    src_acct, src_username, dst_acct, dst_role = \
        "205134639408 htang 114692162163 mvrad-datasci-role".split()
    # Source-account MFA device and the destination role to assume
    mfa_device = "arn:aws:iam::" + src_acct + ":mfa/" + src_username
    role_arn = "arn:aws:iam::" + dst_acct + ":role/" + dst_role

    p = OptionParser(role.__doc__)
    p.add_option("--profile", default="mvrad-datasci-role", help="Profile name")
    p.add_option('--device', default=mfa_device,
                 metavar='arn:aws:iam::123456788990:mfa/dudeman',
                 help="The MFA Device ARN. This value can also be "
                      "provided via the environment variable 'MFA_DEVICE' or"
                      " the ~/.aws/credentials variable 'aws_mfa_device'.")
    p.add_option('--duration', type=int, default=3600,
                 help="The duration, in seconds, that the temporary "
                      "credentials should remain valid. Minimum value: "
                      "900 (15 minutes). Maximum: 129600 (36 hours). "
                      "Defaults to 43200 (12 hours), or 3600 (one "
                      "hour) when using '--assume-role'. This value "
                      "can also be provided via the environment "
                      "variable 'MFA_STS_DURATION'. ")
    p.add_option('--assume-role', '--assume', default=role_arn,
                 metavar='arn:aws:iam::123456788990:role/RoleName',
                 help="The ARN of the AWS IAM Role you would like to "
                      "assume, if specified. This value can also be provided"
                      " via the environment variable 'MFA_ASSUME_ROLE'")
    p.add_option('--role-session-name', default=getpass.getuser(),
                 help="Friendly session name required when using "
                      "--assume-role")
    p.add_option('--force', action="store_true",
                 help="Refresh credentials even if currently valid.")
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    # Use a config to check the expiration of session token
    config = get_config(AWS_CREDS_PATH)
    validate(opts, config)
"def",
"role",
"(",
"args",
")",
":",
"src_acct",
",",
"src_username",
",",
"dst_acct",
",",
"dst_role",
"=",
"\"205134639408 htang 114692162163 mvrad-datasci-role\"",
".",
"split",
"(",
")",
"p",
"=",
"OptionParser",
"(",
"role",
".",
"__doc__",
")",
"p",
"."... | %prog role htang
Change aws role. | [
"%prog",
"role",
"htang"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/utils/aws.py#L483-L529 | train | 200,934 |
tanghaibao/jcvi | jcvi/projects/tgbs.py | query | def query(args):
"""
%prog query out.loci contig
Random access to loci file. This script helps speeding up debugging.
"""
p = OptionParser(query.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
locifile, contig = args
idx = build_index(locifile)
pos = idx[contig]
logging.debug("Contig {0} found at pos {1}".format(contig, pos))
fp = open(locifile)
fp.seek(pos)
section = []
while True:
row = fp.readline()
if row.startswith("//") and row.split()[1] != contig:
break
section.append(row)
print("".join(section)) | python | def query(args):
"""
%prog query out.loci contig
Random access to loci file. This script helps speeding up debugging.
"""
p = OptionParser(query.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
locifile, contig = args
idx = build_index(locifile)
pos = idx[contig]
logging.debug("Contig {0} found at pos {1}".format(contig, pos))
fp = open(locifile)
fp.seek(pos)
section = []
while True:
row = fp.readline()
if row.startswith("//") and row.split()[1] != contig:
break
section.append(row)
print("".join(section)) | [
"def",
"query",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"query",
".",
"__doc__",
")",
"opts",
",",
"args",
"=",
"p",
".",
"parse_args",
"(",
"args",
")",
"if",
"len",
"(",
"args",
")",
"!=",
"2",
":",
"sys",
".",
"exit",
"(",
"not... | %prog query out.loci contig
Random access to loci file. This script helps speeding up debugging. | [
"%prog",
"query",
"out",
".",
"loci",
"contig"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/projects/tgbs.py#L91-L115 | train | 200,935 |
def synteny(args):
    """
    %prog synteny mstmap.out novo.final.fasta reference.fasta
    Plot MSTmap against reference genome.
    """
    from jcvi.assembly.geneticmap import bed as geneticmap_bed
    from jcvi.apps.align import blat
    from jcvi.formats.blast import bed as blast_bed, best

    p = OptionParser(synteny.__doc__)
    opts, args = p.parse_args(args)

    if len(args) != 3:
        sys.exit(not p.print_help())

    mstmapout, novo, ref = args
    pf = mstmapout.split(".")[0]
    rf = ref.split(".")[0]

    # Strip marker suffixes (everything after the first '.') from the bed
    mstmapbed = geneticmap_bed([mstmapout])
    tmpbed = mstmapbed + ".tmp"
    sh("cut -d. -f1 {0}".format(mstmapbed), outfile=tmpbed)
    os.rename(tmpbed, pf + ".bed")

    # Unique marker ids, then pull their sequences out of the assembly
    idsfile = pf + ".ids"
    sh("cut -f4 {0} | cut -d. -f1 | sort -u".format(mstmapbed), outfile=idsfile)
    fastafile = pf + ".fasta"
    sh("faSomeRecords {0} {1} {2}".format(novo, idsfile, fastafile))

    # Map the markers onto the reference and keep only the best hits
    blastfile = blat([ref, fastafile])
    bestblastfile = best([blastfile])
    blastbed = blast_bed([bestblastfile])
    os.rename(blastbed, rf + ".bed")

    # Trivial anchors file: each marker pairs with itself
    anchorsfile = "{0}.{1}.anchors".format(pf, rf)
    sh("paste {0} {0}".format(idsfile), outfile=anchorsfile)
"""
%prog synteny mstmap.out novo.final.fasta reference.fasta
Plot MSTmap against reference genome.
"""
from jcvi.assembly.geneticmap import bed as geneticmap_bed
from jcvi.apps.align import blat
from jcvi.formats.blast import bed as blast_bed, best
p = OptionParser(synteny.__doc__)
opts, args = p.parse_args(args)
if len(args) != 3:
sys.exit(not p.print_help())
mstmapout, novo, ref = args
pf = mstmapout.split(".")[0]
rf = ref.split(".")[0]
mstmapbed = geneticmap_bed([mstmapout])
cmd = "cut -d. -f1 {0}".format(mstmapbed)
tmpbed = mstmapbed + ".tmp"
sh(cmd, outfile=tmpbed)
os.rename(tmpbed, pf + ".bed")
cmd = "cut -f4 {0} | cut -d. -f1 | sort -u".format(mstmapbed)
idsfile = pf + ".ids"
sh(cmd, outfile=idsfile)
fastafile = pf + ".fasta"
cmd = "faSomeRecords {0} {1} {2}".format(novo, idsfile, fastafile)
sh(cmd)
blastfile = blat([ref, fastafile])
bestblastfile = best([blastfile])
blastbed = blast_bed([bestblastfile])
os.rename(blastbed, rf + ".bed")
anchorsfile = "{0}.{1}.anchors".format(pf, rf)
cmd = "paste {0} {0}".format(idsfile)
sh(cmd, outfile=anchorsfile) | [
"def",
"synteny",
"(",
"args",
")",
":",
"from",
"jcvi",
".",
"assembly",
".",
"geneticmap",
"import",
"bed",
"as",
"geneticmap_bed",
"from",
"jcvi",
".",
"apps",
".",
"align",
"import",
"blat",
"from",
"jcvi",
".",
"formats",
".",
"blast",
"import",
"be... | %prog synteny mstmap.out novo.final.fasta reference.fasta
Plot MSTmap against reference genome. | [
"%prog",
"synteny",
"mstmap",
".",
"out",
"novo",
".",
"final",
".",
"fasta",
"reference",
".",
"fasta"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/projects/tgbs.py#L118-L156 | train | 200,936 |
tanghaibao/jcvi | jcvi/projects/tgbs.py | mstmap | def mstmap(args):
"""
%prog mstmap LMD50.snps.genotype.txt
Convert LMDs to MSTMAP input.
"""
from jcvi.assembly.geneticmap import MSTMatrix
p = OptionParser(mstmap.__doc__)
p.add_option("--population_type", default="RIL6",
help="Type of population, possible values are DH and RILd")
p.add_option("--missing_threshold", default=.5,
help="Missing threshold, .25 excludes any marker with >25% missing")
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
lmd, = args
fp = open(lmd)
next(fp) # Header
table = {"0": "-", "1": "A", "2": "B", "3": "X"}
mh = ["locus_name"] + fp.next().split()[4:]
genotypes = []
for row in fp:
atoms = row.split()
chr, pos, ref, alt = atoms[:4]
locus_name = ".".join((chr, pos))
codes = [table[x] for x in atoms[4:]]
genotypes.append([locus_name] + codes)
mm = MSTMatrix(genotypes, mh, opts.population_type, opts.missing_threshold)
mm.write(opts.outfile, header=True) | python | def mstmap(args):
"""
%prog mstmap LMD50.snps.genotype.txt
Convert LMDs to MSTMAP input.
"""
from jcvi.assembly.geneticmap import MSTMatrix
p = OptionParser(mstmap.__doc__)
p.add_option("--population_type", default="RIL6",
help="Type of population, possible values are DH and RILd")
p.add_option("--missing_threshold", default=.5,
help="Missing threshold, .25 excludes any marker with >25% missing")
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
lmd, = args
fp = open(lmd)
next(fp) # Header
table = {"0": "-", "1": "A", "2": "B", "3": "X"}
mh = ["locus_name"] + fp.next().split()[4:]
genotypes = []
for row in fp:
atoms = row.split()
chr, pos, ref, alt = atoms[:4]
locus_name = ".".join((chr, pos))
codes = [table[x] for x in atoms[4:]]
genotypes.append([locus_name] + codes)
mm = MSTMatrix(genotypes, mh, opts.population_type, opts.missing_threshold)
mm.write(opts.outfile, header=True) | [
"def",
"mstmap",
"(",
"args",
")",
":",
"from",
"jcvi",
".",
"assembly",
".",
"geneticmap",
"import",
"MSTMatrix",
"p",
"=",
"OptionParser",
"(",
"mstmap",
".",
"__doc__",
")",
"p",
".",
"add_option",
"(",
"\"--population_type\"",
",",
"default",
"=",
"\"R... | %prog mstmap LMD50.snps.genotype.txt
Convert LMDs to MSTMAP input. | [
"%prog",
"mstmap",
"LMD50",
".",
"snps",
".",
"genotype",
".",
"txt"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/projects/tgbs.py#L159-L192 | train | 200,937 |
tanghaibao/jcvi | jcvi/projects/tgbs.py | count | def count(args):
"""
%prog count cdhit.consensus.fasta
Scan the headers for the consensus clusters and count the number of reads.
"""
from jcvi.graphics.histogram import stem_leaf_plot
from jcvi.utils.cbook import SummaryStats
p = OptionParser(count.__doc__)
p.add_option("--csv", help="Write depth per contig to file")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
fastafile, = args
csv = open(opts.csv, "w") if opts.csv else None
f = Fasta(fastafile, lazy=True)
sizes = []
for desc, rec in f.iterdescriptions_ordered():
if desc.startswith("singleton"):
sizes.append(1)
continue
# consensus_for_cluster_0 with 63 sequences
if "with" in desc:
name, w, size, seqs = desc.split()
if csv:
print("\t".join(str(x)
for x in (name, size, len(rec))), file=csv)
assert w == "with"
sizes.append(int(size))
# MRD85:00603:02472;size=167;
else:
name, size, tail = desc.split(";")
sizes.append(int(size.replace("size=", "")))
if csv:
csv.close()
logging.debug("File written to `{0}`".format(opts.csv))
s = SummaryStats(sizes)
print(s, file=sys.stderr)
stem_leaf_plot(s.data, 0, 100, 20, title="Cluster size") | python | def count(args):
"""
%prog count cdhit.consensus.fasta
Scan the headers for the consensus clusters and count the number of reads.
"""
from jcvi.graphics.histogram import stem_leaf_plot
from jcvi.utils.cbook import SummaryStats
p = OptionParser(count.__doc__)
p.add_option("--csv", help="Write depth per contig to file")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
fastafile, = args
csv = open(opts.csv, "w") if opts.csv else None
f = Fasta(fastafile, lazy=True)
sizes = []
for desc, rec in f.iterdescriptions_ordered():
if desc.startswith("singleton"):
sizes.append(1)
continue
# consensus_for_cluster_0 with 63 sequences
if "with" in desc:
name, w, size, seqs = desc.split()
if csv:
print("\t".join(str(x)
for x in (name, size, len(rec))), file=csv)
assert w == "with"
sizes.append(int(size))
# MRD85:00603:02472;size=167;
else:
name, size, tail = desc.split(";")
sizes.append(int(size.replace("size=", "")))
if csv:
csv.close()
logging.debug("File written to `{0}`".format(opts.csv))
s = SummaryStats(sizes)
print(s, file=sys.stderr)
stem_leaf_plot(s.data, 0, 100, 20, title="Cluster size") | [
"def",
"count",
"(",
"args",
")",
":",
"from",
"jcvi",
".",
"graphics",
".",
"histogram",
"import",
"stem_leaf_plot",
"from",
"jcvi",
".",
"utils",
".",
"cbook",
"import",
"SummaryStats",
"p",
"=",
"OptionParser",
"(",
"count",
".",
"__doc__",
")",
"p",
... | %prog count cdhit.consensus.fasta
Scan the headers for the consensus clusters and count the number of reads. | [
"%prog",
"count",
"cdhit",
".",
"consensus",
".",
"fasta"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/projects/tgbs.py#L279-L324 | train | 200,938 |
tanghaibao/jcvi | jcvi/projects/tgbs.py | novo | def novo(args):
"""
%prog novo reads.fastq
Reference-free tGBS pipeline v1.
"""
from jcvi.assembly.kmer import jellyfish, histogram
from jcvi.assembly.preprocess import diginorm
from jcvi.formats.fasta import filter as fasta_filter, format
from jcvi.apps.cdhit import filter as cdhit_filter
p = OptionParser(novo.__doc__)
p.add_option("--technology", choices=("illumina", "454", "iontorrent"),
default="iontorrent", help="Sequencing platform")
p.set_depth(depth=50)
p.set_align(pctid=96)
p.set_home("cdhit", default="/usr/local/bin/")
p.set_home("fiona", default="/usr/local/bin/")
p.set_home("jellyfish", default="/usr/local/bin/")
p.set_cpus()
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
fastqfile, = args
cpus = opts.cpus
depth = opts.depth
pf, sf = fastqfile.rsplit(".", 1)
diginormfile = pf + ".diginorm." + sf
if need_update(fastqfile, diginormfile):
diginorm([fastqfile, "--single", "--depth={0}".format(depth)])
keepabund = fastqfile + ".keep.abundfilt"
sh("cp -s {0} {1}".format(keepabund, diginormfile))
jf = pf + "-K23.histogram"
if need_update(diginormfile, jf):
jellyfish([diginormfile, "--prefix={0}".format(pf),
"--cpus={0}".format(cpus),
"--jellyfish_home={0}".format(opts.jellyfish_home)])
genomesize = histogram([jf, pf, "23"])
fiona = pf + ".fiona.fa"
if need_update(diginormfile, fiona):
cmd = op.join(opts.fiona_home, "fiona")
cmd += " -g {0} -nt {1} --sequencing-technology {2}".\
format(genomesize, cpus, opts.technology)
cmd += " -vv {0} {1}".format(diginormfile, fiona)
logfile = pf + ".fiona.log"
sh(cmd, outfile=logfile, errfile=logfile)
dedup = "cdhit"
pctid = opts.pctid
cons = fiona + ".P{0}.{1}.consensus.fasta".format(pctid, dedup)
if need_update(fiona, cons):
deduplicate([fiona, "--consensus", "--reads",
"--pctid={0}".format(pctid),
"--cdhit_home={0}".format(opts.cdhit_home)])
filteredfile = pf + ".filtered.fasta"
if need_update(cons, filteredfile):
covfile = pf + ".cov.fasta"
cdhit_filter([cons, "--outfile={0}".format(covfile),
"--minsize={0}".format(depth / 5)])
fasta_filter([covfile, "50", "--outfile={0}".format(filteredfile)])
finalfile = pf + ".final.fasta"
if need_update(filteredfile, finalfile):
format([filteredfile, finalfile, "--sequential=replace",
"--prefix={0}_".format(pf)]) | python | def novo(args):
"""
%prog novo reads.fastq
Reference-free tGBS pipeline v1.
"""
from jcvi.assembly.kmer import jellyfish, histogram
from jcvi.assembly.preprocess import diginorm
from jcvi.formats.fasta import filter as fasta_filter, format
from jcvi.apps.cdhit import filter as cdhit_filter
p = OptionParser(novo.__doc__)
p.add_option("--technology", choices=("illumina", "454", "iontorrent"),
default="iontorrent", help="Sequencing platform")
p.set_depth(depth=50)
p.set_align(pctid=96)
p.set_home("cdhit", default="/usr/local/bin/")
p.set_home("fiona", default="/usr/local/bin/")
p.set_home("jellyfish", default="/usr/local/bin/")
p.set_cpus()
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
fastqfile, = args
cpus = opts.cpus
depth = opts.depth
pf, sf = fastqfile.rsplit(".", 1)
diginormfile = pf + ".diginorm." + sf
if need_update(fastqfile, diginormfile):
diginorm([fastqfile, "--single", "--depth={0}".format(depth)])
keepabund = fastqfile + ".keep.abundfilt"
sh("cp -s {0} {1}".format(keepabund, diginormfile))
jf = pf + "-K23.histogram"
if need_update(diginormfile, jf):
jellyfish([diginormfile, "--prefix={0}".format(pf),
"--cpus={0}".format(cpus),
"--jellyfish_home={0}".format(opts.jellyfish_home)])
genomesize = histogram([jf, pf, "23"])
fiona = pf + ".fiona.fa"
if need_update(diginormfile, fiona):
cmd = op.join(opts.fiona_home, "fiona")
cmd += " -g {0} -nt {1} --sequencing-technology {2}".\
format(genomesize, cpus, opts.technology)
cmd += " -vv {0} {1}".format(diginormfile, fiona)
logfile = pf + ".fiona.log"
sh(cmd, outfile=logfile, errfile=logfile)
dedup = "cdhit"
pctid = opts.pctid
cons = fiona + ".P{0}.{1}.consensus.fasta".format(pctid, dedup)
if need_update(fiona, cons):
deduplicate([fiona, "--consensus", "--reads",
"--pctid={0}".format(pctid),
"--cdhit_home={0}".format(opts.cdhit_home)])
filteredfile = pf + ".filtered.fasta"
if need_update(cons, filteredfile):
covfile = pf + ".cov.fasta"
cdhit_filter([cons, "--outfile={0}".format(covfile),
"--minsize={0}".format(depth / 5)])
fasta_filter([covfile, "50", "--outfile={0}".format(filteredfile)])
finalfile = pf + ".final.fasta"
if need_update(filteredfile, finalfile):
format([filteredfile, finalfile, "--sequential=replace",
"--prefix={0}_".format(pf)]) | [
"def",
"novo",
"(",
"args",
")",
":",
"from",
"jcvi",
".",
"assembly",
".",
"kmer",
"import",
"jellyfish",
",",
"histogram",
"from",
"jcvi",
".",
"assembly",
".",
"preprocess",
"import",
"diginorm",
"from",
"jcvi",
".",
"formats",
".",
"fasta",
"import",
... | %prog novo reads.fastq
Reference-free tGBS pipeline v1. | [
"%prog",
"novo",
"reads",
".",
"fastq"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/projects/tgbs.py#L327-L397 | train | 200,939 |
tanghaibao/jcvi | jcvi/projects/tgbs.py | novo2 | def novo2(args):
"""
%prog novo2 trimmed projectname
Reference-free tGBS pipeline v2.
"""
p = OptionParser(novo2.__doc__)
p.set_fastq_names()
p.set_align(pctid=95)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
trimmed, pf = args
pctid = opts.pctid
reads, samples = scan_read_files(trimmed, opts.names)
# Set up directory structure
clustdir = "uclust"
acdir = "allele_counts"
for d in (clustdir, acdir):
mkdir(d)
mm = MakeManager()
clustfiles = []
# Step 0 - clustering within sample
for s in samples:
flist = [x for x in reads if op.basename(x).split(".")[0] == s]
outfile = s + ".P{0}.clustS".format(pctid)
outfile = op.join(clustdir, outfile)
cmd = "python -m jcvi.apps.uclust cluster --cpus=8"
cmd += " {0} {1}".format(s, " ".join(flist))
cmd += " --outdir={0}".format(clustdir)
cmd += " --pctid={0}".format(pctid)
mm.add(flist, outfile, cmd)
clustfiles.append(outfile)
# Step 1 - make consensus within sample
allcons = []
for s, clustfile in zip(samples, clustfiles):
outfile = s + ".P{0}.consensus".format(pctid)
outfile = op.join(clustdir, outfile)
cmd = "python -m jcvi.apps.uclust consensus"
cmd += " {0}".format(clustfile)
mm.add(clustfile, outfile, cmd)
allcons.append(outfile)
# Step 2 - clustering across samples
clustSfile = pf + ".P{0}.clustS".format(pctid)
cmd = "python -m jcvi.apps.uclust mcluster {0}".format(" ".join(allcons))
cmd += " --prefix={0}".format(pf)
mm.add(allcons, clustSfile, cmd)
# Step 3 - make consensus across samples
locifile = pf + ".P{0}.loci".format(pctid)
cmd = "python -m jcvi.apps.uclust mconsensus {0}".format(" ".join(allcons))
cmd += " --prefix={0}".format(pf)
mm.add(allcons + [clustSfile], locifile, cmd)
mm.write() | python | def novo2(args):
"""
%prog novo2 trimmed projectname
Reference-free tGBS pipeline v2.
"""
p = OptionParser(novo2.__doc__)
p.set_fastq_names()
p.set_align(pctid=95)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
trimmed, pf = args
pctid = opts.pctid
reads, samples = scan_read_files(trimmed, opts.names)
# Set up directory structure
clustdir = "uclust"
acdir = "allele_counts"
for d in (clustdir, acdir):
mkdir(d)
mm = MakeManager()
clustfiles = []
# Step 0 - clustering within sample
for s in samples:
flist = [x for x in reads if op.basename(x).split(".")[0] == s]
outfile = s + ".P{0}.clustS".format(pctid)
outfile = op.join(clustdir, outfile)
cmd = "python -m jcvi.apps.uclust cluster --cpus=8"
cmd += " {0} {1}".format(s, " ".join(flist))
cmd += " --outdir={0}".format(clustdir)
cmd += " --pctid={0}".format(pctid)
mm.add(flist, outfile, cmd)
clustfiles.append(outfile)
# Step 1 - make consensus within sample
allcons = []
for s, clustfile in zip(samples, clustfiles):
outfile = s + ".P{0}.consensus".format(pctid)
outfile = op.join(clustdir, outfile)
cmd = "python -m jcvi.apps.uclust consensus"
cmd += " {0}".format(clustfile)
mm.add(clustfile, outfile, cmd)
allcons.append(outfile)
# Step 2 - clustering across samples
clustSfile = pf + ".P{0}.clustS".format(pctid)
cmd = "python -m jcvi.apps.uclust mcluster {0}".format(" ".join(allcons))
cmd += " --prefix={0}".format(pf)
mm.add(allcons, clustSfile, cmd)
# Step 3 - make consensus across samples
locifile = pf + ".P{0}.loci".format(pctid)
cmd = "python -m jcvi.apps.uclust mconsensus {0}".format(" ".join(allcons))
cmd += " --prefix={0}".format(pf)
mm.add(allcons + [clustSfile], locifile, cmd)
mm.write() | [
"def",
"novo2",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"novo2",
".",
"__doc__",
")",
"p",
".",
"set_fastq_names",
"(",
")",
"p",
".",
"set_align",
"(",
"pctid",
"=",
"95",
")",
"opts",
",",
"args",
"=",
"p",
".",
"parse_args",
"(",
... | %prog novo2 trimmed projectname
Reference-free tGBS pipeline v2. | [
"%prog",
"novo2",
"trimmed",
"projectname"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/projects/tgbs.py#L408-L468 | train | 200,940 |
tanghaibao/jcvi | jcvi/projects/tgbs.py | snpplot | def snpplot(args):
"""
%prog counts.cdt
Illustrate the histogram per SNP site.
"""
p = OptionParser(snpplot.__doc__)
opts, args, iopts = p.set_image_options(args, format="png")
if len(args) != 1:
sys.exit(not p.print_help())
datafile, = args
# Read in CDT file
fp = open(datafile)
next(fp)
next(fp)
data = []
for row in fp:
atoms = row.split()[4:]
nval = len(atoms)
values = [float(x) for x in atoms]
# normalize
values = [x * 1. / sum(values) for x in values]
data.append(values)
pf = datafile.rsplit(".", 1)[0]
fig = plt.figure(1, (iopts.w, iopts.h))
root = fig.add_axes([0, 0, 1, 1])
xmin, xmax = .1, .9
ymin, ymax = .1, .9
yinterval = (ymax - ymin) / len(data)
colors = "rbg" if nval == 3 else ["lightgray"] + list("rbg")
ystart = ymax
for d in data:
xstart = xmin
for dd, c in zip(d, colors):
xend = xstart + (xmax - xmin) * dd
root.plot((xstart, xend), (ystart, ystart), "-", color=c)
xstart = xend
ystart -= yinterval
root.text(.05, .5, "{0} LMD50 SNPs".format(len(data)),
ha="center", va="center", rotation=90, color="lightslategray")
for x, t, c in zip((.3, .5, .7), ("REF", "ALT", "HET"), "rbg"):
root.text(x, .95, t, color=c, ha="center", va="center")
normalize_axes(root)
image_name = pf + "." + iopts.format
savefig(image_name, dpi=iopts.dpi, iopts=iopts) | python | def snpplot(args):
"""
%prog counts.cdt
Illustrate the histogram per SNP site.
"""
p = OptionParser(snpplot.__doc__)
opts, args, iopts = p.set_image_options(args, format="png")
if len(args) != 1:
sys.exit(not p.print_help())
datafile, = args
# Read in CDT file
fp = open(datafile)
next(fp)
next(fp)
data = []
for row in fp:
atoms = row.split()[4:]
nval = len(atoms)
values = [float(x) for x in atoms]
# normalize
values = [x * 1. / sum(values) for x in values]
data.append(values)
pf = datafile.rsplit(".", 1)[0]
fig = plt.figure(1, (iopts.w, iopts.h))
root = fig.add_axes([0, 0, 1, 1])
xmin, xmax = .1, .9
ymin, ymax = .1, .9
yinterval = (ymax - ymin) / len(data)
colors = "rbg" if nval == 3 else ["lightgray"] + list("rbg")
ystart = ymax
for d in data:
xstart = xmin
for dd, c in zip(d, colors):
xend = xstart + (xmax - xmin) * dd
root.plot((xstart, xend), (ystart, ystart), "-", color=c)
xstart = xend
ystart -= yinterval
root.text(.05, .5, "{0} LMD50 SNPs".format(len(data)),
ha="center", va="center", rotation=90, color="lightslategray")
for x, t, c in zip((.3, .5, .7), ("REF", "ALT", "HET"), "rbg"):
root.text(x, .95, t, color=c, ha="center", va="center")
normalize_axes(root)
image_name = pf + "." + iopts.format
savefig(image_name, dpi=iopts.dpi, iopts=iopts) | [
"def",
"snpplot",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"snpplot",
".",
"__doc__",
")",
"opts",
",",
"args",
",",
"iopts",
"=",
"p",
".",
"set_image_options",
"(",
"args",
",",
"format",
"=",
"\"png\"",
")",
"if",
"len",
"(",
"args",
... | %prog counts.cdt
Illustrate the histogram per SNP site. | [
"%prog",
"counts",
".",
"cdt"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/projects/tgbs.py#L612-L662 | train | 200,941 |
tanghaibao/jcvi | jcvi/assembly/pbjelly.py | filterm4 | def filterm4(args):
"""
%prog filterm4 sample.m4 > filtered.m4
Filter .m4 file after blasr is run. As blasr takes a long time to run,
changing -bestn is undesirable. This screens the m4 file to retain top hits.
"""
p = OptionParser(filterm4.__doc__)
p.add_option("--best", default=1, type="int", help="Only retain best N hits")
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
m4file, = args
best = opts.best
fp = open(m4file)
fw = must_open(opts.outfile, "w")
seen = defaultdict(int)
retained = total = 0
for row in fp:
r = M4Line(row)
total += 1
if total % 100000 == 0:
logging.debug("Retained {0} lines".\
format(percentage(retained, total)))
if seen.get(r.query, 0) < best:
fw.write(row)
seen[r.query] += 1
retained += 1
fw.close() | python | def filterm4(args):
"""
%prog filterm4 sample.m4 > filtered.m4
Filter .m4 file after blasr is run. As blasr takes a long time to run,
changing -bestn is undesirable. This screens the m4 file to retain top hits.
"""
p = OptionParser(filterm4.__doc__)
p.add_option("--best", default=1, type="int", help="Only retain best N hits")
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
m4file, = args
best = opts.best
fp = open(m4file)
fw = must_open(opts.outfile, "w")
seen = defaultdict(int)
retained = total = 0
for row in fp:
r = M4Line(row)
total += 1
if total % 100000 == 0:
logging.debug("Retained {0} lines".\
format(percentage(retained, total)))
if seen.get(r.query, 0) < best:
fw.write(row)
seen[r.query] += 1
retained += 1
fw.close() | [
"def",
"filterm4",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"filterm4",
".",
"__doc__",
")",
"p",
".",
"add_option",
"(",
"\"--best\"",
",",
"default",
"=",
"1",
",",
"type",
"=",
"\"int\"",
",",
"help",
"=",
"\"Only retain best N hits\"",
"... | %prog filterm4 sample.m4 > filtered.m4
Filter .m4 file after blasr is run. As blasr takes a long time to run,
changing -bestn is undesirable. This screens the m4 file to retain top hits. | [
"%prog",
"filterm4",
"sample",
".",
"m4",
">",
"filtered",
".",
"m4"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/assembly/pbjelly.py#L95-L126 | train | 200,942 |
tanghaibao/jcvi | jcvi/assembly/pbjelly.py | spancount | def spancount(args):
"""
%prog spancount list_of_fillingMetrics
Count span support for each gap. A file with paths of all fillingMetrics can
be built with Linux `find`.
$ (find assembly -name "fillingMetrics.json" -print > list_of_fillMetrics 2>
/dev/null &)
"""
import json
p = OptionParser(spancount.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
fof, = args
fp = open(fof)
flist = [row.strip() for row in fp]
spanCount = "spanCount"
avgSpanBases = "avgSpanBases"
fw = open(spanCount, "w")
for f in flist:
fp = open(f)
j = json.load(fp)
sc = j.get(spanCount, None)
asb = j.get(avgSpanBases, None)
print(f, asb, sc, file=fw)
fw.flush()
fw.close() | python | def spancount(args):
"""
%prog spancount list_of_fillingMetrics
Count span support for each gap. A file with paths of all fillingMetrics can
be built with Linux `find`.
$ (find assembly -name "fillingMetrics.json" -print > list_of_fillMetrics 2>
/dev/null &)
"""
import json
p = OptionParser(spancount.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
fof, = args
fp = open(fof)
flist = [row.strip() for row in fp]
spanCount = "spanCount"
avgSpanBases = "avgSpanBases"
fw = open(spanCount, "w")
for f in flist:
fp = open(f)
j = json.load(fp)
sc = j.get(spanCount, None)
asb = j.get(avgSpanBases, None)
print(f, asb, sc, file=fw)
fw.flush()
fw.close() | [
"def",
"spancount",
"(",
"args",
")",
":",
"import",
"json",
"p",
"=",
"OptionParser",
"(",
"spancount",
".",
"__doc__",
")",
"opts",
",",
"args",
"=",
"p",
".",
"parse_args",
"(",
"args",
")",
"if",
"len",
"(",
"args",
")",
"!=",
"1",
":",
"sys",
... | %prog spancount list_of_fillingMetrics
Count span support for each gap. A file with paths of all fillingMetrics can
be built with Linux `find`.
$ (find assembly -name "fillingMetrics.json" -print > list_of_fillMetrics 2>
/dev/null &) | [
"%prog",
"spancount",
"list_of_fillingMetrics"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/assembly/pbjelly.py#L129-L160 | train | 200,943 |
tanghaibao/jcvi | jcvi/assembly/pbjelly.py | patch | def patch(args):
"""
%prog patch reference.fasta reads.fasta
Run PBJelly with reference and reads.
"""
from jcvi.formats.base import write_file
from jcvi.formats.fasta import format
p = OptionParser(patch.__doc__)
p.add_option("--cleanfasta", default=False, action="store_true",
help="Clean FASTA to remove description [default: %default]")
p.add_option("--highqual", default=False, action="store_true",
help="Reads are of high quality [default: %default]")
p.set_home("pbjelly")
p.set_cpus()
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
ref, reads = args
cpus = opts.cpus
cmd = op.join(opts.pbjelly_home, "setup.sh")
setup = "source {0}".format(cmd)
if not which("fakeQuals.py"):
sh(setup)
pf = ref.rsplit(".", 1)[0]
pr, px = reads.rsplit(".", 1)
# Remove description line
if opts.cleanfasta:
oref = pf + ".f.fasta"
oreads = pr + ".f.fasta"
format([ref, oref])
format([reads, oreads])
ref, reads = oref, oreads
# Check if the FASTA has qual
ref, refq = fake_quals(ref)
convert_reads = not px in ("fq", "fastq", "txt")
if convert_reads:
reads, readsq = fake_quals(reads)
readsfiles = " ".join((reads, readsq))
else:
readsfiles = reads
# Make directory structure
dref, dreads = "data/reference", "data/reads"
cwd = os.getcwd()
reference = op.join(cwd, "{0}/{1}".format(dref, ref))
reads = op.join(cwd, "{0}/{1}".format(dreads, reads))
if not op.exists(reference):
sh("mkdir -p {0}".format(dref))
sh("cp {0} {1}/".format(" ".join((ref, refq)), dref))
if not op.exists(reads):
sh("mkdir -p {0}".format(dreads))
sh("cp {0} {1}/".format(readsfiles, dreads))
outputDir = cwd
p = Protocol(outputDir, reference, reads, highqual=opts.highqual)
p.write_xml()
# Build the pipeline
runsh = [setup]
for action in "setup|mapping|support|extraction".split("|"):
runsh.append("Jelly.py {0} Protocol.xml".format(action))
runsh.append('Jelly.py assembly Protocol.xml -x "--nproc={0}"'.format(cpus))
runsh.append("Jelly.py output Protocol.xml")
runfile = "run.sh"
contents = "\n".join(runsh)
write_file(runfile, contents) | python | def patch(args):
"""
%prog patch reference.fasta reads.fasta
Run PBJelly with reference and reads.
"""
from jcvi.formats.base import write_file
from jcvi.formats.fasta import format
p = OptionParser(patch.__doc__)
p.add_option("--cleanfasta", default=False, action="store_true",
help="Clean FASTA to remove description [default: %default]")
p.add_option("--highqual", default=False, action="store_true",
help="Reads are of high quality [default: %default]")
p.set_home("pbjelly")
p.set_cpus()
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
ref, reads = args
cpus = opts.cpus
cmd = op.join(opts.pbjelly_home, "setup.sh")
setup = "source {0}".format(cmd)
if not which("fakeQuals.py"):
sh(setup)
pf = ref.rsplit(".", 1)[0]
pr, px = reads.rsplit(".", 1)
# Remove description line
if opts.cleanfasta:
oref = pf + ".f.fasta"
oreads = pr + ".f.fasta"
format([ref, oref])
format([reads, oreads])
ref, reads = oref, oreads
# Check if the FASTA has qual
ref, refq = fake_quals(ref)
convert_reads = not px in ("fq", "fastq", "txt")
if convert_reads:
reads, readsq = fake_quals(reads)
readsfiles = " ".join((reads, readsq))
else:
readsfiles = reads
# Make directory structure
dref, dreads = "data/reference", "data/reads"
cwd = os.getcwd()
reference = op.join(cwd, "{0}/{1}".format(dref, ref))
reads = op.join(cwd, "{0}/{1}".format(dreads, reads))
if not op.exists(reference):
sh("mkdir -p {0}".format(dref))
sh("cp {0} {1}/".format(" ".join((ref, refq)), dref))
if not op.exists(reads):
sh("mkdir -p {0}".format(dreads))
sh("cp {0} {1}/".format(readsfiles, dreads))
outputDir = cwd
p = Protocol(outputDir, reference, reads, highqual=opts.highqual)
p.write_xml()
# Build the pipeline
runsh = [setup]
for action in "setup|mapping|support|extraction".split("|"):
runsh.append("Jelly.py {0} Protocol.xml".format(action))
runsh.append('Jelly.py assembly Protocol.xml -x "--nproc={0}"'.format(cpus))
runsh.append("Jelly.py output Protocol.xml")
runfile = "run.sh"
contents = "\n".join(runsh)
write_file(runfile, contents) | [
"def",
"patch",
"(",
"args",
")",
":",
"from",
"jcvi",
".",
"formats",
".",
"base",
"import",
"write_file",
"from",
"jcvi",
".",
"formats",
".",
"fasta",
"import",
"format",
"p",
"=",
"OptionParser",
"(",
"patch",
".",
"__doc__",
")",
"p",
".",
"add_op... | %prog patch reference.fasta reads.fasta
Run PBJelly with reference and reads. | [
"%prog",
"patch",
"reference",
".",
"fasta",
"reads",
".",
"fasta"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/assembly/pbjelly.py#L172-L245 | train | 200,944 |
tanghaibao/jcvi | jcvi/utils/taxonomy.py | isPlantOrigin | def isPlantOrigin(taxid):
"""
Given a taxid, this gets the expanded tree which can then be checked to
see if the organism is a plant or not
>>> isPlantOrigin(29760)
True
"""
assert isinstance(taxid, int)
t = TaxIDTree(taxid)
try:
return "Viridiplantae" in str(t)
except AttributeError:
raise ValueError("{0} is not a valid ID".format(taxid)) | python | def isPlantOrigin(taxid):
"""
Given a taxid, this gets the expanded tree which can then be checked to
see if the organism is a plant or not
>>> isPlantOrigin(29760)
True
"""
assert isinstance(taxid, int)
t = TaxIDTree(taxid)
try:
return "Viridiplantae" in str(t)
except AttributeError:
raise ValueError("{0} is not a valid ID".format(taxid)) | [
"def",
"isPlantOrigin",
"(",
"taxid",
")",
":",
"assert",
"isinstance",
"(",
"taxid",
",",
"int",
")",
"t",
"=",
"TaxIDTree",
"(",
"taxid",
")",
"try",
":",
"return",
"\"Viridiplantae\"",
"in",
"str",
"(",
"t",
")",
"except",
"AttributeError",
":",
"rais... | Given a taxid, this gets the expanded tree which can then be checked to
see if the organism is a plant or not
>>> isPlantOrigin(29760)
True | [
"Given",
"a",
"taxid",
"this",
"gets",
"the",
"expanded",
"tree",
"which",
"can",
"then",
"be",
"checked",
"to",
"see",
"if",
"the",
"organism",
"is",
"a",
"plant",
"or",
"not"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/utils/taxonomy.py#L136-L151 | train | 200,945 |
tanghaibao/jcvi | jcvi/utils/taxonomy.py | newick | def newick(args):
"""
%prog newick idslist
Query a list of IDs to retrieve phylogeny.
"""
p = OptionParser(newick.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
idsfile, = args
mylist = [x.strip() for x in open(idsfile) if x.strip()]
print(get_taxids(mylist))
t = TaxIDTree(mylist)
print(t) | python | def newick(args):
"""
%prog newick idslist
Query a list of IDs to retrieve phylogeny.
"""
p = OptionParser(newick.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
idsfile, = args
mylist = [x.strip() for x in open(idsfile) if x.strip()]
print(get_taxids(mylist))
t = TaxIDTree(mylist)
print(t) | [
"def",
"newick",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"newick",
".",
"__doc__",
")",
"opts",
",",
"args",
"=",
"p",
".",
"parse_args",
"(",
"args",
")",
"if",
"len",
"(",
"args",
")",
"!=",
"1",
":",
"sys",
".",
"exit",
"(",
"n... | %prog newick idslist
Query a list of IDs to retrieve phylogeny. | [
"%prog",
"newick",
"idslist"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/utils/taxonomy.py#L179-L196 | train | 200,946 |
tanghaibao/jcvi | jcvi/formats/sam.py | fastq | def fastq(args):
"""
%prog fastq bamfile prefix
Convert BAM files to paired FASTQ files.
"""
p = OptionParser(fastq.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
bamfile, pf = args
singletons = pf + ".se.fastq"
a = pf + ".read1.fastq"
b = pf + ".read2.fastq"
cmd = "samtools collate -uOn 128 {} tmp-prefix".format(bamfile)
cmd += " | samtools fastq -s {} -1 {} -2 {} -"\
.format(singletons, a, b)
sh(cmd)
if os.stat(singletons).st_size == 0: # singleton file is empty
os.remove(singletons)
return a, b | python | def fastq(args):
"""
%prog fastq bamfile prefix
Convert BAM files to paired FASTQ files.
"""
p = OptionParser(fastq.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
bamfile, pf = args
singletons = pf + ".se.fastq"
a = pf + ".read1.fastq"
b = pf + ".read2.fastq"
cmd = "samtools collate -uOn 128 {} tmp-prefix".format(bamfile)
cmd += " | samtools fastq -s {} -1 {} -2 {} -"\
.format(singletons, a, b)
sh(cmd)
if os.stat(singletons).st_size == 0: # singleton file is empty
os.remove(singletons)
return a, b | [
"def",
"fastq",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"fastq",
".",
"__doc__",
")",
"opts",
",",
"args",
"=",
"p",
".",
"parse_args",
"(",
"args",
")",
"if",
"len",
"(",
"args",
")",
"!=",
"2",
":",
"sys",
".",
"exit",
"(",
"not... | %prog fastq bamfile prefix
Convert BAM files to paired FASTQ files. | [
"%prog",
"fastq",
"bamfile",
"prefix"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/sam.py#L224-L248 | train | 200,947 |
tanghaibao/jcvi | jcvi/formats/sam.py | mini | def mini(args):
"""
%prog mini bamfile region
Extract mini-bam for a single region.
"""
p = OptionParser(mini.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
bamfile, region = args
get_minibam(bamfile, region) | python | def mini(args):
"""
%prog mini bamfile region
Extract mini-bam for a single region.
"""
p = OptionParser(mini.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
bamfile, region = args
get_minibam(bamfile, region) | [
"def",
"mini",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"mini",
".",
"__doc__",
")",
"opts",
",",
"args",
"=",
"p",
".",
"parse_args",
"(",
"args",
")",
"if",
"len",
"(",
"args",
")",
"!=",
"2",
":",
"sys",
".",
"exit",
"(",
"not",... | %prog mini bamfile region
Extract mini-bam for a single region. | [
"%prog",
"mini",
"bamfile",
"region"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/sam.py#L251-L264 | train | 200,948 |
tanghaibao/jcvi | jcvi/formats/sam.py | noclip | def noclip(args):
"""
%prog noclip bamfile
Remove clipped reads from BAM.
"""
p = OptionParser(noclip.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
bamfile, = args
noclipbam = bamfile.replace(".bam", ".noclip.bam")
cmd = "samtools view -h {} | awk -F '\t' '($6 !~ /H|S/)'".format(bamfile)
cmd += " | samtools view -@ 4 -b -o {}".format(noclipbam)
sh(cmd)
sh("samtools index {}".format(noclipbam)) | python | def noclip(args):
"""
%prog noclip bamfile
Remove clipped reads from BAM.
"""
p = OptionParser(noclip.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
bamfile, = args
noclipbam = bamfile.replace(".bam", ".noclip.bam")
cmd = "samtools view -h {} | awk -F '\t' '($6 !~ /H|S/)'".format(bamfile)
cmd += " | samtools view -@ 4 -b -o {}".format(noclipbam)
sh(cmd)
sh("samtools index {}".format(noclipbam)) | [
"def",
"noclip",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"noclip",
".",
"__doc__",
")",
"opts",
",",
"args",
"=",
"p",
".",
"parse_args",
"(",
"args",
")",
"if",
"len",
"(",
"args",
")",
"!=",
"1",
":",
"sys",
".",
"exit",
"(",
"n... | %prog noclip bamfile
Remove clipped reads from BAM. | [
"%prog",
"noclip",
"bamfile"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/sam.py#L267-L285 | train | 200,949 |
tanghaibao/jcvi | jcvi/formats/sam.py | append | def append(args):
"""
%prog append bamfile
Append /1 or /2 to read names. Useful for using the Tophat2 bam file for
training AUGUSTUS gene models.
"""
p = OptionParser(append.__doc__)
p.add_option("--prepend", help="Prepend string to read names")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
bamfile, = args
prepend = opts.prepend
icmd = "samtools view -h {0}".format(bamfile)
bamfile = bamfile.rsplit(".", 1)[0] + ".append.bam"
ocmd = "samtools view -b -@ 64 - -o {0}".format(bamfile)
p = Popen(ocmd, stdin=PIPE)
for row in popen(icmd):
if row[0] == '@':
print(row.strip(), file=p.stdin)
else:
s = SamLine(row)
if prepend:
s.qname = prepend + "_" + s.qname
else:
s.update_readname()
print(s, file=p.stdin) | python | def append(args):
"""
%prog append bamfile
Append /1 or /2 to read names. Useful for using the Tophat2 bam file for
training AUGUSTUS gene models.
"""
p = OptionParser(append.__doc__)
p.add_option("--prepend", help="Prepend string to read names")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
bamfile, = args
prepend = opts.prepend
icmd = "samtools view -h {0}".format(bamfile)
bamfile = bamfile.rsplit(".", 1)[0] + ".append.bam"
ocmd = "samtools view -b -@ 64 - -o {0}".format(bamfile)
p = Popen(ocmd, stdin=PIPE)
for row in popen(icmd):
if row[0] == '@':
print(row.strip(), file=p.stdin)
else:
s = SamLine(row)
if prepend:
s.qname = prepend + "_" + s.qname
else:
s.update_readname()
print(s, file=p.stdin) | [
"def",
"append",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"append",
".",
"__doc__",
")",
"p",
".",
"add_option",
"(",
"\"--prepend\"",
",",
"help",
"=",
"\"Prepend string to read names\"",
")",
"opts",
",",
"args",
"=",
"p",
".",
"parse_args",... | %prog append bamfile
Append /1 or /2 to read names. Useful for using the Tophat2 bam file for
training AUGUSTUS gene models. | [
"%prog",
"append",
"bamfile"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/sam.py#L288-L318 | train | 200,950 |
tanghaibao/jcvi | jcvi/formats/sam.py | bed | def bed(args):
"""
%prog bed bedfile bamfiles
Convert bam files to bed.
"""
p = OptionParser(bed.__doc__)
opts, args = p.parse_args(args)
if len(args) < 2:
sys.exit(not p.print_help())
bedfile = args[0]
bamfiles = args[1:]
for bamfile in bamfiles:
cmd = "bamToBed -i {0}".format(bamfile)
sh(cmd, outfile=bedfile, append=True) | python | def bed(args):
"""
%prog bed bedfile bamfiles
Convert bam files to bed.
"""
p = OptionParser(bed.__doc__)
opts, args = p.parse_args(args)
if len(args) < 2:
sys.exit(not p.print_help())
bedfile = args[0]
bamfiles = args[1:]
for bamfile in bamfiles:
cmd = "bamToBed -i {0}".format(bamfile)
sh(cmd, outfile=bedfile, append=True) | [
"def",
"bed",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"bed",
".",
"__doc__",
")",
"opts",
",",
"args",
"=",
"p",
".",
"parse_args",
"(",
"args",
")",
"if",
"len",
"(",
"args",
")",
"<",
"2",
":",
"sys",
".",
"exit",
"(",
"not",
... | %prog bed bedfile bamfiles
Convert bam files to bed. | [
"%prog",
"bed",
"bedfile",
"bamfiles"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/sam.py#L321-L337 | train | 200,951 |
tanghaibao/jcvi | jcvi/formats/sam.py | merge | def merge(args):
"""
%prog merge merged_bams bams1_dir bams2_dir ...
Merge BAM files. Treat the bams with the same prefix as a set.
Output the commands first.
"""
from jcvi.apps.grid import MakeManager
p = OptionParser(merge.__doc__)
p.set_sep(sep="_", help="Separator to group per prefix")
opts, args = p.parse_args(args)
if len(args) < 2:
sys.exit(not p.print_help())
merged_bams = args[0]
bamdirs = args[1:]
mkdir(merged_bams)
bams = []
for x in bamdirs:
bams += glob(op.join(x, "*.bam"))
bams = [x for x in bams if "nsorted" not in x]
logging.debug("Found a total of {0} BAM files.".format(len(bams)))
sep = opts.sep
key = lambda x: op.basename(x).split(sep)[0]
bams.sort(key=key)
mm = MakeManager()
for prefix, files in groupby(bams, key=key):
files = sorted(list(files))
nfiles = len(files)
source = " ".join(files)
target = op.join(merged_bams, op.basename(files[0]))
if nfiles == 1:
source = get_abs_path(source)
cmd = "ln -s {0} {1}".format(source, target)
mm.add("", target, cmd)
else:
cmd = "samtools merge -@ 8 {0} {1}".format(target, source)
mm.add(files, target, cmd, remove=True)
mm.write() | python | def merge(args):
"""
%prog merge merged_bams bams1_dir bams2_dir ...
Merge BAM files. Treat the bams with the same prefix as a set.
Output the commands first.
"""
from jcvi.apps.grid import MakeManager
p = OptionParser(merge.__doc__)
p.set_sep(sep="_", help="Separator to group per prefix")
opts, args = p.parse_args(args)
if len(args) < 2:
sys.exit(not p.print_help())
merged_bams = args[0]
bamdirs = args[1:]
mkdir(merged_bams)
bams = []
for x in bamdirs:
bams += glob(op.join(x, "*.bam"))
bams = [x for x in bams if "nsorted" not in x]
logging.debug("Found a total of {0} BAM files.".format(len(bams)))
sep = opts.sep
key = lambda x: op.basename(x).split(sep)[0]
bams.sort(key=key)
mm = MakeManager()
for prefix, files in groupby(bams, key=key):
files = sorted(list(files))
nfiles = len(files)
source = " ".join(files)
target = op.join(merged_bams, op.basename(files[0]))
if nfiles == 1:
source = get_abs_path(source)
cmd = "ln -s {0} {1}".format(source, target)
mm.add("", target, cmd)
else:
cmd = "samtools merge -@ 8 {0} {1}".format(target, source)
mm.add(files, target, cmd, remove=True)
mm.write() | [
"def",
"merge",
"(",
"args",
")",
":",
"from",
"jcvi",
".",
"apps",
".",
"grid",
"import",
"MakeManager",
"p",
"=",
"OptionParser",
"(",
"merge",
".",
"__doc__",
")",
"p",
".",
"set_sep",
"(",
"sep",
"=",
"\"_\"",
",",
"help",
"=",
"\"Separator to grou... | %prog merge merged_bams bams1_dir bams2_dir ...
Merge BAM files. Treat the bams with the same prefix as a set.
Output the commands first. | [
"%prog",
"merge",
"merged_bams",
"bams1_dir",
"bams2_dir",
"..."
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/sam.py#L340-L383 | train | 200,952 |
tanghaibao/jcvi | jcvi/formats/sam.py | count | def count(args):
"""
%prog count bamfile gtf
Count the number of reads mapped using `htseq-count`.
"""
p = OptionParser(count.__doc__)
p.add_option("--type", default="exon",
help="Only count feature type")
p.set_cpus(cpus=8)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
bamfile, gtf = args
cpus = opts.cpus
pf = bamfile.split(".")[0]
countfile = pf + ".count"
if not need_update(bamfile, countfile):
return
nsorted = pf + "_nsorted"
nsortedbam, nsortedsam = nsorted + ".bam", nsorted + ".sam"
if need_update(bamfile, nsortedsam):
cmd = "samtools sort -@ {0} -n {1} {2}".format(cpus, bamfile, nsorted)
sh(cmd)
cmd = "samtools view -@ {0} -h {1}".format(cpus, nsortedbam)
sh(cmd, outfile=nsortedsam)
if need_update(nsortedsam, countfile):
cmd = "htseq-count --stranded=no --minaqual=10"
cmd += " -t {0}".format(opts.type)
cmd += " {0} {1}".format(nsortedsam, gtf)
sh(cmd, outfile=countfile) | python | def count(args):
"""
%prog count bamfile gtf
Count the number of reads mapped using `htseq-count`.
"""
p = OptionParser(count.__doc__)
p.add_option("--type", default="exon",
help="Only count feature type")
p.set_cpus(cpus=8)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
bamfile, gtf = args
cpus = opts.cpus
pf = bamfile.split(".")[0]
countfile = pf + ".count"
if not need_update(bamfile, countfile):
return
nsorted = pf + "_nsorted"
nsortedbam, nsortedsam = nsorted + ".bam", nsorted + ".sam"
if need_update(bamfile, nsortedsam):
cmd = "samtools sort -@ {0} -n {1} {2}".format(cpus, bamfile, nsorted)
sh(cmd)
cmd = "samtools view -@ {0} -h {1}".format(cpus, nsortedbam)
sh(cmd, outfile=nsortedsam)
if need_update(nsortedsam, countfile):
cmd = "htseq-count --stranded=no --minaqual=10"
cmd += " -t {0}".format(opts.type)
cmd += " {0} {1}".format(nsortedsam, gtf)
sh(cmd, outfile=countfile) | [
"def",
"count",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"count",
".",
"__doc__",
")",
"p",
".",
"add_option",
"(",
"\"--type\"",
",",
"default",
"=",
"\"exon\"",
",",
"help",
"=",
"\"Only count feature type\"",
")",
"p",
".",
"set_cpus",
"(... | %prog count bamfile gtf
Count the number of reads mapped using `htseq-count`. | [
"%prog",
"count",
"bamfile",
"gtf"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/sam.py#L386-L420 | train | 200,953 |
tanghaibao/jcvi | jcvi/formats/sam.py | coverage | def coverage(args):
"""
%prog coverage fastafile bamfile
Calculate coverage for BAM file. BAM file will be sorted unless with
--nosort.
"""
p = OptionParser(coverage.__doc__)
p.add_option("--format", default="bigwig",
choices=("bedgraph", "bigwig", "coverage"),
help="Output format")
p.add_option("--nosort", default=False, action="store_true",
help="Do not sort BAM")
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
fastafile, bamfile = args
format = opts.format
if opts.nosort:
logging.debug("BAM sorting skipped")
else:
bamfile = index([bamfile, "--fasta={0}".format(fastafile)])
pf = bamfile.rsplit(".", 2)[0]
sizesfile = Sizes(fastafile).filename
cmd = "genomeCoverageBed -ibam {0} -g {1}".format(bamfile, sizesfile)
if format in ("bedgraph", "bigwig"):
cmd += " -bg"
bedgraphfile = pf + ".bedgraph"
sh(cmd, outfile=bedgraphfile)
if format == "bedgraph":
return bedgraphfile
bigwigfile = pf + ".bigwig"
cmd = "bedGraphToBigWig {0} {1} {2}".\
format(bedgraphfile, sizesfile, bigwigfile)
sh(cmd)
return bigwigfile
coveragefile = pf + ".coverage"
if need_update(fastafile, coveragefile):
sh(cmd, outfile=coveragefile)
gcf = GenomeCoverageFile(coveragefile)
fw = must_open(opts.outfile, "w")
for seqid, cov in gcf.iter_coverage_seqid():
print("\t".join((seqid, "{0:.1f}".format(cov))), file=fw)
fw.close() | python | def coverage(args):
"""
%prog coverage fastafile bamfile
Calculate coverage for BAM file. BAM file will be sorted unless with
--nosort.
"""
p = OptionParser(coverage.__doc__)
p.add_option("--format", default="bigwig",
choices=("bedgraph", "bigwig", "coverage"),
help="Output format")
p.add_option("--nosort", default=False, action="store_true",
help="Do not sort BAM")
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
fastafile, bamfile = args
format = opts.format
if opts.nosort:
logging.debug("BAM sorting skipped")
else:
bamfile = index([bamfile, "--fasta={0}".format(fastafile)])
pf = bamfile.rsplit(".", 2)[0]
sizesfile = Sizes(fastafile).filename
cmd = "genomeCoverageBed -ibam {0} -g {1}".format(bamfile, sizesfile)
if format in ("bedgraph", "bigwig"):
cmd += " -bg"
bedgraphfile = pf + ".bedgraph"
sh(cmd, outfile=bedgraphfile)
if format == "bedgraph":
return bedgraphfile
bigwigfile = pf + ".bigwig"
cmd = "bedGraphToBigWig {0} {1} {2}".\
format(bedgraphfile, sizesfile, bigwigfile)
sh(cmd)
return bigwigfile
coveragefile = pf + ".coverage"
if need_update(fastafile, coveragefile):
sh(cmd, outfile=coveragefile)
gcf = GenomeCoverageFile(coveragefile)
fw = must_open(opts.outfile, "w")
for seqid, cov in gcf.iter_coverage_seqid():
print("\t".join((seqid, "{0:.1f}".format(cov))), file=fw)
fw.close() | [
"def",
"coverage",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"coverage",
".",
"__doc__",
")",
"p",
".",
"add_option",
"(",
"\"--format\"",
",",
"default",
"=",
"\"bigwig\"",
",",
"choices",
"=",
"(",
"\"bedgraph\"",
",",
"\"bigwig\"",
",",
"\... | %prog coverage fastafile bamfile
Calculate coverage for BAM file. BAM file will be sorted unless with
--nosort. | [
"%prog",
"coverage",
"fastafile",
"bamfile"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/sam.py#L423-L474 | train | 200,954 |
tanghaibao/jcvi | jcvi/formats/sam.py | consensus | def consensus(args):
"""
%prog consensus fastafile bamfile
Convert bam alignments to consensus FASTQ/FASTA.
"""
p = OptionParser(consensus.__doc__)
p.add_option("--fasta", default=False, action="store_true",
help="Generate consensus FASTA sequences [default: %default]")
p.add_option("--mask", default=0, type="int",
help="Mask bases with quality lower than")
opts, args = p.parse_args(args)
if len(args) < 2:
sys.exit(not p.print_help())
fastafile, bamfile = args
fasta = opts.fasta
suffix = "fasta" if fasta else "fastq"
pf = bamfile.rsplit(".", 1)[0]
cnsfile = pf + ".cns.{0}".format(suffix)
vcfgzfile = pf + ".vcf.gz"
vcf([fastafile, bamfile, "-o", vcfgzfile])
cmd += "zcat {0} | vcfutils.pl vcf2fq".format(vcfgzfile)
if fasta:
cmd += " | seqtk seq -q {0} -A -".format(opts.mask)
sh(cmd, outfile=cnsfile) | python | def consensus(args):
"""
%prog consensus fastafile bamfile
Convert bam alignments to consensus FASTQ/FASTA.
"""
p = OptionParser(consensus.__doc__)
p.add_option("--fasta", default=False, action="store_true",
help="Generate consensus FASTA sequences [default: %default]")
p.add_option("--mask", default=0, type="int",
help="Mask bases with quality lower than")
opts, args = p.parse_args(args)
if len(args) < 2:
sys.exit(not p.print_help())
fastafile, bamfile = args
fasta = opts.fasta
suffix = "fasta" if fasta else "fastq"
pf = bamfile.rsplit(".", 1)[0]
cnsfile = pf + ".cns.{0}".format(suffix)
vcfgzfile = pf + ".vcf.gz"
vcf([fastafile, bamfile, "-o", vcfgzfile])
cmd += "zcat {0} | vcfutils.pl vcf2fq".format(vcfgzfile)
if fasta:
cmd += " | seqtk seq -q {0} -A -".format(opts.mask)
sh(cmd, outfile=cnsfile) | [
"def",
"consensus",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"consensus",
".",
"__doc__",
")",
"p",
".",
"add_option",
"(",
"\"--fasta\"",
",",
"default",
"=",
"False",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"\"Generate consen... | %prog consensus fastafile bamfile
Convert bam alignments to consensus FASTQ/FASTA. | [
"%prog",
"consensus",
"fastafile",
"bamfile"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/sam.py#L530-L557 | train | 200,955 |
tanghaibao/jcvi | jcvi/formats/sam.py | vcf | def vcf(args):
"""
%prog vcf fastafile bamfiles > out.vcf.gz
Call SNPs on bam files.
"""
from jcvi.apps.grid import Jobs
valid_callers = ("mpileup", "freebayes")
p = OptionParser(vcf.__doc__)
p.set_outfile(outfile="out.vcf.gz")
p.add_option("--nosort", default=False, action="store_true",
help="Do not sort the BAM files")
p.add_option("--caller", default="mpileup", choices=valid_callers,
help="Use variant caller [default: %default]")
opts, args = p.parse_args(args)
if len(args) < 2:
sys.exit(not p.print_help())
fastafile = args[0]
bamfiles = args[1:]
caller = opts.caller
unsorted = [x for x in bamfiles if ".sorted." not in x]
if opts.nosort:
bamfiles = unsorted
else:
jargs = [[[x, "--unique"]] for x in unsorted]
jobs = Jobs(index, args=jargs)
jobs.run()
bamfiles = [x.replace(".sorted.bam", ".bam") for x in bamfiles]
bamfiles = [x.replace(".bam", ".sorted.bam") for x in bamfiles]
if caller == "mpileup":
cmd = "samtools mpileup -E -uf"
cmd += " {0} {1}".format(fastafile, " ".join(bamfiles))
cmd += " | bcftools call -vmO v"
elif caller == "freebayes":
cmd = "freebayes -f"
cmd += " {0} {1}".format(fastafile, " ".join(bamfiles))
sh(cmd, outfile=opts.outfile) | python | def vcf(args):
"""
%prog vcf fastafile bamfiles > out.vcf.gz
Call SNPs on bam files.
"""
from jcvi.apps.grid import Jobs
valid_callers = ("mpileup", "freebayes")
p = OptionParser(vcf.__doc__)
p.set_outfile(outfile="out.vcf.gz")
p.add_option("--nosort", default=False, action="store_true",
help="Do not sort the BAM files")
p.add_option("--caller", default="mpileup", choices=valid_callers,
help="Use variant caller [default: %default]")
opts, args = p.parse_args(args)
if len(args) < 2:
sys.exit(not p.print_help())
fastafile = args[0]
bamfiles = args[1:]
caller = opts.caller
unsorted = [x for x in bamfiles if ".sorted." not in x]
if opts.nosort:
bamfiles = unsorted
else:
jargs = [[[x, "--unique"]] for x in unsorted]
jobs = Jobs(index, args=jargs)
jobs.run()
bamfiles = [x.replace(".sorted.bam", ".bam") for x in bamfiles]
bamfiles = [x.replace(".bam", ".sorted.bam") for x in bamfiles]
if caller == "mpileup":
cmd = "samtools mpileup -E -uf"
cmd += " {0} {1}".format(fastafile, " ".join(bamfiles))
cmd += " | bcftools call -vmO v"
elif caller == "freebayes":
cmd = "freebayes -f"
cmd += " {0} {1}".format(fastafile, " ".join(bamfiles))
sh(cmd, outfile=opts.outfile) | [
"def",
"vcf",
"(",
"args",
")",
":",
"from",
"jcvi",
".",
"apps",
".",
"grid",
"import",
"Jobs",
"valid_callers",
"=",
"(",
"\"mpileup\"",
",",
"\"freebayes\"",
")",
"p",
"=",
"OptionParser",
"(",
"vcf",
".",
"__doc__",
")",
"p",
".",
"set_outfile",
"(... | %prog vcf fastafile bamfiles > out.vcf.gz
Call SNPs on bam files. | [
"%prog",
"vcf",
"fastafile",
"bamfiles",
">",
"out",
".",
"vcf",
".",
"gz"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/sam.py#L560-L601 | train | 200,956 |
tanghaibao/jcvi | jcvi/formats/sam.py | chimera | def chimera(args):
"""
%prog chimera bamfile
Parse BAM file from `bwasw` and list multi-hit reads and breakpoints.
"""
import pysam
from jcvi.utils.natsort import natsorted
p = OptionParser(chimera.__doc__)
p.set_verbose()
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
samfile, = args
samfile = pysam.AlignmentFile(samfile)
rstore = defaultdict(list)
hstore = defaultdict(int)
for r in samfile.fetch():
rstore[r.query_name] += list(breakpoint(r))
hstore[r.query_name] += 1
if opts.verbose:
print(r.query_name, "+-"[r.is_reverse], \
sum(l for o, l in r.cigartuples), r.cigarstring, list(breakpoint(r)), file=sys.stderr)
for rn, bps in natsorted(rstore.items()):
bps = "|".join(str(x) for x in sorted(bps)) if bps else "na"
print("\t".join((rn, str(hstore[rn]), bps))) | python | def chimera(args):
"""
%prog chimera bamfile
Parse BAM file from `bwasw` and list multi-hit reads and breakpoints.
"""
import pysam
from jcvi.utils.natsort import natsorted
p = OptionParser(chimera.__doc__)
p.set_verbose()
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
samfile, = args
samfile = pysam.AlignmentFile(samfile)
rstore = defaultdict(list)
hstore = defaultdict(int)
for r in samfile.fetch():
rstore[r.query_name] += list(breakpoint(r))
hstore[r.query_name] += 1
if opts.verbose:
print(r.query_name, "+-"[r.is_reverse], \
sum(l for o, l in r.cigartuples), r.cigarstring, list(breakpoint(r)), file=sys.stderr)
for rn, bps in natsorted(rstore.items()):
bps = "|".join(str(x) for x in sorted(bps)) if bps else "na"
print("\t".join((rn, str(hstore[rn]), bps))) | [
"def",
"chimera",
"(",
"args",
")",
":",
"import",
"pysam",
"from",
"jcvi",
".",
"utils",
".",
"natsort",
"import",
"natsorted",
"p",
"=",
"OptionParser",
"(",
"chimera",
".",
"__doc__",
")",
"p",
".",
"set_verbose",
"(",
")",
"opts",
",",
"args",
"=",... | %prog chimera bamfile
Parse BAM file from `bwasw` and list multi-hit reads and breakpoints. | [
"%prog",
"chimera",
"bamfile"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/sam.py#L616-L644 | train | 200,957 |
tanghaibao/jcvi | jcvi/formats/sam.py | pair | def pair(args):
"""
%prog pair samfile
Parses the sam file and retrieve in pairs format,
query:pos ref:pos
"""
p = OptionParser(pair.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(p.print_help())
def callback(s):
print(s.pairline)
Sam(args[0], callback=callback) | python | def pair(args):
"""
%prog pair samfile
Parses the sam file and retrieve in pairs format,
query:pos ref:pos
"""
p = OptionParser(pair.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(p.print_help())
def callback(s):
print(s.pairline)
Sam(args[0], callback=callback) | [
"def",
"pair",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"pair",
".",
"__doc__",
")",
"opts",
",",
"args",
"=",
"p",
".",
"parse_args",
"(",
"args",
")",
"if",
"len",
"(",
"args",
")",
"!=",
"1",
":",
"sys",
".",
"exit",
"(",
"p",
... | %prog pair samfile
Parses the sam file and retrieve in pairs format,
query:pos ref:pos | [
"%prog",
"pair",
"samfile"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/sam.py#L754-L769 | train | 200,958 |
tanghaibao/jcvi | jcvi/formats/sam.py | cigar_to_seq | def cigar_to_seq(a, gap='*'):
"""
Accepts a pysam row.
cigar alignment is presented as a list of tuples (operation,length). For
example, the tuple [ (0,3), (1,5), (0,2) ] refers to an alignment with 3
matches, 5 insertions and another 2 matches.
Op BAM Description
M 0 alignment match (can be a sequence match or mismatch)
I 1 insertion to the reference
D 2 deletion from the reference
N 3 skipped region from the reference
S 4 soft clipping (clipped sequences present in SEQ)
H 5 hard clipping (clipped sequences NOT present in SEQ)
P 6 padding (silent deletion from padded reference)
= 7 sequence match
X 8 sequence mismatch
convert the sequence based on the cigar string. For example:
"""
seq, cigar = a.seq, a.cigar
start = 0
subseqs = []
npadded = 0
if cigar is None:
return None, npadded
for operation, length in cigar:
end = start if operation == 2 else start + length
if operation == 0: # match
subseq = seq[start:end]
elif operation == 1: # insertion
subseq = ""
elif operation == 2: # deletion
subseq = gap * length
npadded += length
elif operation == 3: # skipped
subseq = 'N' * length
elif operation in (4, 5): # clip
subseq = ""
else:
raise NotImplementedError
subseqs.append(subseq)
start = end
return "".join(subseqs), npadded | python | def cigar_to_seq(a, gap='*'):
"""
Accepts a pysam row.
cigar alignment is presented as a list of tuples (operation,length). For
example, the tuple [ (0,3), (1,5), (0,2) ] refers to an alignment with 3
matches, 5 insertions and another 2 matches.
Op BAM Description
M 0 alignment match (can be a sequence match or mismatch)
I 1 insertion to the reference
D 2 deletion from the reference
N 3 skipped region from the reference
S 4 soft clipping (clipped sequences present in SEQ)
H 5 hard clipping (clipped sequences NOT present in SEQ)
P 6 padding (silent deletion from padded reference)
= 7 sequence match
X 8 sequence mismatch
convert the sequence based on the cigar string. For example:
"""
seq, cigar = a.seq, a.cigar
start = 0
subseqs = []
npadded = 0
if cigar is None:
return None, npadded
for operation, length in cigar:
end = start if operation == 2 else start + length
if operation == 0: # match
subseq = seq[start:end]
elif operation == 1: # insertion
subseq = ""
elif operation == 2: # deletion
subseq = gap * length
npadded += length
elif operation == 3: # skipped
subseq = 'N' * length
elif operation in (4, 5): # clip
subseq = ""
else:
raise NotImplementedError
subseqs.append(subseq)
start = end
return "".join(subseqs), npadded | [
"def",
"cigar_to_seq",
"(",
"a",
",",
"gap",
"=",
"'*'",
")",
":",
"seq",
",",
"cigar",
"=",
"a",
".",
"seq",
",",
"a",
".",
"cigar",
"start",
"=",
"0",
"subseqs",
"=",
"[",
"]",
"npadded",
"=",
"0",
"if",
"cigar",
"is",
"None",
":",
"return",
... | Accepts a pysam row.
cigar alignment is presented as a list of tuples (operation,length). For
example, the tuple [ (0,3), (1,5), (0,2) ] refers to an alignment with 3
matches, 5 insertions and another 2 matches.
Op BAM Description
M 0 alignment match (can be a sequence match or mismatch)
I 1 insertion to the reference
D 2 deletion from the reference
N 3 skipped region from the reference
S 4 soft clipping (clipped sequences present in SEQ)
H 5 hard clipping (clipped sequences NOT present in SEQ)
P 6 padding (silent deletion from padded reference)
= 7 sequence match
X 8 sequence mismatch
convert the sequence based on the cigar string. For example: | [
"Accepts",
"a",
"pysam",
"row",
"."
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/sam.py#L772-L820 | train | 200,959 |
tanghaibao/jcvi | jcvi/assembly/allpaths.py | dump | def dump(args):
"""
%prog dump fastbfile
Export ALLPATHS fastb file to fastq file. Use --dir to indicate a previously
run allpaths folder.
"""
p = OptionParser(dump.__doc__)
p.add_option("--dir",
help="Working directory [default: %default]")
p.add_option("--nosim", default=False, action="store_true",
help="Do not simulate qual to 50 [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
fastbfile, = args
d = opts.dir
if d:
from jcvi.assembly.preprocess import export_fastq
rc = "jump" in fastbfile
export_fastq(d, fastbfile, rc=rc)
return
sim = not opts.nosim
pf = "j" if "jump" in fastbfile else "f"
statsfile = "{0}.lib_stats".format(pf)
if op.exists(statsfile):
os.remove(statsfile)
cmd = "SplitReadsByLibrary READS_IN={0}".format(fastbfile)
cmd += " READS_OUT={0} QUALS=True".format(pf)
sh(cmd)
libs = []
fp = open(statsfile)
next(fp); next(fp) # skip two rows
for row in fp:
if row.strip() == "":
continue
libname = row.split()[0]
if libname == "Unpaired":
continue
libs.append(libname)
logging.debug("Found libraries: {0}".format(",".join(libs)))
cmds = []
for libname in libs:
cmd = "FastbQualbToFastq"
cmd += " HEAD_IN={0}.{1}.AB HEAD_OUT={1}".format(pf, libname)
cmd += " PAIRED=True PHRED_OFFSET=33"
if sim:
cmd += " SIMULATE_QUALS=True"
if pf == 'j':
cmd += " FLIP=True"
cmds.append((cmd, ))
m = Jobs(target=sh, args=cmds)
m.run()
for libname in libs:
cmd = "mv {0}.A.fastq {0}.1.fastq".format(libname)
sh(cmd)
cmd = "mv {0}.B.fastq {0}.2.fastq".format(libname)
sh(cmd) | python | def dump(args):
"""
%prog dump fastbfile
Export ALLPATHS fastb file to fastq file. Use --dir to indicate a previously
run allpaths folder.
"""
p = OptionParser(dump.__doc__)
p.add_option("--dir",
help="Working directory [default: %default]")
p.add_option("--nosim", default=False, action="store_true",
help="Do not simulate qual to 50 [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
fastbfile, = args
d = opts.dir
if d:
from jcvi.assembly.preprocess import export_fastq
rc = "jump" in fastbfile
export_fastq(d, fastbfile, rc=rc)
return
sim = not opts.nosim
pf = "j" if "jump" in fastbfile else "f"
statsfile = "{0}.lib_stats".format(pf)
if op.exists(statsfile):
os.remove(statsfile)
cmd = "SplitReadsByLibrary READS_IN={0}".format(fastbfile)
cmd += " READS_OUT={0} QUALS=True".format(pf)
sh(cmd)
libs = []
fp = open(statsfile)
next(fp); next(fp) # skip two rows
for row in fp:
if row.strip() == "":
continue
libname = row.split()[0]
if libname == "Unpaired":
continue
libs.append(libname)
logging.debug("Found libraries: {0}".format(",".join(libs)))
cmds = []
for libname in libs:
cmd = "FastbQualbToFastq"
cmd += " HEAD_IN={0}.{1}.AB HEAD_OUT={1}".format(pf, libname)
cmd += " PAIRED=True PHRED_OFFSET=33"
if sim:
cmd += " SIMULATE_QUALS=True"
if pf == 'j':
cmd += " FLIP=True"
cmds.append((cmd, ))
m = Jobs(target=sh, args=cmds)
m.run()
for libname in libs:
cmd = "mv {0}.A.fastq {0}.1.fastq".format(libname)
sh(cmd)
cmd = "mv {0}.B.fastq {0}.2.fastq".format(libname)
sh(cmd) | [
"def",
"dump",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"dump",
".",
"__doc__",
")",
"p",
".",
"add_option",
"(",
"\"--dir\"",
",",
"help",
"=",
"\"Working directory [default: %default]\"",
")",
"p",
".",
"add_option",
"(",
"\"--nosim\"",
",",
... | %prog dump fastbfile
Export ALLPATHS fastb file to fastq file. Use --dir to indicate a previously
run allpaths folder. | [
"%prog",
"dump",
"fastbfile"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/assembly/allpaths.py#L120-L191 | train | 200,960 |
tanghaibao/jcvi | jcvi/assembly/allpaths.py | fixpairs | def fixpairs(args):
"""
%prog fixpairs pairsfile sep sd
Fix pairs library stats. This is sometime useful to modify library stats,
for example, the separation between paired reads after importing the data.
"""
p = OptionParser(fixpairs.__doc__)
opts, args = p.parse_args(args)
if len(args) != 3:
sys.exit(not p.print_help())
pairsfile, sep, sd = args
newpairsfile = pairsfile.rsplit(".", 1)[0] + ".new.pairs"
sep = int(sep)
sd = int(sd)
p = PairsFile(pairsfile)
p.fixLibraryStats(sep, sd)
p.write(newpairsfile) | python | def fixpairs(args):
"""
%prog fixpairs pairsfile sep sd
Fix pairs library stats. This is sometime useful to modify library stats,
for example, the separation between paired reads after importing the data.
"""
p = OptionParser(fixpairs.__doc__)
opts, args = p.parse_args(args)
if len(args) != 3:
sys.exit(not p.print_help())
pairsfile, sep, sd = args
newpairsfile = pairsfile.rsplit(".", 1)[0] + ".new.pairs"
sep = int(sep)
sd = int(sd)
p = PairsFile(pairsfile)
p.fixLibraryStats(sep, sd)
p.write(newpairsfile) | [
"def",
"fixpairs",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"fixpairs",
".",
"__doc__",
")",
"opts",
",",
"args",
"=",
"p",
".",
"parse_args",
"(",
"args",
")",
"if",
"len",
"(",
"args",
")",
"!=",
"3",
":",
"sys",
".",
"exit",
"(",
... | %prog fixpairs pairsfile sep sd
Fix pairs library stats. This is sometime useful to modify library stats,
for example, the separation between paired reads after importing the data. | [
"%prog",
"fixpairs",
"pairsfile",
"sep",
"sd"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/assembly/allpaths.py#L194-L214 | train | 200,961 |
tanghaibao/jcvi | jcvi/assembly/allpaths.py | fill | def fill(args):
"""
%prog fill frag_reads_corr.fastb
Run FillFragments on `frag_reads_corr.fastb`.
"""
p = OptionParser(fill.__doc__)
p.add_option("--stretch", default=3, type="int",
help="MAX_STRETCH to pass to FillFragments [default: %default]")
p.set_cpus()
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
fastb, = args
assert fastb == "frag_reads_corr.fastb"
pcfile = "frag_reads_corr.k28.pc.info"
nthreads = " NUM_THREADS={0}".format(opts.cpus)
maxstretch = " MAX_STRETCH={0}".format(opts.stretch)
if need_update(fastb, pcfile):
cmd = "PathReads READS_IN=frag_reads_corr"
cmd += nthreads
sh(cmd)
filledfastb = "filled_reads.fastb"
if need_update(pcfile, filledfastb):
cmd = "FillFragments PAIRS_OUT=frag_reads_corr_cpd"
cmd += " PRECORRECT_LIBSTATS=True"
cmd += maxstretch
cmd += nthreads
sh(cmd)
filledfasta = "filled_reads.fasta"
if need_update(filledfastb, filledfasta):
cmd = "Fastb2Fasta IN=filled_reads.fastb OUT=filled_reads.fasta"
sh(cmd) | python | def fill(args):
"""
%prog fill frag_reads_corr.fastb
Run FillFragments on `frag_reads_corr.fastb`.
"""
p = OptionParser(fill.__doc__)
p.add_option("--stretch", default=3, type="int",
help="MAX_STRETCH to pass to FillFragments [default: %default]")
p.set_cpus()
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
fastb, = args
assert fastb == "frag_reads_corr.fastb"
pcfile = "frag_reads_corr.k28.pc.info"
nthreads = " NUM_THREADS={0}".format(opts.cpus)
maxstretch = " MAX_STRETCH={0}".format(opts.stretch)
if need_update(fastb, pcfile):
cmd = "PathReads READS_IN=frag_reads_corr"
cmd += nthreads
sh(cmd)
filledfastb = "filled_reads.fastb"
if need_update(pcfile, filledfastb):
cmd = "FillFragments PAIRS_OUT=frag_reads_corr_cpd"
cmd += " PRECORRECT_LIBSTATS=True"
cmd += maxstretch
cmd += nthreads
sh(cmd)
filledfasta = "filled_reads.fasta"
if need_update(filledfastb, filledfasta):
cmd = "Fastb2Fasta IN=filled_reads.fastb OUT=filled_reads.fasta"
sh(cmd) | [
"def",
"fill",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"fill",
".",
"__doc__",
")",
"p",
".",
"add_option",
"(",
"\"--stretch\"",
",",
"default",
"=",
"3",
",",
"type",
"=",
"\"int\"",
",",
"help",
"=",
"\"MAX_STRETCH to pass to FillFragments... | %prog fill frag_reads_corr.fastb
Run FillFragments on `frag_reads_corr.fastb`. | [
"%prog",
"fill",
"frag_reads_corr",
".",
"fastb"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/assembly/allpaths.py#L217-L255 | train | 200,962 |
tanghaibao/jcvi | jcvi/assembly/allpaths.py | extract_pairs | def extract_pairs(fastqfile, p1fw, p2fw, fragsfw, p, suffix=False):
"""
Take fastqfile and array of pair ID, extract adjacent pairs to outfile.
Perform check on numbers when done. p1fw, p2fw is a list of file handles,
each for one end. p is a Pairs instance.
"""
fp = open(fastqfile)
currentID = 0
npairs = nfrags = 0
for x, lib in izip(p.r1, p.libs):
while currentID != x:
fragsfw.writelines(islice(fp, 4)) # Exhaust the iterator
currentID += 1
nfrags += 1
a = list(islice(fp, 4))
b = list(islice(fp, 4))
if suffix:
name = a[0].rstrip()
a[0] = name + "/1\n"
b[0] = name + "/2\n"
else:
b[0] = a[0] # Keep same read ID for pairs
p1fw[lib].writelines(a)
p2fw[lib].writelines(b)
currentID += 2
npairs += 2
# Write the remaining single reads
while True:
contents = list(islice(fp, 4))
if not contents:
break
fragsfw.writelines(contents)
nfrags += 1
logging.debug("A total of {0} paired reads written to `{1}`.".\
format(npairs, ",".join(x.name for x in p1fw + p2fw)))
logging.debug("A total of {0} single reads written to `{1}`.".\
format(nfrags, fragsfw.name))
# Validate the numbers
expected_pairs = 2 * p.npairs
expected_frags = p.nreads - 2 * p.npairs
assert npairs == expected_pairs, "Expect {0} paired reads, got {1} instead".\
format(expected_pairs, npairs)
assert nfrags == expected_frags, "Expect {0} single reads, got {1} instead".\
format(expected_frags, nfrags) | python | def extract_pairs(fastqfile, p1fw, p2fw, fragsfw, p, suffix=False):
"""
Take fastqfile and array of pair ID, extract adjacent pairs to outfile.
Perform check on numbers when done. p1fw, p2fw is a list of file handles,
each for one end. p is a Pairs instance.
"""
fp = open(fastqfile)
currentID = 0
npairs = nfrags = 0
for x, lib in izip(p.r1, p.libs):
while currentID != x:
fragsfw.writelines(islice(fp, 4)) # Exhaust the iterator
currentID += 1
nfrags += 1
a = list(islice(fp, 4))
b = list(islice(fp, 4))
if suffix:
name = a[0].rstrip()
a[0] = name + "/1\n"
b[0] = name + "/2\n"
else:
b[0] = a[0] # Keep same read ID for pairs
p1fw[lib].writelines(a)
p2fw[lib].writelines(b)
currentID += 2
npairs += 2
# Write the remaining single reads
while True:
contents = list(islice(fp, 4))
if not contents:
break
fragsfw.writelines(contents)
nfrags += 1
logging.debug("A total of {0} paired reads written to `{1}`.".\
format(npairs, ",".join(x.name for x in p1fw + p2fw)))
logging.debug("A total of {0} single reads written to `{1}`.".\
format(nfrags, fragsfw.name))
# Validate the numbers
expected_pairs = 2 * p.npairs
expected_frags = p.nreads - 2 * p.npairs
assert npairs == expected_pairs, "Expect {0} paired reads, got {1} instead".\
format(expected_pairs, npairs)
assert nfrags == expected_frags, "Expect {0} single reads, got {1} instead".\
format(expected_frags, nfrags) | [
"def",
"extract_pairs",
"(",
"fastqfile",
",",
"p1fw",
",",
"p2fw",
",",
"fragsfw",
",",
"p",
",",
"suffix",
"=",
"False",
")",
":",
"fp",
"=",
"open",
"(",
"fastqfile",
")",
"currentID",
"=",
"0",
"npairs",
"=",
"nfrags",
"=",
"0",
"for",
"x",
","... | Take fastqfile and array of pair ID, extract adjacent pairs to outfile.
Perform check on numbers when done. p1fw, p2fw is a list of file handles,
each for one end. p is a Pairs instance. | [
"Take",
"fastqfile",
"and",
"array",
"of",
"pair",
"ID",
"extract",
"adjacent",
"pairs",
"to",
"outfile",
".",
"Perform",
"check",
"on",
"numbers",
"when",
"done",
".",
"p1fw",
"p2fw",
"is",
"a",
"list",
"of",
"file",
"handles",
"each",
"for",
"one",
"en... | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/assembly/allpaths.py#L258-L305 | train | 200,963 |
tanghaibao/jcvi | jcvi/assembly/allpaths.py | log | def log(args):
"""
%prog log logfile
Prepare a log of created files, ordered by their creation data. The purpose
for this script is to touch these files sequentially to reflect their build
order. On the JCVI scratch area, the files are touched regularly to avoid
getting deleted, losing their respective timestamps. However, this created a
problem for the make system adopted by ALLPATHS.
An example block to be extracted ==>
[PC] Calling PreCorrect to create 2 file(s):
[PC]
[PC] $(RUN)/frag_reads_prec.fastb
[PC] $(RUN)/frag_reads_prec.qualb
[PC]
[PC] from 2 file(s):
[PC]
[PC] $(RUN)/frag_reads_filt.fastb
[PC] $(RUN)/frag_reads_filt.qualb
"""
from jcvi.algorithms.graph import nx, topological_sort
p = OptionParser(log.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
g = nx.DiGraph()
logfile, = args
fp = open(logfile)
row = fp.readline()
incalling = False
basedb = {}
while row:
atoms = row.split()
if len(atoms) < 3:
row = fp.readline()
continue
tag, token, trailing = atoms[0], atoms[1], atoms[-1]
if trailing == 'file(s):':
numfiles = int(atoms[-2])
row = fp.readline()
assert row.strip() == tag
if token == "Calling" and not incalling:
createfiles = []
for i in xrange(numfiles):
row = fp.readline()
createfiles.append(row.split()[-1])
incalling = True
if token == "from" and incalling:
fromfiles = []
for i in xrange(numfiles):
row = fp.readline()
fromfiles.append(row.split()[-1])
for a in fromfiles:
for b in createfiles:
ba, bb = op.basename(a), op.basename(b)
basedb[ba] = a
basedb[bb] = b
g.add_edge(ba, bb)
incalling = False
if token == "ln":
fromfile, createfile = atoms[-2:]
ba, bb = op.basename(fromfile), op.basename(createfile)
#print ba, "-->", bb
if ba != bb:
g.add_edge(ba, bb)
row = fp.readline()
ts = [basedb[x] for x in topological_sort(g) if x in basedb]
print("\n".join(ts)) | python | def log(args):
"""
%prog log logfile
Prepare a log of created files, ordered by their creation data. The purpose
for this script is to touch these files sequentially to reflect their build
order. On the JCVI scratch area, the files are touched regularly to avoid
getting deleted, losing their respective timestamps. However, this created a
problem for the make system adopted by ALLPATHS.
An example block to be extracted ==>
[PC] Calling PreCorrect to create 2 file(s):
[PC]
[PC] $(RUN)/frag_reads_prec.fastb
[PC] $(RUN)/frag_reads_prec.qualb
[PC]
[PC] from 2 file(s):
[PC]
[PC] $(RUN)/frag_reads_filt.fastb
[PC] $(RUN)/frag_reads_filt.qualb
"""
from jcvi.algorithms.graph import nx, topological_sort
p = OptionParser(log.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
g = nx.DiGraph()
logfile, = args
fp = open(logfile)
row = fp.readline()
incalling = False
basedb = {}
while row:
atoms = row.split()
if len(atoms) < 3:
row = fp.readline()
continue
tag, token, trailing = atoms[0], atoms[1], atoms[-1]
if trailing == 'file(s):':
numfiles = int(atoms[-2])
row = fp.readline()
assert row.strip() == tag
if token == "Calling" and not incalling:
createfiles = []
for i in xrange(numfiles):
row = fp.readline()
createfiles.append(row.split()[-1])
incalling = True
if token == "from" and incalling:
fromfiles = []
for i in xrange(numfiles):
row = fp.readline()
fromfiles.append(row.split()[-1])
for a in fromfiles:
for b in createfiles:
ba, bb = op.basename(a), op.basename(b)
basedb[ba] = a
basedb[bb] = b
g.add_edge(ba, bb)
incalling = False
if token == "ln":
fromfile, createfile = atoms[-2:]
ba, bb = op.basename(fromfile), op.basename(createfile)
#print ba, "-->", bb
if ba != bb:
g.add_edge(ba, bb)
row = fp.readline()
ts = [basedb[x] for x in topological_sort(g) if x in basedb]
print("\n".join(ts)) | [
"def",
"log",
"(",
"args",
")",
":",
"from",
"jcvi",
".",
"algorithms",
".",
"graph",
"import",
"nx",
",",
"topological_sort",
"p",
"=",
"OptionParser",
"(",
"log",
".",
"__doc__",
")",
"opts",
",",
"args",
"=",
"p",
".",
"parse_args",
"(",
"args",
"... | %prog log logfile
Prepare a log of created files, ordered by their creation data. The purpose
for this script is to touch these files sequentially to reflect their build
order. On the JCVI scratch area, the files are touched regularly to avoid
getting deleted, losing their respective timestamps. However, this created a
problem for the make system adopted by ALLPATHS.
An example block to be extracted ==>
[PC] Calling PreCorrect to create 2 file(s):
[PC]
[PC] $(RUN)/frag_reads_prec.fastb
[PC] $(RUN)/frag_reads_prec.qualb
[PC]
[PC] from 2 file(s):
[PC]
[PC] $(RUN)/frag_reads_filt.fastb
[PC] $(RUN)/frag_reads_filt.qualb | [
"%prog",
"log",
"logfile"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/assembly/allpaths.py#L480-L561 | train | 200,964 |
tanghaibao/jcvi | jcvi/utils/grouper.py | Grouper.join | def join(self, a, *args):
"""
Join given arguments into the same set. Accepts one or more arguments.
"""
mapping = self._mapping
set_a = mapping.setdefault(a, [a])
for arg in args:
set_b = mapping.get(arg)
if set_b is None:
set_a.append(arg)
mapping[arg] = set_a
elif set_b is not set_a:
if len(set_b) > len(set_a):
set_a, set_b = set_b, set_a
set_a.extend(set_b)
for elem in set_b:
mapping[elem] = set_a | python | def join(self, a, *args):
"""
Join given arguments into the same set. Accepts one or more arguments.
"""
mapping = self._mapping
set_a = mapping.setdefault(a, [a])
for arg in args:
set_b = mapping.get(arg)
if set_b is None:
set_a.append(arg)
mapping[arg] = set_a
elif set_b is not set_a:
if len(set_b) > len(set_a):
set_a, set_b = set_b, set_a
set_a.extend(set_b)
for elem in set_b:
mapping[elem] = set_a | [
"def",
"join",
"(",
"self",
",",
"a",
",",
"*",
"args",
")",
":",
"mapping",
"=",
"self",
".",
"_mapping",
"set_a",
"=",
"mapping",
".",
"setdefault",
"(",
"a",
",",
"[",
"a",
"]",
")",
"for",
"arg",
"in",
"args",
":",
"set_b",
"=",
"mapping",
... | Join given arguments into the same set. Accepts one or more arguments. | [
"Join",
"given",
"arguments",
"into",
"the",
"same",
"set",
".",
"Accepts",
"one",
"or",
"more",
"arguments",
"."
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/utils/grouper.py#L44-L61 | train | 200,965 |
tanghaibao/jcvi | jcvi/utils/grouper.py | Grouper.joined | def joined(self, a, b):
"""
Returns True if a and b are members of the same set.
"""
mapping = self._mapping
try:
return mapping[a] is mapping[b]
except KeyError:
return False | python | def joined(self, a, b):
"""
Returns True if a and b are members of the same set.
"""
mapping = self._mapping
try:
return mapping[a] is mapping[b]
except KeyError:
return False | [
"def",
"joined",
"(",
"self",
",",
"a",
",",
"b",
")",
":",
"mapping",
"=",
"self",
".",
"_mapping",
"try",
":",
"return",
"mapping",
"[",
"a",
"]",
"is",
"mapping",
"[",
"b",
"]",
"except",
"KeyError",
":",
"return",
"False"
] | Returns True if a and b are members of the same set. | [
"Returns",
"True",
"if",
"a",
"and",
"b",
"are",
"members",
"of",
"the",
"same",
"set",
"."
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/utils/grouper.py#L63-L71 | train | 200,966 |
tanghaibao/jcvi | jcvi/formats/excel.py | fromcsv | def fromcsv(args):
"""
%prog fromcsv csvfile
Convert csv file to EXCEL.
"""
from csv import reader
from xlwt import Workbook, easyxf
from jcvi.formats.base import flexible_cast
p = OptionParser(fromcsv.__doc__)
p.add_option("--noheader", default=False, action="store_true",
help="Do not treat the first row as header")
p.add_option("--rgb", default=-1, type="int",
help="Show RGB color box")
p.set_sep()
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
csvfile, = args
header = not opts.noheader
rgb = opts.rgb
excelfile = csvfile.rsplit(".", 1)[0] + ".xls"
data = []
for row in reader(open(csvfile), delimiter=opts.sep):
data.append(row)
w = Workbook()
s = w.add_sheet(op.basename(csvfile))
header_style = easyxf('font: bold on')
if header:
s.panes_frozen = True
s.horz_split_pos = 1
cm = ColorMatcher()
for i, row in enumerate(data):
for j, cell in enumerate(row):
cell = flexible_cast(cell)
if header and i == 0:
s.write(i, j, cell, header_style)
else:
if j == rgb:
cix = cm.match_color_index(cell)
color_style = easyxf('font: color_index {0}'.format(cix))
s.write(i, j, cell, color_style)
else:
s.write(i, j, cell)
w.save(excelfile)
logging.debug("File written to `{0}`.".format(excelfile))
return excelfile | python | def fromcsv(args):
"""
%prog fromcsv csvfile
Convert csv file to EXCEL.
"""
from csv import reader
from xlwt import Workbook, easyxf
from jcvi.formats.base import flexible_cast
p = OptionParser(fromcsv.__doc__)
p.add_option("--noheader", default=False, action="store_true",
help="Do not treat the first row as header")
p.add_option("--rgb", default=-1, type="int",
help="Show RGB color box")
p.set_sep()
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
csvfile, = args
header = not opts.noheader
rgb = opts.rgb
excelfile = csvfile.rsplit(".", 1)[0] + ".xls"
data = []
for row in reader(open(csvfile), delimiter=opts.sep):
data.append(row)
w = Workbook()
s = w.add_sheet(op.basename(csvfile))
header_style = easyxf('font: bold on')
if header:
s.panes_frozen = True
s.horz_split_pos = 1
cm = ColorMatcher()
for i, row in enumerate(data):
for j, cell in enumerate(row):
cell = flexible_cast(cell)
if header and i == 0:
s.write(i, j, cell, header_style)
else:
if j == rgb:
cix = cm.match_color_index(cell)
color_style = easyxf('font: color_index {0}'.format(cix))
s.write(i, j, cell, color_style)
else:
s.write(i, j, cell)
w.save(excelfile)
logging.debug("File written to `{0}`.".format(excelfile))
return excelfile | [
"def",
"fromcsv",
"(",
"args",
")",
":",
"from",
"csv",
"import",
"reader",
"from",
"xlwt",
"import",
"Workbook",
",",
"easyxf",
"from",
"jcvi",
".",
"formats",
".",
"base",
"import",
"flexible_cast",
"p",
"=",
"OptionParser",
"(",
"fromcsv",
".",
"__doc__... | %prog fromcsv csvfile
Convert csv file to EXCEL. | [
"%prog",
"fromcsv",
"csvfile"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/excel.py#L105-L159 | train | 200,967 |
tanghaibao/jcvi | jcvi/formats/excel.py | csv | def csv(args):
"""
%prog csv excelfile
Convert EXCEL to csv file.
"""
from xlrd import open_workbook
p = OptionParser(csv.__doc__)
p.set_sep(sep=',')
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
excelfile, = args
sep = opts.sep
csvfile = excelfile.rsplit(".", 1)[0] + ".csv"
wb = open_workbook(excelfile)
fw = open(csvfile, "w")
for s in wb.sheets():
print('Sheet:',s.name, file=sys.stderr)
for row in range(s.nrows):
values = []
for col in range(s.ncols):
values.append(s.cell(row, col).value)
print(sep.join(str(x) for x in values), file=fw) | python | def csv(args):
"""
%prog csv excelfile
Convert EXCEL to csv file.
"""
from xlrd import open_workbook
p = OptionParser(csv.__doc__)
p.set_sep(sep=',')
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
excelfile, = args
sep = opts.sep
csvfile = excelfile.rsplit(".", 1)[0] + ".csv"
wb = open_workbook(excelfile)
fw = open(csvfile, "w")
for s in wb.sheets():
print('Sheet:',s.name, file=sys.stderr)
for row in range(s.nrows):
values = []
for col in range(s.ncols):
values.append(s.cell(row, col).value)
print(sep.join(str(x) for x in values), file=fw) | [
"def",
"csv",
"(",
"args",
")",
":",
"from",
"xlrd",
"import",
"open_workbook",
"p",
"=",
"OptionParser",
"(",
"csv",
".",
"__doc__",
")",
"p",
".",
"set_sep",
"(",
"sep",
"=",
"','",
")",
"opts",
",",
"args",
"=",
"p",
".",
"parse_args",
"(",
"arg... | %prog csv excelfile
Convert EXCEL to csv file. | [
"%prog",
"csv",
"excelfile"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/excel.py#L162-L188 | train | 200,968 |
tanghaibao/jcvi | jcvi/formats/excel.py | ColorMatcher.match_color_index | def match_color_index(self, color):
"""Takes an "R,G,B" string or wx.Color and returns a matching xlwt
color.
"""
from jcvi.utils.webcolors import color_diff
if isinstance(color, int):
return color
if color:
if isinstance(color, six.string_types):
rgb = map(int, color.split(','))
else:
rgb = color.Get()
logging.disable(logging.DEBUG)
distances = [color_diff(rgb, x) for x in self.xlwt_colors]
logging.disable(logging.NOTSET)
result = distances.index(min(distances))
self.unused_colors.discard(self.xlwt_colors[result])
return result | python | def match_color_index(self, color):
"""Takes an "R,G,B" string or wx.Color and returns a matching xlwt
color.
"""
from jcvi.utils.webcolors import color_diff
if isinstance(color, int):
return color
if color:
if isinstance(color, six.string_types):
rgb = map(int, color.split(','))
else:
rgb = color.Get()
logging.disable(logging.DEBUG)
distances = [color_diff(rgb, x) for x in self.xlwt_colors]
logging.disable(logging.NOTSET)
result = distances.index(min(distances))
self.unused_colors.discard(self.xlwt_colors[result])
return result | [
"def",
"match_color_index",
"(",
"self",
",",
"color",
")",
":",
"from",
"jcvi",
".",
"utils",
".",
"webcolors",
"import",
"color_diff",
"if",
"isinstance",
"(",
"color",
",",
"int",
")",
":",
"return",
"color",
"if",
"color",
":",
"if",
"isinstance",
"(... | Takes an "R,G,B" string or wx.Color and returns a matching xlwt
color. | [
"Takes",
"an",
"R",
"G",
"B",
"string",
"or",
"wx",
".",
"Color",
"and",
"returns",
"a",
"matching",
"xlwt",
"color",
"."
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/excel.py#L59-L76 | train | 200,969 |
tanghaibao/jcvi | jcvi/formats/excel.py | ColorMatcher.get_unused_color | def get_unused_color(self):
"""Returns an xlwt color index that has not been previously returned by
this instance. Attempts to maximize the distance between the color and
all previously used colors.
"""
if not self.unused_colors:
# If we somehow run out of colors, reset the color matcher.
self.reset()
used_colors = [c for c in self.xlwt_colors if c not in self.unused_colors]
result_color = max(self.unused_colors,
key=lambda c: min(self.color_distance(c, c2)
for c2 in used_colors))
result_index = self.xlwt_colors.index(result_color)
self.unused_colors.discard(result_color)
return result_index | python | def get_unused_color(self):
"""Returns an xlwt color index that has not been previously returned by
this instance. Attempts to maximize the distance between the color and
all previously used colors.
"""
if not self.unused_colors:
# If we somehow run out of colors, reset the color matcher.
self.reset()
used_colors = [c for c in self.xlwt_colors if c not in self.unused_colors]
result_color = max(self.unused_colors,
key=lambda c: min(self.color_distance(c, c2)
for c2 in used_colors))
result_index = self.xlwt_colors.index(result_color)
self.unused_colors.discard(result_color)
return result_index | [
"def",
"get_unused_color",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"unused_colors",
":",
"# If we somehow run out of colors, reset the color matcher.",
"self",
".",
"reset",
"(",
")",
"used_colors",
"=",
"[",
"c",
"for",
"c",
"in",
"self",
".",
"xlwt_co... | Returns an xlwt color index that has not been previously returned by
this instance. Attempts to maximize the distance between the color and
all previously used colors. | [
"Returns",
"an",
"xlwt",
"color",
"index",
"that",
"has",
"not",
"been",
"previously",
"returned",
"by",
"this",
"instance",
".",
"Attempts",
"to",
"maximize",
"the",
"distance",
"between",
"the",
"color",
"and",
"all",
"previously",
"used",
"colors",
"."
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/excel.py#L78-L92 | train | 200,970 |
tanghaibao/jcvi | jcvi/formats/vcf.py | validate | def validate(args):
"""
%prog validate input.vcf genome.fasta
Fasta validation of vcf file.
"""
import pyfasta
p = OptionParser(validate.__doc__)
p.add_option("--prefix", help="Add prefix to seqid")
opts, args = p.parse_args(args)
vcffile, fastafile = args
pf = opts.prefix
genome = pyfasta.Fasta(fastafile, record_class=pyfasta.MemoryRecord)
fp = must_open(vcffile)
match_ref = match_alt = total = 0
for row in fp:
if row[0] == '#':
continue
seqid, pos, id, ref, alt = row.split()[:5]
total += 1
if pf:
seqid = pf + seqid
pos = int(pos)
if seqid not in genome:
continue
true_ref = genome[seqid][pos - 1]
if total % 100000 == 0:
print(total, "sites parsed", file=sys.stderr)
if ref == true_ref:
match_ref += 1
elif alt == true_ref:
match_alt += 1
logging.debug("Match REF: {}".format(percentage(match_ref, total)))
logging.debug("Match ALT: {}".format(percentage(match_alt, total))) | python | def validate(args):
"""
%prog validate input.vcf genome.fasta
Fasta validation of vcf file.
"""
import pyfasta
p = OptionParser(validate.__doc__)
p.add_option("--prefix", help="Add prefix to seqid")
opts, args = p.parse_args(args)
vcffile, fastafile = args
pf = opts.prefix
genome = pyfasta.Fasta(fastafile, record_class=pyfasta.MemoryRecord)
fp = must_open(vcffile)
match_ref = match_alt = total = 0
for row in fp:
if row[0] == '#':
continue
seqid, pos, id, ref, alt = row.split()[:5]
total += 1
if pf:
seqid = pf + seqid
pos = int(pos)
if seqid not in genome:
continue
true_ref = genome[seqid][pos - 1]
if total % 100000 == 0:
print(total, "sites parsed", file=sys.stderr)
if ref == true_ref:
match_ref += 1
elif alt == true_ref:
match_alt += 1
logging.debug("Match REF: {}".format(percentage(match_ref, total)))
logging.debug("Match ALT: {}".format(percentage(match_alt, total))) | [
"def",
"validate",
"(",
"args",
")",
":",
"import",
"pyfasta",
"p",
"=",
"OptionParser",
"(",
"validate",
".",
"__doc__",
")",
"p",
".",
"add_option",
"(",
"\"--prefix\"",
",",
"help",
"=",
"\"Add prefix to seqid\"",
")",
"opts",
",",
"args",
"=",
"p",
"... | %prog validate input.vcf genome.fasta
Fasta validation of vcf file. | [
"%prog",
"validate",
"input",
".",
"vcf",
"genome",
".",
"fasta"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/vcf.py#L127-L163 | train | 200,971 |
tanghaibao/jcvi | jcvi/formats/vcf.py | uniq | def uniq(args):
"""
%prog uniq vcffile
Retain only the first entry in vcf file.
"""
from six.moves.urllib.parse import parse_qs
p = OptionParser(uniq.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
vcffile, = args
fp = must_open(vcffile)
data = []
for row in fp:
if row[0] == '#':
print(row.strip())
continue
v = VcfLine(row)
data.append(v)
for pos, vv in groupby(data, lambda x: x.pos):
vv = list(vv)
if len(vv) == 1:
print(vv[0])
continue
bestv = max(vv, key=lambda x: float(parse_qs(x.info)["R2"][0]))
print(bestv) | python | def uniq(args):
"""
%prog uniq vcffile
Retain only the first entry in vcf file.
"""
from six.moves.urllib.parse import parse_qs
p = OptionParser(uniq.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
vcffile, = args
fp = must_open(vcffile)
data = []
for row in fp:
if row[0] == '#':
print(row.strip())
continue
v = VcfLine(row)
data.append(v)
for pos, vv in groupby(data, lambda x: x.pos):
vv = list(vv)
if len(vv) == 1:
print(vv[0])
continue
bestv = max(vv, key=lambda x: float(parse_qs(x.info)["R2"][0]))
print(bestv) | [
"def",
"uniq",
"(",
"args",
")",
":",
"from",
"six",
".",
"moves",
".",
"urllib",
".",
"parse",
"import",
"parse_qs",
"p",
"=",
"OptionParser",
"(",
"uniq",
".",
"__doc__",
")",
"opts",
",",
"args",
"=",
"p",
".",
"parse_args",
"(",
"args",
")",
"i... | %prog uniq vcffile
Retain only the first entry in vcf file. | [
"%prog",
"uniq",
"vcffile"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/vcf.py#L166-L196 | train | 200,972 |
tanghaibao/jcvi | jcvi/formats/vcf.py | sample | def sample(args):
"""
%prog sample vcffile 0.9
Sample subset of vcf file.
"""
from random import random
p = OptionParser(sample.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
vcffile, ratio = args
ratio = float(ratio)
fp = open(vcffile)
pf = vcffile.rsplit(".", 1)[0]
kept = pf + ".kept.vcf"
withheld = pf + ".withheld.vcf"
fwk = open(kept, "w")
fww = open(withheld, "w")
nkept = nwithheld = 0
for row in fp:
if row[0] == '#':
print(row.strip(), file=fwk)
continue
if random() < ratio:
nkept += 1
print(row.strip(), file=fwk)
else:
nwithheld += 1
print(row.strip(), file=fww)
logging.debug("{0} records kept to `{1}`".format(nkept, kept))
logging.debug("{0} records withheld to `{1}`".format(nwithheld, withheld)) | python | def sample(args):
"""
%prog sample vcffile 0.9
Sample subset of vcf file.
"""
from random import random
p = OptionParser(sample.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
vcffile, ratio = args
ratio = float(ratio)
fp = open(vcffile)
pf = vcffile.rsplit(".", 1)[0]
kept = pf + ".kept.vcf"
withheld = pf + ".withheld.vcf"
fwk = open(kept, "w")
fww = open(withheld, "w")
nkept = nwithheld = 0
for row in fp:
if row[0] == '#':
print(row.strip(), file=fwk)
continue
if random() < ratio:
nkept += 1
print(row.strip(), file=fwk)
else:
nwithheld += 1
print(row.strip(), file=fww)
logging.debug("{0} records kept to `{1}`".format(nkept, kept))
logging.debug("{0} records withheld to `{1}`".format(nwithheld, withheld)) | [
"def",
"sample",
"(",
"args",
")",
":",
"from",
"random",
"import",
"random",
"p",
"=",
"OptionParser",
"(",
"sample",
".",
"__doc__",
")",
"opts",
",",
"args",
"=",
"p",
".",
"parse_args",
"(",
"args",
")",
"if",
"len",
"(",
"args",
")",
"!=",
"2"... | %prog sample vcffile 0.9
Sample subset of vcf file. | [
"%prog",
"sample",
"vcffile",
"0",
".",
"9"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/vcf.py#L199-L233 | train | 200,973 |
tanghaibao/jcvi | jcvi/formats/vcf.py | fromimpute2 | def fromimpute2(args):
"""
%prog fromimpute2 impute2file fastafile 1
Convert impute2 output to vcf file. Imputed file looks like:
--- 1:10177:A:AC 10177 A AC 0.451 0.547 0.002
"""
p = OptionParser(fromimpute2.__doc__)
opts, args = p.parse_args(args)
if len(args) != 3:
sys.exit(not p.print_help())
impute2file, fastafile, chr = args
fasta = Fasta(fastafile)
print(get_vcfstanza(fastafile, fasta))
fp = open(impute2file)
seen = set()
for row in fp:
snp_id, rsid, pos, ref, alt, aa, ab, bb = row.split()
pos = int(pos)
if pos in seen:
continue
seen.add(pos)
code = max((float(aa), "0/0"), (float(ab), "0/1"), (float(bb), "1/1"))[-1]
tag = "PR" if snp_id == chr else "IM"
print("\t".join(str(x) for x in \
(chr, pos, rsid, ref, alt, ".", ".", tag, \
"GT:GP", code + ":" + ",".join((aa, ab, bb))))) | python | def fromimpute2(args):
"""
%prog fromimpute2 impute2file fastafile 1
Convert impute2 output to vcf file. Imputed file looks like:
--- 1:10177:A:AC 10177 A AC 0.451 0.547 0.002
"""
p = OptionParser(fromimpute2.__doc__)
opts, args = p.parse_args(args)
if len(args) != 3:
sys.exit(not p.print_help())
impute2file, fastafile, chr = args
fasta = Fasta(fastafile)
print(get_vcfstanza(fastafile, fasta))
fp = open(impute2file)
seen = set()
for row in fp:
snp_id, rsid, pos, ref, alt, aa, ab, bb = row.split()
pos = int(pos)
if pos in seen:
continue
seen.add(pos)
code = max((float(aa), "0/0"), (float(ab), "0/1"), (float(bb), "1/1"))[-1]
tag = "PR" if snp_id == chr else "IM"
print("\t".join(str(x) for x in \
(chr, pos, rsid, ref, alt, ".", ".", tag, \
"GT:GP", code + ":" + ",".join((aa, ab, bb))))) | [
"def",
"fromimpute2",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"fromimpute2",
".",
"__doc__",
")",
"opts",
",",
"args",
"=",
"p",
".",
"parse_args",
"(",
"args",
")",
"if",
"len",
"(",
"args",
")",
"!=",
"3",
":",
"sys",
".",
"exit",
... | %prog fromimpute2 impute2file fastafile 1
Convert impute2 output to vcf file. Imputed file looks like:
--- 1:10177:A:AC 10177 A AC 0.451 0.547 0.002 | [
"%prog",
"fromimpute2",
"impute2file",
"fastafile",
"1"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/vcf.py#L252-L281 | train | 200,974 |
tanghaibao/jcvi | jcvi/formats/vcf.py | refallele | def refallele(args):
"""
%prog refallele vcffile > out.refAllele
Make refAllele file which can be used to convert PLINK file to VCF file.
"""
p = OptionParser(refallele.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
vcffile, = args
fp = open(vcffile)
for row in fp:
if row[0] == '#':
continue
atoms = row.split()
marker = "{0}:{1}".format(*atoms[:2])
ref = atoms[3]
print("\t".join((marker, ref))) | python | def refallele(args):
"""
%prog refallele vcffile > out.refAllele
Make refAllele file which can be used to convert PLINK file to VCF file.
"""
p = OptionParser(refallele.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
vcffile, = args
fp = open(vcffile)
for row in fp:
if row[0] == '#':
continue
atoms = row.split()
marker = "{0}:{1}".format(*atoms[:2])
ref = atoms[3]
print("\t".join((marker, ref))) | [
"def",
"refallele",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"refallele",
".",
"__doc__",
")",
"opts",
",",
"args",
"=",
"p",
".",
"parse_args",
"(",
"args",
")",
"if",
"len",
"(",
"args",
")",
"!=",
"1",
":",
"sys",
".",
"exit",
"("... | %prog refallele vcffile > out.refAllele
Make refAllele file which can be used to convert PLINK file to VCF file. | [
"%prog",
"refallele",
"vcffile",
">",
"out",
".",
"refAllele"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/vcf.py#L420-L440 | train | 200,975 |
tanghaibao/jcvi | jcvi/formats/vcf.py | location | def location(args):
"""
%prog location bedfile fastafile
Given SNP locations, summarize the locations in the sequences. For example,
find out if there are more 3`-SNPs than 5`-SNPs.
"""
from jcvi.formats.bed import BedLine
from jcvi.graphics.histogram import stem_leaf_plot
p = OptionParser(location.__doc__)
p.add_option("--dist", default=100, type="int",
help="Distance cutoff to call 5` and 3` [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
bedfile, fastafile = args
dist = opts.dist
sizes = Sizes(fastafile).mapping
fp = open(bedfile)
fiveprime = threeprime = total = 0
percentages = []
for row in fp:
b = BedLine(row)
pos = b.start
size = sizes[b.seqid]
if pos < dist:
fiveprime += 1
if size - pos < dist:
threeprime += 1
total += 1
percentages.append(100 * pos / size)
m = "Five prime (within {0}bp of start codon): {1}\n".format(dist, fiveprime)
m += "Three prime (within {0}bp of stop codon): {1}\n".format(dist, threeprime)
m += "Total: {0}".format(total)
print(m, file=sys.stderr)
bins = 10
title = "Locations within the gene [0=Five-prime, 100=Three-prime]"
stem_leaf_plot(percentages, 0, 100, bins, title=title) | python | def location(args):
"""
%prog location bedfile fastafile
Given SNP locations, summarize the locations in the sequences. For example,
find out if there are more 3`-SNPs than 5`-SNPs.
"""
from jcvi.formats.bed import BedLine
from jcvi.graphics.histogram import stem_leaf_plot
p = OptionParser(location.__doc__)
p.add_option("--dist", default=100, type="int",
help="Distance cutoff to call 5` and 3` [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
bedfile, fastafile = args
dist = opts.dist
sizes = Sizes(fastafile).mapping
fp = open(bedfile)
fiveprime = threeprime = total = 0
percentages = []
for row in fp:
b = BedLine(row)
pos = b.start
size = sizes[b.seqid]
if pos < dist:
fiveprime += 1
if size - pos < dist:
threeprime += 1
total += 1
percentages.append(100 * pos / size)
m = "Five prime (within {0}bp of start codon): {1}\n".format(dist, fiveprime)
m += "Three prime (within {0}bp of stop codon): {1}\n".format(dist, threeprime)
m += "Total: {0}".format(total)
print(m, file=sys.stderr)
bins = 10
title = "Locations within the gene [0=Five-prime, 100=Three-prime]"
stem_leaf_plot(percentages, 0, 100, bins, title=title) | [
"def",
"location",
"(",
"args",
")",
":",
"from",
"jcvi",
".",
"formats",
".",
"bed",
"import",
"BedLine",
"from",
"jcvi",
".",
"graphics",
".",
"histogram",
"import",
"stem_leaf_plot",
"p",
"=",
"OptionParser",
"(",
"location",
".",
"__doc__",
")",
"p",
... | %prog location bedfile fastafile
Given SNP locations, summarize the locations in the sequences. For example,
find out if there are more 3`-SNPs than 5`-SNPs. | [
"%prog",
"location",
"bedfile",
"fastafile"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/vcf.py#L443-L485 | train | 200,976 |
tanghaibao/jcvi | jcvi/formats/vcf.py | liftover | def liftover(args):
"""
%prog liftover old.vcf hg19ToHg38.over.chain.gz new.vcf
Lift over coordinates in vcf file.
"""
p = OptionParser(liftover.__doc__)
p.add_option("--newid", default=False, action="store_true",
help="Make new identifiers")
opts, args = p.parse_args(args)
if len(args) != 3:
sys.exit(not p.print_help())
oldvcf, chainfile, newvcf = args
ul = UniqueLiftover(chainfile)
num_excluded = 0
fp = open(oldvcf)
fw = open(newvcf, "w")
for row in fp:
row = row.strip()
if row[0] == '#':
if row.startswith("##source="):
row = "##source={0}".format(__file__)
elif row.startswith("##reference="):
row = "##reference=hg38"
elif row.startswith("##contig="):
continue
print(row.strip(), file=fw)
continue
v = VcfLine(row)
# GRCh37.p2 has the same MT sequence as hg38 (but hg19 is different)
if v.seqid == "MT":
v.seqid = "chrM"
print(v, file=fw)
continue
try:
new_chrom, new_pos = ul.liftover_cpra(CM[v.seqid], v.pos)
except:
num_excluded +=1
continue
if new_chrom != None and new_pos != None:
v.seqid, v.pos = new_chrom, new_pos
if opts.newid:
v.rsid = "{0}:{1}".format(new_chrom.replace("chr", ""), new_pos)
print(v, file=fw)
else:
num_excluded +=1
logging.debug("Excluded {0}".format(num_excluded)) | python | def liftover(args):
"""
%prog liftover old.vcf hg19ToHg38.over.chain.gz new.vcf
Lift over coordinates in vcf file.
"""
p = OptionParser(liftover.__doc__)
p.add_option("--newid", default=False, action="store_true",
help="Make new identifiers")
opts, args = p.parse_args(args)
if len(args) != 3:
sys.exit(not p.print_help())
oldvcf, chainfile, newvcf = args
ul = UniqueLiftover(chainfile)
num_excluded = 0
fp = open(oldvcf)
fw = open(newvcf, "w")
for row in fp:
row = row.strip()
if row[0] == '#':
if row.startswith("##source="):
row = "##source={0}".format(__file__)
elif row.startswith("##reference="):
row = "##reference=hg38"
elif row.startswith("##contig="):
continue
print(row.strip(), file=fw)
continue
v = VcfLine(row)
# GRCh37.p2 has the same MT sequence as hg38 (but hg19 is different)
if v.seqid == "MT":
v.seqid = "chrM"
print(v, file=fw)
continue
try:
new_chrom, new_pos = ul.liftover_cpra(CM[v.seqid], v.pos)
except:
num_excluded +=1
continue
if new_chrom != None and new_pos != None:
v.seqid, v.pos = new_chrom, new_pos
if opts.newid:
v.rsid = "{0}:{1}".format(new_chrom.replace("chr", ""), new_pos)
print(v, file=fw)
else:
num_excluded +=1
logging.debug("Excluded {0}".format(num_excluded)) | [
"def",
"liftover",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"liftover",
".",
"__doc__",
")",
"p",
".",
"add_option",
"(",
"\"--newid\"",
",",
"default",
"=",
"False",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"\"Make new identifi... | %prog liftover old.vcf hg19ToHg38.over.chain.gz new.vcf
Lift over coordinates in vcf file. | [
"%prog",
"liftover",
"old",
".",
"vcf",
"hg19ToHg38",
".",
"over",
".",
"chain",
".",
"gz",
"new",
".",
"vcf"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/vcf.py#L712-L764 | train | 200,977 |
tanghaibao/jcvi | jcvi/graphics/landscape.py | multilineplot | def multilineplot(args):
"""
%prog multilineplot fastafile chr1
Combine multiple line plots in one vertical stack
Inputs must be BED-formatted.
--lines: traditional line plots, useful for plotting feature freq
"""
p = OptionParser(multilineplot.__doc__)
p.add_option("--lines",
help="Features to plot in lineplot [default: %default]")
p.add_option("--colors",
help="List of colors matching number of input bed files")
p.add_option("--mode", default="span", choices=("span", "count", "score"),
help="Accumulate feature based on [default: %default]")
p.add_option("--binned", default=False, action="store_true",
help="Specify whether the input is already binned; " +
"if True, input files are considered to be binfiles")
p.add_option("--ymax", type="int", help="Set Y-axis max")
add_window_options(p)
opts, args, iopts = p.set_image_options(args, figsize="8x5")
if len(args) != 2:
sys.exit(not p.print_help())
fastafile, chr = args
window, shift, subtract, merge = check_window_options(opts)
linebeds = []
colors = opts.colors
if opts.lines:
lines = opts.lines.split(",")
assert len(colors) == len(lines), "Number of chosen colors must match" + \
" number of input bed files"
linebeds = get_beds(lines, binned=opts.binned)
linebins = get_binfiles(linebeds, fastafile, shift, mode=opts.mode,
binned=opts.binned, merge=merge)
clen = Sizes(fastafile).mapping[chr]
nbins = get_nbins(clen, shift)
plt.rcParams["xtick.major.size"] = 0
plt.rcParams["ytick.major.size"] = 0
plt.rcParams["figure.figsize"] = iopts.w, iopts.h
fig, axarr = plt.subplots(nrows=len(lines))
if len(linebeds) == 1:
axarr = (axarr, )
fig.suptitle(latex(chr), color="darkslategray")
for i, ax in enumerate(axarr):
lineplot(ax, [linebins[i]], nbins, chr, window, shift, \
color="{0}{1}".format(colors[i], 'r'))
if opts.ymax:
ax.set_ylim(0, opts.ymax)
plt.subplots_adjust(hspace=0.5)
image_name = chr + "." + iopts.format
savefig(image_name, dpi=iopts.dpi, iopts=iopts) | python | def multilineplot(args):
"""
%prog multilineplot fastafile chr1
Combine multiple line plots in one vertical stack
Inputs must be BED-formatted.
--lines: traditional line plots, useful for plotting feature freq
"""
p = OptionParser(multilineplot.__doc__)
p.add_option("--lines",
help="Features to plot in lineplot [default: %default]")
p.add_option("--colors",
help="List of colors matching number of input bed files")
p.add_option("--mode", default="span", choices=("span", "count", "score"),
help="Accumulate feature based on [default: %default]")
p.add_option("--binned", default=False, action="store_true",
help="Specify whether the input is already binned; " +
"if True, input files are considered to be binfiles")
p.add_option("--ymax", type="int", help="Set Y-axis max")
add_window_options(p)
opts, args, iopts = p.set_image_options(args, figsize="8x5")
if len(args) != 2:
sys.exit(not p.print_help())
fastafile, chr = args
window, shift, subtract, merge = check_window_options(opts)
linebeds = []
colors = opts.colors
if opts.lines:
lines = opts.lines.split(",")
assert len(colors) == len(lines), "Number of chosen colors must match" + \
" number of input bed files"
linebeds = get_beds(lines, binned=opts.binned)
linebins = get_binfiles(linebeds, fastafile, shift, mode=opts.mode,
binned=opts.binned, merge=merge)
clen = Sizes(fastafile).mapping[chr]
nbins = get_nbins(clen, shift)
plt.rcParams["xtick.major.size"] = 0
plt.rcParams["ytick.major.size"] = 0
plt.rcParams["figure.figsize"] = iopts.w, iopts.h
fig, axarr = plt.subplots(nrows=len(lines))
if len(linebeds) == 1:
axarr = (axarr, )
fig.suptitle(latex(chr), color="darkslategray")
for i, ax in enumerate(axarr):
lineplot(ax, [linebins[i]], nbins, chr, window, shift, \
color="{0}{1}".format(colors[i], 'r'))
if opts.ymax:
ax.set_ylim(0, opts.ymax)
plt.subplots_adjust(hspace=0.5)
image_name = chr + "." + iopts.format
savefig(image_name, dpi=iopts.dpi, iopts=iopts) | [
"def",
"multilineplot",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"multilineplot",
".",
"__doc__",
")",
"p",
".",
"add_option",
"(",
"\"--lines\"",
",",
"help",
"=",
"\"Features to plot in lineplot [default: %default]\"",
")",
"p",
".",
"add_option",
... | %prog multilineplot fastafile chr1
Combine multiple line plots in one vertical stack
Inputs must be BED-formatted.
--lines: traditional line plots, useful for plotting feature freq | [
"%prog",
"multilineplot",
"fastafile",
"chr1"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/graphics/landscape.py#L273-L334 | train | 200,978 |
tanghaibao/jcvi | jcvi/apps/emboss.py | _needle | def _needle(fa, fb, needlefile, a, b, results):
"""
Run single needle job
"""
from Bio.Emboss.Applications import NeedleCommandline
needle_cline = NeedleCommandline(asequence=fa, bsequence=fb,
gapopen=10, gapextend=0.5, outfile=needlefile)
stdout, stderr = needle_cline()
nh = NeedleHeader(needlefile)
FileShredder([fa, fb, needlefile], verbose=False)
r = ["\t".join((a, b, nh.identity, nh.score))]
results.extend(r) | python | def _needle(fa, fb, needlefile, a, b, results):
"""
Run single needle job
"""
from Bio.Emboss.Applications import NeedleCommandline
needle_cline = NeedleCommandline(asequence=fa, bsequence=fb,
gapopen=10, gapextend=0.5, outfile=needlefile)
stdout, stderr = needle_cline()
nh = NeedleHeader(needlefile)
FileShredder([fa, fb, needlefile], verbose=False)
r = ["\t".join((a, b, nh.identity, nh.score))]
results.extend(r) | [
"def",
"_needle",
"(",
"fa",
",",
"fb",
",",
"needlefile",
",",
"a",
",",
"b",
",",
"results",
")",
":",
"from",
"Bio",
".",
"Emboss",
".",
"Applications",
"import",
"NeedleCommandline",
"needle_cline",
"=",
"NeedleCommandline",
"(",
"asequence",
"=",
"fa"... | Run single needle job | [
"Run",
"single",
"needle",
"job"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/apps/emboss.py#L38-L51 | train | 200,979 |
tanghaibao/jcvi | jcvi/apps/emboss.py | needle | def needle(args):
"""
%prog needle nw.pairs a.pep.fasta b.pep.fasta
Take protein pairs and needle them
Automatically writes output file `nw.scores`
"""
from jcvi.formats.fasta import Fasta, SeqIO
p = OptionParser(needle.__doc__)
opts, args = p.parse_args(args)
if len(args) != 3:
sys.exit(not p.print_help())
manager = mp.Manager()
results = manager.list()
needle_pool = mp.Pool(processes=mp.cpu_count())
pairsfile, apep, bpep = args
afasta, bfasta = Fasta(apep), Fasta(bpep)
fp = must_open(pairsfile)
for i, row in enumerate(fp):
a, b = row.split()
a, b = afasta[a], bfasta[b]
fa, fb = must_open("{0}_{1}_a.fasta".format(pairsfile, i), "w"), \
must_open("{0}_{1}_b.fasta".format(pairsfile, i), "w")
SeqIO.write([a], fa, "fasta")
SeqIO.write([b], fb, "fasta")
fa.close()
fb.close()
needlefile = "{0}_{1}_ab.needle".format(pairsfile, i)
needle_pool.apply_async(_needle, \
(fa.name, fb.name, needlefile, a.id, b.id, results))
needle_pool.close()
needle_pool.join()
fp.close()
scoresfile = "{0}.scores".format(pairsfile.rsplit(".")[0])
fw = must_open(scoresfile, "w")
for result in results:
print(result, file=fw)
fw.close() | python | def needle(args):
"""
%prog needle nw.pairs a.pep.fasta b.pep.fasta
Take protein pairs and needle them
Automatically writes output file `nw.scores`
"""
from jcvi.formats.fasta import Fasta, SeqIO
p = OptionParser(needle.__doc__)
opts, args = p.parse_args(args)
if len(args) != 3:
sys.exit(not p.print_help())
manager = mp.Manager()
results = manager.list()
needle_pool = mp.Pool(processes=mp.cpu_count())
pairsfile, apep, bpep = args
afasta, bfasta = Fasta(apep), Fasta(bpep)
fp = must_open(pairsfile)
for i, row in enumerate(fp):
a, b = row.split()
a, b = afasta[a], bfasta[b]
fa, fb = must_open("{0}_{1}_a.fasta".format(pairsfile, i), "w"), \
must_open("{0}_{1}_b.fasta".format(pairsfile, i), "w")
SeqIO.write([a], fa, "fasta")
SeqIO.write([b], fb, "fasta")
fa.close()
fb.close()
needlefile = "{0}_{1}_ab.needle".format(pairsfile, i)
needle_pool.apply_async(_needle, \
(fa.name, fb.name, needlefile, a.id, b.id, results))
needle_pool.close()
needle_pool.join()
fp.close()
scoresfile = "{0}.scores".format(pairsfile.rsplit(".")[0])
fw = must_open(scoresfile, "w")
for result in results:
print(result, file=fw)
fw.close() | [
"def",
"needle",
"(",
"args",
")",
":",
"from",
"jcvi",
".",
"formats",
".",
"fasta",
"import",
"Fasta",
",",
"SeqIO",
"p",
"=",
"OptionParser",
"(",
"needle",
".",
"__doc__",
")",
"opts",
",",
"args",
"=",
"p",
".",
"parse_args",
"(",
"args",
")",
... | %prog needle nw.pairs a.pep.fasta b.pep.fasta
Take protein pairs and needle them
Automatically writes output file `nw.scores` | [
"%prog",
"needle",
"nw",
".",
"pairs",
"a",
".",
"pep",
".",
"fasta",
"b",
".",
"pep",
".",
"fasta"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/apps/emboss.py#L54-L100 | train | 200,980 |
tanghaibao/jcvi | jcvi/annotation/evm.py | maker | def maker(args):
"""
%prog maker maker.gff3 genome.fasta
Prepare EVM inputs by separating tracks from MAKER.
"""
from jcvi.formats.base import SetFile, FileShredder
A, T, P = "ABINITIO_PREDICTION", "TRANSCRIPT", "PROTEIN"
# Stores default weights and types
Registry = {\
"maker": (A, 5),
"augustus_masked": (A, 1),
"snap_masked": (A, 1),
"genemark": (A, 1),
"est2genome": (T, 5),
"est_gff": (T, 5),
"protein2genome": (P, 5),
"blastx": (P, 1)
}
p = OptionParser(maker.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
gffile, fastafile = args
types = "type.ids"
if need_update(gffile, types):
cmd = "cut -f2 -s {0} | sort -u".format(gffile)
sh(cmd, outfile=types)
types = SetFile(types)
reg = defaultdict(list)
weightsfile = "weights.txt"
contents = []
for s in types:
rs = s.split(":")[0]
if rs not in Registry:
continue
type, weight = Registry[rs]
reg[type].append(s)
contents.append("\t".join(str(x) for x in (type, s, weight)))
contents = "\n".join(sorted(contents))
write_file(weightsfile, contents)
evs = [x + ".gff" for x in (A, T, P)]
FileShredder(evs)
for type, tracks in reg.items():
for t in tracks:
cmd = "grep '\t{0}' {1} | grep -v '_match\t' >> {2}.gff".format(t, gffile, type)
sh(cmd)
partition(evs)
runfile = "run.sh"
contents = EVMRUN.format(*evs)
write_file(runfile, contents) | python | def maker(args):
"""
%prog maker maker.gff3 genome.fasta
Prepare EVM inputs by separating tracks from MAKER.
"""
from jcvi.formats.base import SetFile, FileShredder
A, T, P = "ABINITIO_PREDICTION", "TRANSCRIPT", "PROTEIN"
# Stores default weights and types
Registry = {\
"maker": (A, 5),
"augustus_masked": (A, 1),
"snap_masked": (A, 1),
"genemark": (A, 1),
"est2genome": (T, 5),
"est_gff": (T, 5),
"protein2genome": (P, 5),
"blastx": (P, 1)
}
p = OptionParser(maker.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
gffile, fastafile = args
types = "type.ids"
if need_update(gffile, types):
cmd = "cut -f2 -s {0} | sort -u".format(gffile)
sh(cmd, outfile=types)
types = SetFile(types)
reg = defaultdict(list)
weightsfile = "weights.txt"
contents = []
for s in types:
rs = s.split(":")[0]
if rs not in Registry:
continue
type, weight = Registry[rs]
reg[type].append(s)
contents.append("\t".join(str(x) for x in (type, s, weight)))
contents = "\n".join(sorted(contents))
write_file(weightsfile, contents)
evs = [x + ".gff" for x in (A, T, P)]
FileShredder(evs)
for type, tracks in reg.items():
for t in tracks:
cmd = "grep '\t{0}' {1} | grep -v '_match\t' >> {2}.gff".format(t, gffile, type)
sh(cmd)
partition(evs)
runfile = "run.sh"
contents = EVMRUN.format(*evs)
write_file(runfile, contents) | [
"def",
"maker",
"(",
"args",
")",
":",
"from",
"jcvi",
".",
"formats",
".",
"base",
"import",
"SetFile",
",",
"FileShredder",
"A",
",",
"T",
",",
"P",
"=",
"\"ABINITIO_PREDICTION\"",
",",
"\"TRANSCRIPT\"",
",",
"\"PROTEIN\"",
"# Stores default weights and types"... | %prog maker maker.gff3 genome.fasta
Prepare EVM inputs by separating tracks from MAKER. | [
"%prog",
"maker",
"maker",
".",
"gff3",
"genome",
".",
"fasta"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/annotation/evm.py#L84-L145 | train | 200,981 |
tanghaibao/jcvi | jcvi/annotation/evm.py | tigrload | def tigrload(args):
"""
%prog tigrload db ev_type
Load EVM results into TIGR db. Actually, just write a load.sh script. The
ev_type should be set, e.g. "EVM1", "EVM2", etc.
"""
p = OptionParser(tigrload.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
db, ev_type = args
runfile = "load.sh"
contents = EVMLOAD.format(db, ev_type)
write_file(runfile, contents) | python | def tigrload(args):
"""
%prog tigrload db ev_type
Load EVM results into TIGR db. Actually, just write a load.sh script. The
ev_type should be set, e.g. "EVM1", "EVM2", etc.
"""
p = OptionParser(tigrload.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
db, ev_type = args
runfile = "load.sh"
contents = EVMLOAD.format(db, ev_type)
write_file(runfile, contents) | [
"def",
"tigrload",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"tigrload",
".",
"__doc__",
")",
"opts",
",",
"args",
"=",
"p",
".",
"parse_args",
"(",
"args",
")",
"if",
"len",
"(",
"args",
")",
"!=",
"2",
":",
"sys",
".",
"exit",
"(",
... | %prog tigrload db ev_type
Load EVM results into TIGR db. Actually, just write a load.sh script. The
ev_type should be set, e.g. "EVM1", "EVM2", etc. | [
"%prog",
"tigrload",
"db",
"ev_type"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/annotation/evm.py#L148-L165 | train | 200,982 |
tanghaibao/jcvi | jcvi/annotation/evm.py | pasa | def pasa(args):
"""
%prog pasa pasa_db fastafile
Run EVM in TIGR-only mode.
"""
p = OptionParser(pasa.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
pasa_db, fastafile = args
termexons = "pasa.terminal_exons.gff3"
if need_update(fastafile, termexons):
cmd = "$ANNOT_DEVEL/PASA2/scripts/pasa_asmbls_to_training_set.dbi"
cmd += ' -M "{0}:mysql.tigr.org" -p "access:access"'.format(pasa_db)
cmd += ' -g {0}'.format(fastafile)
sh(cmd)
cmd = "$EVM/PasaUtils/retrieve_terminal_CDS_exons.pl"
cmd += " trainingSetCandidates.fasta trainingSetCandidates.gff"
sh(cmd, outfile=termexons)
return termexons | python | def pasa(args):
"""
%prog pasa pasa_db fastafile
Run EVM in TIGR-only mode.
"""
p = OptionParser(pasa.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
pasa_db, fastafile = args
termexons = "pasa.terminal_exons.gff3"
if need_update(fastafile, termexons):
cmd = "$ANNOT_DEVEL/PASA2/scripts/pasa_asmbls_to_training_set.dbi"
cmd += ' -M "{0}:mysql.tigr.org" -p "access:access"'.format(pasa_db)
cmd += ' -g {0}'.format(fastafile)
sh(cmd)
cmd = "$EVM/PasaUtils/retrieve_terminal_CDS_exons.pl"
cmd += " trainingSetCandidates.fasta trainingSetCandidates.gff"
sh(cmd, outfile=termexons)
return termexons | [
"def",
"pasa",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"pasa",
".",
"__doc__",
")",
"opts",
",",
"args",
"=",
"p",
".",
"parse_args",
"(",
"args",
")",
"if",
"len",
"(",
"args",
")",
"!=",
"2",
":",
"sys",
".",
"exit",
"(",
"not",... | %prog pasa pasa_db fastafile
Run EVM in TIGR-only mode. | [
"%prog",
"pasa",
"pasa_db",
"fastafile"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/annotation/evm.py#L168-L193 | train | 200,983 |
tanghaibao/jcvi | jcvi/annotation/evm.py | tigrprepare | def tigrprepare(args):
"""
%prog tigrprepare asmbl.fasta asmbl.ids db pasa.terminal_exons.gff3
Run EVM in TIGR-only mode.
"""
p = OptionParser(tigrprepare.__doc__)
opts, args = p.parse_args(args)
if len(args) != 4:
sys.exit(not p.print_help())
fastafile, asmbl_id, db, pasa_db = args
if asmbl_id == 'all':
idsfile = fastafile + ".ids"
if need_update(fastafile, idsfile):
ids([fastafile, "-o", idsfile])
else:
idsfile = asmbl_id
oneid = open(idsfile).next().strip()
weightsfile = "weights.txt"
if need_update(idsfile, weightsfile):
cmd = "$EVM/TIGR-only/create_sample_weights_file.dbi"
cmd += " {0} {1} | tee weights.txt".format(db, oneid)
sh(cmd)
evs = ["gene_predictions.gff3", "transcript_alignments.gff3",
"protein_alignments.gff3"]
if need_update(weightsfile, evs):
cmd = "$EVM/TIGR-only/write_GFF3_files.dbi"
cmd += " --db {0} --asmbl_id {1} --weights {2}".\
format(db, idsfile, weightsfile)
sh(cmd)
evs[1] = fix_transcript()
partition(evs)
runfile = "run.sh"
contents = EVMRUN.format(*evs)
write_file(runfile, contents) | python | def tigrprepare(args):
"""
%prog tigrprepare asmbl.fasta asmbl.ids db pasa.terminal_exons.gff3
Run EVM in TIGR-only mode.
"""
p = OptionParser(tigrprepare.__doc__)
opts, args = p.parse_args(args)
if len(args) != 4:
sys.exit(not p.print_help())
fastafile, asmbl_id, db, pasa_db = args
if asmbl_id == 'all':
idsfile = fastafile + ".ids"
if need_update(fastafile, idsfile):
ids([fastafile, "-o", idsfile])
else:
idsfile = asmbl_id
oneid = open(idsfile).next().strip()
weightsfile = "weights.txt"
if need_update(idsfile, weightsfile):
cmd = "$EVM/TIGR-only/create_sample_weights_file.dbi"
cmd += " {0} {1} | tee weights.txt".format(db, oneid)
sh(cmd)
evs = ["gene_predictions.gff3", "transcript_alignments.gff3",
"protein_alignments.gff3"]
if need_update(weightsfile, evs):
cmd = "$EVM/TIGR-only/write_GFF3_files.dbi"
cmd += " --db {0} --asmbl_id {1} --weights {2}".\
format(db, idsfile, weightsfile)
sh(cmd)
evs[1] = fix_transcript()
partition(evs)
runfile = "run.sh"
contents = EVMRUN.format(*evs)
write_file(runfile, contents) | [
"def",
"tigrprepare",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"tigrprepare",
".",
"__doc__",
")",
"opts",
",",
"args",
"=",
"p",
".",
"parse_args",
"(",
"args",
")",
"if",
"len",
"(",
"args",
")",
"!=",
"4",
":",
"sys",
".",
"exit",
... | %prog tigrprepare asmbl.fasta asmbl.ids db pasa.terminal_exons.gff3
Run EVM in TIGR-only mode. | [
"%prog",
"tigrprepare",
"asmbl",
".",
"fasta",
"asmbl",
".",
"ids",
"db",
"pasa",
".",
"terminal_exons",
".",
"gff3"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/annotation/evm.py#L220-L261 | train | 200,984 |
tanghaibao/jcvi | jcvi/annotation/qc.py | uniq | def uniq(args):
"""
%prog uniq gffile cdsfasta
Remove overlapping gene models. Similar to formats.gff.uniq(), overlapping
'piles' are processed, one by one.
Here, we use a different algorithm, that retains the best non-overlapping
subset witin each pile, rather than single best model. Scoring function is
also different, rather than based on score or span, we optimize for the
subset that show the best combined score. Score is defined by:
score = (1 - AED) * length
"""
p = OptionParser(uniq.__doc__)
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
gffile, cdsfasta = args
gff = Gff(gffile)
sizes = Sizes(cdsfasta).mapping
gene_register = {}
for g in gff:
if g.type != "mRNA":
continue
aed = float(g.attributes["_AED"][0])
gene_register[g.parent] = (1 - aed) * sizes[g.accn]
allgenes = import_feats(gffile)
g = get_piles(allgenes)
bestids = set()
for group in g:
ranges = [to_range(x, score=gene_register[x.accn], id=x.accn) \
for x in group]
selected_chain, score = range_chain(ranges)
bestids |= set(x.id for x in selected_chain)
removed = set(x.accn for x in allgenes) - bestids
fw = open("removed.ids", "w")
print("\n".join(sorted(removed)), file=fw)
fw.close()
populate_children(opts.outfile, bestids, gffile, "gene") | python | def uniq(args):
"""
%prog uniq gffile cdsfasta
Remove overlapping gene models. Similar to formats.gff.uniq(), overlapping
'piles' are processed, one by one.
Here, we use a different algorithm, that retains the best non-overlapping
subset witin each pile, rather than single best model. Scoring function is
also different, rather than based on score or span, we optimize for the
subset that show the best combined score. Score is defined by:
score = (1 - AED) * length
"""
p = OptionParser(uniq.__doc__)
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
gffile, cdsfasta = args
gff = Gff(gffile)
sizes = Sizes(cdsfasta).mapping
gene_register = {}
for g in gff:
if g.type != "mRNA":
continue
aed = float(g.attributes["_AED"][0])
gene_register[g.parent] = (1 - aed) * sizes[g.accn]
allgenes = import_feats(gffile)
g = get_piles(allgenes)
bestids = set()
for group in g:
ranges = [to_range(x, score=gene_register[x.accn], id=x.accn) \
for x in group]
selected_chain, score = range_chain(ranges)
bestids |= set(x.id for x in selected_chain)
removed = set(x.accn for x in allgenes) - bestids
fw = open("removed.ids", "w")
print("\n".join(sorted(removed)), file=fw)
fw.close()
populate_children(opts.outfile, bestids, gffile, "gene") | [
"def",
"uniq",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"uniq",
".",
"__doc__",
")",
"p",
".",
"set_outfile",
"(",
")",
"opts",
",",
"args",
"=",
"p",
".",
"parse_args",
"(",
"args",
")",
"if",
"len",
"(",
"args",
")",
"!=",
"2",
"... | %prog uniq gffile cdsfasta
Remove overlapping gene models. Similar to formats.gff.uniq(), overlapping
'piles' are processed, one by one.
Here, we use a different algorithm, that retains the best non-overlapping
subset witin each pile, rather than single best model. Scoring function is
also different, rather than based on score or span, we optimize for the
subset that show the best combined score. Score is defined by:
score = (1 - AED) * length | [
"%prog",
"uniq",
"gffile",
"cdsfasta"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/annotation/qc.py#L34-L80 | train | 200,985 |
tanghaibao/jcvi | jcvi/annotation/qc.py | nmd | def nmd(args):
"""
%prog nmd gffile
Identify transcript variants which might be candidates for nonsense
mediated decay (NMD)
A transcript is considered to be a candidate for NMD when the CDS stop
codon is located more than 50nt upstream of terminal splice site donor
References:
http://www.nature.com/horizon/rna/highlights/figures/s2_spec1_f3.html
http://www.biomedcentral.com/1741-7007/7/23/figure/F1
"""
import __builtin__
from jcvi.utils.cbook import enumerate_reversed
p = OptionParser(nmd.__doc__)
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
gffile, = args
gff = make_index(gffile)
fw = must_open(opts.outfile, "w")
for gene in gff.features_of_type('gene', order_by=('seqid', 'start')):
_enumerate = __builtin__.enumerate if gene.strand == "-" else enumerate_reversed
for mrna in gff.children(gene, featuretype='mRNA', order_by=('start')):
tracker = dict()
tracker['exon'] = list(gff.children(mrna, featuretype='exon', order_by=('start')))
tracker['cds'] = [None] * len(tracker['exon'])
tcds_pos = None
for i, exon in _enumerate(tracker['exon']):
for cds in gff.region(region=exon, featuretype='CDS', completely_within=True):
if mrna.id in cds['Parent']:
tracker['cds'][i] = cds
tcds_pos = i
break
if tcds_pos: break
NMD, distance = False, 0
if (mrna.strand == "+" and tcds_pos + 1 < len(tracker['exon'])) \
or (mrna.strand == "-" and tcds_pos - 1 >= 0):
tcds = tracker['cds'][tcds_pos]
texon = tracker['exon'][tcds_pos]
PTC = tcds.end if mrna.strand == '+' else tcds.start
TDSS = texon.end if mrna.strand == '+' else texon.start
distance = abs(TDSS - PTC)
NMD = True if distance > 50 else False
print("\t".join(str(x) for x in (gene.id, mrna.id, \
gff.children_bp(mrna, child_featuretype='CDS'), distance, NMD)), file=fw)
fw.close() | python | def nmd(args):
"""
%prog nmd gffile
Identify transcript variants which might be candidates for nonsense
mediated decay (NMD)
A transcript is considered to be a candidate for NMD when the CDS stop
codon is located more than 50nt upstream of terminal splice site donor
References:
http://www.nature.com/horizon/rna/highlights/figures/s2_spec1_f3.html
http://www.biomedcentral.com/1741-7007/7/23/figure/F1
"""
import __builtin__
from jcvi.utils.cbook import enumerate_reversed
p = OptionParser(nmd.__doc__)
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
gffile, = args
gff = make_index(gffile)
fw = must_open(opts.outfile, "w")
for gene in gff.features_of_type('gene', order_by=('seqid', 'start')):
_enumerate = __builtin__.enumerate if gene.strand == "-" else enumerate_reversed
for mrna in gff.children(gene, featuretype='mRNA', order_by=('start')):
tracker = dict()
tracker['exon'] = list(gff.children(mrna, featuretype='exon', order_by=('start')))
tracker['cds'] = [None] * len(tracker['exon'])
tcds_pos = None
for i, exon in _enumerate(tracker['exon']):
for cds in gff.region(region=exon, featuretype='CDS', completely_within=True):
if mrna.id in cds['Parent']:
tracker['cds'][i] = cds
tcds_pos = i
break
if tcds_pos: break
NMD, distance = False, 0
if (mrna.strand == "+" and tcds_pos + 1 < len(tracker['exon'])) \
or (mrna.strand == "-" and tcds_pos - 1 >= 0):
tcds = tracker['cds'][tcds_pos]
texon = tracker['exon'][tcds_pos]
PTC = tcds.end if mrna.strand == '+' else tcds.start
TDSS = texon.end if mrna.strand == '+' else texon.start
distance = abs(TDSS - PTC)
NMD = True if distance > 50 else False
print("\t".join(str(x) for x in (gene.id, mrna.id, \
gff.children_bp(mrna, child_featuretype='CDS'), distance, NMD)), file=fw)
fw.close() | [
"def",
"nmd",
"(",
"args",
")",
":",
"import",
"__builtin__",
"from",
"jcvi",
".",
"utils",
".",
"cbook",
"import",
"enumerate_reversed",
"p",
"=",
"OptionParser",
"(",
"nmd",
".",
"__doc__",
")",
"p",
".",
"set_outfile",
"(",
")",
"opts",
",",
"args",
... | %prog nmd gffile
Identify transcript variants which might be candidates for nonsense
mediated decay (NMD)
A transcript is considered to be a candidate for NMD when the CDS stop
codon is located more than 50nt upstream of terminal splice site donor
References:
http://www.nature.com/horizon/rna/highlights/figures/s2_spec1_f3.html
http://www.biomedcentral.com/1741-7007/7/23/figure/F1 | [
"%prog",
"nmd",
"gffile"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/annotation/qc.py#L255-L314 | train | 200,986 |
tanghaibao/jcvi | jcvi/compara/reconstruct.py | print_edges | def print_edges(G, bed, families):
"""
Instead of going through the graph construction, just print the edges.
"""
symbols = {'+': '>', '-': '<'}
for seqid, bs in bed.sub_beds():
prev_node, prev_strand = None, '+'
for b in bs:
accn = b.accn
strand = b.strand
node = "=".join(families[accn])
if prev_node:
print("{}{}--{}{}".format(prev_node, symbols[prev_strand],
symbols[strand], node))
prev_node, prev_strand = node, strand | python | def print_edges(G, bed, families):
"""
Instead of going through the graph construction, just print the edges.
"""
symbols = {'+': '>', '-': '<'}
for seqid, bs in bed.sub_beds():
prev_node, prev_strand = None, '+'
for b in bs:
accn = b.accn
strand = b.strand
node = "=".join(families[accn])
if prev_node:
print("{}{}--{}{}".format(prev_node, symbols[prev_strand],
symbols[strand], node))
prev_node, prev_strand = node, strand | [
"def",
"print_edges",
"(",
"G",
",",
"bed",
",",
"families",
")",
":",
"symbols",
"=",
"{",
"'+'",
":",
"'>'",
",",
"'-'",
":",
"'<'",
"}",
"for",
"seqid",
",",
"bs",
"in",
"bed",
".",
"sub_beds",
"(",
")",
":",
"prev_node",
",",
"prev_strand",
"... | Instead of going through the graph construction, just print the edges. | [
"Instead",
"of",
"going",
"through",
"the",
"graph",
"construction",
"just",
"print",
"the",
"edges",
"."
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/compara/reconstruct.py#L52-L66 | train | 200,987 |
tanghaibao/jcvi | jcvi/compara/reconstruct.py | adjgraph | def adjgraph(args):
"""
%prog adjgraph adjacency.txt subgraph.txt
Construct adjacency graph for graphviz. The file may look like sample below.
The lines with numbers are chromosomes with gene order information.
genome 0
chr 0
-1 -13 -16 3 4 -6126 -5 17 -6 7 18 5357 8 -5358 5359 -9 -10 -11 5362 5360
chr 1
138 6133 -5387 144 -6132 -139 140 141 146 -147 6134 145 -170 -142 -143
"""
import pygraphviz as pgv
from jcvi.utils.iter import pairwise
from jcvi.formats.base import SetFile
p = OptionParser(adjgraph.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
infile, subgraph = args
subgraph = SetFile(subgraph)
subgraph = set(x.strip("-") for x in subgraph)
G = pgv.AGraph(strict=False) # allow multi-edge
SG = pgv.AGraph(strict=False)
palette = ("green", "magenta", "tomato", "peachpuff")
fp = open(infile)
genome_id = -1
key = 0
for row in fp:
if row.strip() == "":
continue
atoms = row.split()
tag = atoms[0]
if tag in ("ChrNumber", "chr"):
continue
if tag == "genome":
genome_id += 1
gcolor = palette[genome_id]
continue
nodeseq = []
for p in atoms:
np = p.strip("-")
nodeL, nodeR = np + "L", np + "R"
if p[0] == "-": # negative strand
nodeseq += [nodeR, nodeL]
else:
nodeseq += [nodeL, nodeR]
for a, b in pairwise(nodeseq):
G.add_edge(a, b, key, color=gcolor)
key += 1
na, nb = a[:-1], b[:-1]
if na not in subgraph and nb not in subgraph:
continue
SG.add_edge(a, b, key, color=gcolor)
G.graph_attr.update(dpi="300")
fw = open("graph.dot", "w")
G.write(fw)
fw.close()
fw = open("subgraph.dot", "w")
SG.write(fw)
fw.close() | python | def adjgraph(args):
"""
%prog adjgraph adjacency.txt subgraph.txt
Construct adjacency graph for graphviz. The file may look like sample below.
The lines with numbers are chromosomes with gene order information.
genome 0
chr 0
-1 -13 -16 3 4 -6126 -5 17 -6 7 18 5357 8 -5358 5359 -9 -10 -11 5362 5360
chr 1
138 6133 -5387 144 -6132 -139 140 141 146 -147 6134 145 -170 -142 -143
"""
import pygraphviz as pgv
from jcvi.utils.iter import pairwise
from jcvi.formats.base import SetFile
p = OptionParser(adjgraph.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
infile, subgraph = args
subgraph = SetFile(subgraph)
subgraph = set(x.strip("-") for x in subgraph)
G = pgv.AGraph(strict=False) # allow multi-edge
SG = pgv.AGraph(strict=False)
palette = ("green", "magenta", "tomato", "peachpuff")
fp = open(infile)
genome_id = -1
key = 0
for row in fp:
if row.strip() == "":
continue
atoms = row.split()
tag = atoms[0]
if tag in ("ChrNumber", "chr"):
continue
if tag == "genome":
genome_id += 1
gcolor = palette[genome_id]
continue
nodeseq = []
for p in atoms:
np = p.strip("-")
nodeL, nodeR = np + "L", np + "R"
if p[0] == "-": # negative strand
nodeseq += [nodeR, nodeL]
else:
nodeseq += [nodeL, nodeR]
for a, b in pairwise(nodeseq):
G.add_edge(a, b, key, color=gcolor)
key += 1
na, nb = a[:-1], b[:-1]
if na not in subgraph and nb not in subgraph:
continue
SG.add_edge(a, b, key, color=gcolor)
G.graph_attr.update(dpi="300")
fw = open("graph.dot", "w")
G.write(fw)
fw.close()
fw = open("subgraph.dot", "w")
SG.write(fw)
fw.close() | [
"def",
"adjgraph",
"(",
"args",
")",
":",
"import",
"pygraphviz",
"as",
"pgv",
"from",
"jcvi",
".",
"utils",
".",
"iter",
"import",
"pairwise",
"from",
"jcvi",
".",
"formats",
".",
"base",
"import",
"SetFile",
"p",
"=",
"OptionParser",
"(",
"adjgraph",
"... | %prog adjgraph adjacency.txt subgraph.txt
Construct adjacency graph for graphviz. The file may look like sample below.
The lines with numbers are chromosomes with gene order information.
genome 0
chr 0
-1 -13 -16 3 4 -6126 -5 17 -6 7 18 5357 8 -5358 5359 -9 -10 -11 5362 5360
chr 1
138 6133 -5387 144 -6132 -139 140 141 146 -147 6134 145 -170 -142 -143 | [
"%prog",
"adjgraph",
"adjacency",
".",
"txt",
"subgraph",
".",
"txt"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/compara/reconstruct.py#L111-L186 | train | 200,988 |
tanghaibao/jcvi | jcvi/compara/reconstruct.py | pairs | def pairs(args):
"""
%prog pairs anchorsfile prefix
Convert anchorsfile to pairsfile.
"""
p = OptionParser(pairs.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
anchorfile, prefix = args
outfile = prefix + ".pairs"
fw = open(outfile, "w")
af = AnchorFile(anchorfile)
blocks = af.blocks
pad = len(str(len(blocks)))
npairs = 0
for i, block in enumerate(blocks):
block_id = "{0}{1:0{2}d}".format(prefix, i + 1, pad)
lines = []
for q, s, score in block:
npairs += 1
score = score.replace('L', '')
lines.append("\t".join((q, s, score, block_id)))
print("\n".join(sorted(lines)), file=fw)
fw.close()
logging.debug("A total of {0} pairs written to `{1}`.".
format(npairs, outfile)) | python | def pairs(args):
"""
%prog pairs anchorsfile prefix
Convert anchorsfile to pairsfile.
"""
p = OptionParser(pairs.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
anchorfile, prefix = args
outfile = prefix + ".pairs"
fw = open(outfile, "w")
af = AnchorFile(anchorfile)
blocks = af.blocks
pad = len(str(len(blocks)))
npairs = 0
for i, block in enumerate(blocks):
block_id = "{0}{1:0{2}d}".format(prefix, i + 1, pad)
lines = []
for q, s, score in block:
npairs += 1
score = score.replace('L', '')
lines.append("\t".join((q, s, score, block_id)))
print("\n".join(sorted(lines)), file=fw)
fw.close()
logging.debug("A total of {0} pairs written to `{1}`.".
format(npairs, outfile)) | [
"def",
"pairs",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"pairs",
".",
"__doc__",
")",
"opts",
",",
"args",
"=",
"p",
".",
"parse_args",
"(",
"args",
")",
"if",
"len",
"(",
"args",
")",
"!=",
"2",
":",
"sys",
".",
"exit",
"(",
"not... | %prog pairs anchorsfile prefix
Convert anchorsfile to pairsfile. | [
"%prog",
"pairs",
"anchorsfile",
"prefix"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/compara/reconstruct.py#L189-L220 | train | 200,989 |
tanghaibao/jcvi | jcvi/compara/reconstruct.py | zipbed | def zipbed(args):
"""
%prog zipbed species.bed collinear.anchors
Build ancestral contig from collinear blocks. For example, to build pre-rho
order, use `zipbed rice.bed rice.rice.1x1.collinear.anchors`. The algorithms
proceeds by interleaving the genes together.
"""
p = OptionParser(zipbed.__doc__)
p.add_option("--prefix", default="b",
help="Prefix for the new seqid [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
bedfile, anchorfile = args
prefix = opts.prefix
bed = Bed(bedfile)
order = bed.order
newbedfile = prefix + ".bed"
fw = open(newbedfile, "w")
af = AnchorFile(anchorfile)
blocks = af.blocks
pad = len(str(len(blocks)))
for i, block in enumerate(blocks):
block_id = "{0}{1:0{2}d}".format(prefix, i + 1, pad)
pairs = []
for q, s, score in block:
qi, q = order[q]
si, s = order[s]
pairs.append((qi, si))
newbed = list(interleave_pairs(pairs))
for i, b in enumerate(newbed):
accn = bed[b].accn
print("\t".join(str(x)
for x in (block_id, i, i + 1, accn)), file=fw)
logging.debug("Reconstructed bedfile written to `{0}`.".format(newbedfile)) | python | def zipbed(args):
"""
%prog zipbed species.bed collinear.anchors
Build ancestral contig from collinear blocks. For example, to build pre-rho
order, use `zipbed rice.bed rice.rice.1x1.collinear.anchors`. The algorithms
proceeds by interleaving the genes together.
"""
p = OptionParser(zipbed.__doc__)
p.add_option("--prefix", default="b",
help="Prefix for the new seqid [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
bedfile, anchorfile = args
prefix = opts.prefix
bed = Bed(bedfile)
order = bed.order
newbedfile = prefix + ".bed"
fw = open(newbedfile, "w")
af = AnchorFile(anchorfile)
blocks = af.blocks
pad = len(str(len(blocks)))
for i, block in enumerate(blocks):
block_id = "{0}{1:0{2}d}".format(prefix, i + 1, pad)
pairs = []
for q, s, score in block:
qi, q = order[q]
si, s = order[s]
pairs.append((qi, si))
newbed = list(interleave_pairs(pairs))
for i, b in enumerate(newbed):
accn = bed[b].accn
print("\t".join(str(x)
for x in (block_id, i, i + 1, accn)), file=fw)
logging.debug("Reconstructed bedfile written to `{0}`.".format(newbedfile)) | [
"def",
"zipbed",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"zipbed",
".",
"__doc__",
")",
"p",
".",
"add_option",
"(",
"\"--prefix\"",
",",
"default",
"=",
"\"b\"",
",",
"help",
"=",
"\"Prefix for the new seqid [default: %default]\"",
")",
"opts",
... | %prog zipbed species.bed collinear.anchors
Build ancestral contig from collinear blocks. For example, to build pre-rho
order, use `zipbed rice.bed rice.rice.1x1.collinear.anchors`. The algorithms
proceeds by interleaving the genes together. | [
"%prog",
"zipbed",
"species",
".",
"bed",
"collinear",
".",
"anchors"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/compara/reconstruct.py#L241-L280 | train | 200,990 |
tanghaibao/jcvi | jcvi/compara/reconstruct.py | collinear | def collinear(args):
"""
%prog collinear a.b.anchors
Reduce synteny blocks to strictly collinear, use dynamic programming in a
procedure similar to DAGchainer.
"""
p = OptionParser(collinear.__doc__)
p.set_beds()
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
anchorfile, = args
qbed, sbed, qorder, sorder, is_self = check_beds(anchorfile, p, opts)
af = AnchorFile(anchorfile)
newanchorfile = anchorfile.rsplit(".", 1)[0] + ".collinear.anchors"
fw = open(newanchorfile, "w")
blocks = af.blocks
for block in blocks:
print("#" * 3, file=fw)
iblock = []
for q, s, score in block:
qi, q = qorder[q]
si, s = sorder[s]
score = int(long(score))
iblock.append([qi, si, score])
block = get_collinear(iblock)
for q, s, score in block:
q = qbed[q].accn
s = sbed[s].accn
print("\t".join((q, s, str(score))), file=fw)
fw.close() | python | def collinear(args):
"""
%prog collinear a.b.anchors
Reduce synteny blocks to strictly collinear, use dynamic programming in a
procedure similar to DAGchainer.
"""
p = OptionParser(collinear.__doc__)
p.set_beds()
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
anchorfile, = args
qbed, sbed, qorder, sorder, is_self = check_beds(anchorfile, p, opts)
af = AnchorFile(anchorfile)
newanchorfile = anchorfile.rsplit(".", 1)[0] + ".collinear.anchors"
fw = open(newanchorfile, "w")
blocks = af.blocks
for block in blocks:
print("#" * 3, file=fw)
iblock = []
for q, s, score in block:
qi, q = qorder[q]
si, s = sorder[s]
score = int(long(score))
iblock.append([qi, si, score])
block = get_collinear(iblock)
for q, s, score in block:
q = qbed[q].accn
s = sbed[s].accn
print("\t".join((q, s, str(score))), file=fw)
fw.close() | [
"def",
"collinear",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"collinear",
".",
"__doc__",
")",
"p",
".",
"set_beds",
"(",
")",
"opts",
",",
"args",
"=",
"p",
".",
"parse_args",
"(",
"args",
")",
"if",
"len",
"(",
"args",
")",
"!=",
"... | %prog collinear a.b.anchors
Reduce synteny blocks to strictly collinear, use dynamic programming in a
procedure similar to DAGchainer. | [
"%prog",
"collinear",
"a",
".",
"b",
".",
"anchors"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/compara/reconstruct.py#L340-L379 | train | 200,991 |
tanghaibao/jcvi | jcvi/variation/phase.py | counts | def counts(args):
"""
%prog counts vcffile
Collect allele counts from RO and AO fields.
"""
p = OptionParser(counts.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
vcffile, = args
vcf_reader = vcf.Reader(open(vcffile))
for r in vcf_reader:
v = CPRA(r)
if not v.is_valid:
continue
for sample in r.samples:
ro = sample["RO"]
ao = sample["AO"]
print("\t".join(str(x) for x in (v, ro, ao))) | python | def counts(args):
"""
%prog counts vcffile
Collect allele counts from RO and AO fields.
"""
p = OptionParser(counts.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
vcffile, = args
vcf_reader = vcf.Reader(open(vcffile))
for r in vcf_reader:
v = CPRA(r)
if not v.is_valid:
continue
for sample in r.samples:
ro = sample["RO"]
ao = sample["AO"]
print("\t".join(str(x) for x in (v, ro, ao))) | [
"def",
"counts",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"counts",
".",
"__doc__",
")",
"opts",
",",
"args",
"=",
"p",
".",
"parse_args",
"(",
"args",
")",
"if",
"len",
"(",
"args",
")",
"!=",
"1",
":",
"sys",
".",
"exit",
"(",
"n... | %prog counts vcffile
Collect allele counts from RO and AO fields. | [
"%prog",
"counts",
"vcffile"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/variation/phase.py#L53-L74 | train | 200,992 |
tanghaibao/jcvi | jcvi/variation/phase.py | prepare | def prepare(args):
"""
%prog prepare vcffile bamfile
Convert vcf and bam to variant list. Inputs are:
- vcffile: contains the positions of variants
- bamfile: contains the reads that hold the variants
Outputs:
- reads_to_phase: phasing for each read
- variants_to_phase: in format of phased vcf
"""
p = OptionParser(prepare.__doc__)
p.add_option("--accuracy", default=.85,
help="Sequencing per-base accuracy")
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
vcffile, bamfile = args
right = "{:.2f}".format(opts.accuracy)
wrong = "{:.2f}".format(1 - opts.accuracy)
vcf_reader = vcf.Reader(open(vcffile))
variants = []
for r in vcf_reader:
v = CPRA(r)
if not v.is_valid:
continue
variants.append(v)
logging.debug("A total of {} bi-allelic SNVs imported from `{}`".\
format(len(variants), vcffile))
bamfile = pysam.AlignmentFile(bamfile, "rb")
for v in variants:
pos = v.pos - 1
for column in bamfile.pileup(v.chr, pos, pos + 1, truncate=True):
for read in column.pileups:
query_position = read.query_position
if query_position is None:
continue
read_name = read.alignment.query_name
query_base = read.alignment.query_sequence[query_position]
a, b = v.alleles
if query_base == a:
other_base = b
elif query_base == b:
other_base = a
else:
continue
print(" ".join(str(x) for x in \
(v, read_name, query_base, right, other_base, wrong))) | python | def prepare(args):
"""
%prog prepare vcffile bamfile
Convert vcf and bam to variant list. Inputs are:
- vcffile: contains the positions of variants
- bamfile: contains the reads that hold the variants
Outputs:
- reads_to_phase: phasing for each read
- variants_to_phase: in format of phased vcf
"""
p = OptionParser(prepare.__doc__)
p.add_option("--accuracy", default=.85,
help="Sequencing per-base accuracy")
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
vcffile, bamfile = args
right = "{:.2f}".format(opts.accuracy)
wrong = "{:.2f}".format(1 - opts.accuracy)
vcf_reader = vcf.Reader(open(vcffile))
variants = []
for r in vcf_reader:
v = CPRA(r)
if not v.is_valid:
continue
variants.append(v)
logging.debug("A total of {} bi-allelic SNVs imported from `{}`".\
format(len(variants), vcffile))
bamfile = pysam.AlignmentFile(bamfile, "rb")
for v in variants:
pos = v.pos - 1
for column in bamfile.pileup(v.chr, pos, pos + 1, truncate=True):
for read in column.pileups:
query_position = read.query_position
if query_position is None:
continue
read_name = read.alignment.query_name
query_base = read.alignment.query_sequence[query_position]
a, b = v.alleles
if query_base == a:
other_base = b
elif query_base == b:
other_base = a
else:
continue
print(" ".join(str(x) for x in \
(v, read_name, query_base, right, other_base, wrong))) | [
"def",
"prepare",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"prepare",
".",
"__doc__",
")",
"p",
".",
"add_option",
"(",
"\"--accuracy\"",
",",
"default",
"=",
".85",
",",
"help",
"=",
"\"Sequencing per-base accuracy\"",
")",
"opts",
",",
"args... | %prog prepare vcffile bamfile
Convert vcf and bam to variant list. Inputs are:
- vcffile: contains the positions of variants
- bamfile: contains the reads that hold the variants
Outputs:
- reads_to_phase: phasing for each read
- variants_to_phase: in format of phased vcf | [
"%prog",
"prepare",
"vcffile",
"bamfile"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/variation/phase.py#L77-L129 | train | 200,993 |
tanghaibao/jcvi | jcvi/variation/phase.py | CPRA.is_valid | def is_valid(self):
""" Only retain SNPs or single indels, and are bi-allelic
"""
return len(self.ref) == 1 and \
len(self.alt) == 1 and \
len(self.alt[0]) == 1 | python | def is_valid(self):
""" Only retain SNPs or single indels, and are bi-allelic
"""
return len(self.ref) == 1 and \
len(self.alt) == 1 and \
len(self.alt[0]) == 1 | [
"def",
"is_valid",
"(",
"self",
")",
":",
"return",
"len",
"(",
"self",
".",
"ref",
")",
"==",
"1",
"and",
"len",
"(",
"self",
".",
"alt",
")",
"==",
"1",
"and",
"len",
"(",
"self",
".",
"alt",
"[",
"0",
"]",
")",
"==",
"1"
] | Only retain SNPs or single indels, and are bi-allelic | [
"Only",
"retain",
"SNPs",
"or",
"single",
"indels",
"and",
"are",
"bi",
"-",
"allelic"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/variation/phase.py#L28-L33 | train | 200,994 |
tanghaibao/jcvi | jcvi/utils/natsort.py | _number_finder | def _number_finder(s, regex, numconv):
"""Helper to split numbers"""
# Split. If there are no splits, return now
s = regex.split(s)
if len(s) == 1:
return tuple(s)
# Now convert the numbers to numbers, and leave strings as strings
s = remove_empty(s)
for i in range(len(s)):
try:
s[i] = numconv(s[i])
except ValueError:
pass
# If the list begins with a number, lead with an empty string.
# This is used to get around the "unorderable types" issue.
if not isinstance(s[0], six.string_types):
return [''] + s
else:
return s | python | def _number_finder(s, regex, numconv):
"""Helper to split numbers"""
# Split. If there are no splits, return now
s = regex.split(s)
if len(s) == 1:
return tuple(s)
# Now convert the numbers to numbers, and leave strings as strings
s = remove_empty(s)
for i in range(len(s)):
try:
s[i] = numconv(s[i])
except ValueError:
pass
# If the list begins with a number, lead with an empty string.
# This is used to get around the "unorderable types" issue.
if not isinstance(s[0], six.string_types):
return [''] + s
else:
return s | [
"def",
"_number_finder",
"(",
"s",
",",
"regex",
",",
"numconv",
")",
":",
"# Split. If there are no splits, return now",
"s",
"=",
"regex",
".",
"split",
"(",
"s",
")",
"if",
"len",
"(",
"s",
")",
"==",
"1",
":",
"return",
"tuple",
"(",
"s",
")",
"# ... | Helper to split numbers | [
"Helper",
"to",
"split",
"numbers"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/utils/natsort.py#L118-L139 | train | 200,995 |
tanghaibao/jcvi | jcvi/utils/natsort.py | index_natsorted | def index_natsorted(seq, key=lambda x: x, number_type=float, signed=True, exp=True):
"""\
Sorts a sequence naturally, but returns a list of sorted the
indeces and not the sorted list.
>>> a = ['num3', 'num5', 'num2']
>>> b = ['foo', 'bar', 'baz']
>>> index = index_natsorted(a)
>>> index
[2, 0, 1]
>>> # Sort both lists by the sort order of a
>>> [a[i] for i in index]
['num2', 'num3', 'num5']
>>> [b[i] for i in index]
['baz', 'foo', 'bar']
>>> c = [('a', 'num3'), ('b', 'num5'), ('c', 'num2')]
>>> from operator import itemgetter
>>> index_natsorted(c, key=itemgetter(1))
[2, 0, 1]
"""
from operator import itemgetter
item1 = itemgetter(1)
# Pair the index and sequence together, then sort by
index_seq_pair = [[x, key(y)] for x, y in zip(range(len(seq)), seq)]
index_seq_pair.sort(key=lambda x: natsort_key(item1(x),
number_type=number_type,
signed=signed, exp=exp))
return [x[0] for x in index_seq_pair] | python | def index_natsorted(seq, key=lambda x: x, number_type=float, signed=True, exp=True):
"""\
Sorts a sequence naturally, but returns a list of sorted the
indeces and not the sorted list.
>>> a = ['num3', 'num5', 'num2']
>>> b = ['foo', 'bar', 'baz']
>>> index = index_natsorted(a)
>>> index
[2, 0, 1]
>>> # Sort both lists by the sort order of a
>>> [a[i] for i in index]
['num2', 'num3', 'num5']
>>> [b[i] for i in index]
['baz', 'foo', 'bar']
>>> c = [('a', 'num3'), ('b', 'num5'), ('c', 'num2')]
>>> from operator import itemgetter
>>> index_natsorted(c, key=itemgetter(1))
[2, 0, 1]
"""
from operator import itemgetter
item1 = itemgetter(1)
# Pair the index and sequence together, then sort by
index_seq_pair = [[x, key(y)] for x, y in zip(range(len(seq)), seq)]
index_seq_pair.sort(key=lambda x: natsort_key(item1(x),
number_type=number_type,
signed=signed, exp=exp))
return [x[0] for x in index_seq_pair] | [
"def",
"index_natsorted",
"(",
"seq",
",",
"key",
"=",
"lambda",
"x",
":",
"x",
",",
"number_type",
"=",
"float",
",",
"signed",
"=",
"True",
",",
"exp",
"=",
"True",
")",
":",
"from",
"operator",
"import",
"itemgetter",
"item1",
"=",
"itemgetter",
"("... | \
Sorts a sequence naturally, but returns a list of sorted the
indeces and not the sorted list.
>>> a = ['num3', 'num5', 'num2']
>>> b = ['foo', 'bar', 'baz']
>>> index = index_natsorted(a)
>>> index
[2, 0, 1]
>>> # Sort both lists by the sort order of a
>>> [a[i] for i in index]
['num2', 'num3', 'num5']
>>> [b[i] for i in index]
['baz', 'foo', 'bar']
>>> c = [('a', 'num3'), ('b', 'num5'), ('c', 'num2')]
>>> from operator import itemgetter
>>> index_natsorted(c, key=itemgetter(1))
[2, 0, 1] | [
"\\",
"Sorts",
"a",
"sequence",
"naturally",
"but",
"returns",
"a",
"list",
"of",
"sorted",
"the",
"indeces",
"and",
"not",
"the",
"sorted",
"list",
"."
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/utils/natsort.py#L248-L276 | train | 200,996 |
tanghaibao/jcvi | jcvi/graphics/grabseeds.py | batchseeds | def batchseeds(args):
"""
%prog batchseeds folder
Extract seed metrics for each image in a directory.
"""
from jcvi.formats.pdf import cat
xargs = args[1:]
p = OptionParser(batchseeds.__doc__)
opts, args, iopts = add_seeds_options(p, args)
if len(args) != 1:
sys.exit(not p.print_help())
folder, = args
folder = folder.rstrip('/')
outdir = folder + "-debug"
outfile = folder + "-output.tsv"
assert op.isdir(folder)
images = []
jsonfile = opts.calibrate or op.join(folder, "calibrate.json")
if not op.exists(jsonfile):
jsonfile = None
for im in iglob(folder, "*.jpg,*.JPG,*.png"):
if im.endswith((".resize.jpg", ".main.jpg", ".label.jpg")):
continue
if op.basename(im).startswith("calibrate"):
continue
images.append(im)
fw = must_open(outfile, 'w')
print(Seed.header(calibrate=jsonfile), file=fw)
nseeds = 0
for im in images:
imargs = [im, "--noheader", "--outdir={0}".format(outdir)] + xargs
if jsonfile:
imargs += ["--calibrate={0}".format(jsonfile)]
objects = seeds(imargs)
for o in objects:
print(o, file=fw)
nseeds += len(objects)
fw.close()
logging.debug("Processed {0} images.".format(len(images)))
logging.debug("A total of {0} objects written to `{1}`.".\
format(nseeds, outfile))
pdfs = iglob(outdir, "*.pdf")
outpdf = folder + "-output.pdf"
cat(pdfs + ["--outfile={0}".format(outpdf)])
logging.debug("Debugging information written to `{0}`.".format(outpdf))
return outfile | python | def batchseeds(args):
"""
%prog batchseeds folder
Extract seed metrics for each image in a directory.
"""
from jcvi.formats.pdf import cat
xargs = args[1:]
p = OptionParser(batchseeds.__doc__)
opts, args, iopts = add_seeds_options(p, args)
if len(args) != 1:
sys.exit(not p.print_help())
folder, = args
folder = folder.rstrip('/')
outdir = folder + "-debug"
outfile = folder + "-output.tsv"
assert op.isdir(folder)
images = []
jsonfile = opts.calibrate or op.join(folder, "calibrate.json")
if not op.exists(jsonfile):
jsonfile = None
for im in iglob(folder, "*.jpg,*.JPG,*.png"):
if im.endswith((".resize.jpg", ".main.jpg", ".label.jpg")):
continue
if op.basename(im).startswith("calibrate"):
continue
images.append(im)
fw = must_open(outfile, 'w')
print(Seed.header(calibrate=jsonfile), file=fw)
nseeds = 0
for im in images:
imargs = [im, "--noheader", "--outdir={0}".format(outdir)] + xargs
if jsonfile:
imargs += ["--calibrate={0}".format(jsonfile)]
objects = seeds(imargs)
for o in objects:
print(o, file=fw)
nseeds += len(objects)
fw.close()
logging.debug("Processed {0} images.".format(len(images)))
logging.debug("A total of {0} objects written to `{1}`.".\
format(nseeds, outfile))
pdfs = iglob(outdir, "*.pdf")
outpdf = folder + "-output.pdf"
cat(pdfs + ["--outfile={0}".format(outpdf)])
logging.debug("Debugging information written to `{0}`.".format(outpdf))
return outfile | [
"def",
"batchseeds",
"(",
"args",
")",
":",
"from",
"jcvi",
".",
"formats",
".",
"pdf",
"import",
"cat",
"xargs",
"=",
"args",
"[",
"1",
":",
"]",
"p",
"=",
"OptionParser",
"(",
"batchseeds",
".",
"__doc__",
")",
"opts",
",",
"args",
",",
"iopts",
... | %prog batchseeds folder
Extract seed metrics for each image in a directory. | [
"%prog",
"batchseeds",
"folder"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/graphics/grabseeds.py#L255-L307 | train | 200,997 |
tanghaibao/jcvi | jcvi/formats/bed.py | filterbedgraph | def filterbedgraph(args):
"""
%prog filterbedgraph a.bedgraph 1
Filter the bedGraph, typically from the gem-mappability pipeline. Unique
regions are 1, two copies .5, etc.
"""
p = OptionParser(filterbedgraph.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
bedgraphfile, cutoff = args
c = float(cutoff)
fp = open(bedgraphfile)
pf = bedgraphfile.rsplit(".", 1)[0]
filteredbed = pf + ".filtered-{}.bed".format(cutoff)
fw = open(filteredbed, "w")
nfiltered = ntotal = 0
for row in fp:
b = BedLine(row)
ntotal += 1
if float(b.accn) >= c:
print(b, file=fw)
nfiltered += 1
fw.close()
logging.debug("A total of {} intervals (score >= {}) written to `{}`".\
format(percentage(nfiltered, ntotal), cutoff, filteredbed))
mergeBed(filteredbed, sorted=True, delim=None) | python | def filterbedgraph(args):
"""
%prog filterbedgraph a.bedgraph 1
Filter the bedGraph, typically from the gem-mappability pipeline. Unique
regions are 1, two copies .5, etc.
"""
p = OptionParser(filterbedgraph.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
bedgraphfile, cutoff = args
c = float(cutoff)
fp = open(bedgraphfile)
pf = bedgraphfile.rsplit(".", 1)[0]
filteredbed = pf + ".filtered-{}.bed".format(cutoff)
fw = open(filteredbed, "w")
nfiltered = ntotal = 0
for row in fp:
b = BedLine(row)
ntotal += 1
if float(b.accn) >= c:
print(b, file=fw)
nfiltered += 1
fw.close()
logging.debug("A total of {} intervals (score >= {}) written to `{}`".\
format(percentage(nfiltered, ntotal), cutoff, filteredbed))
mergeBed(filteredbed, sorted=True, delim=None) | [
"def",
"filterbedgraph",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"filterbedgraph",
".",
"__doc__",
")",
"opts",
",",
"args",
"=",
"p",
".",
"parse_args",
"(",
"args",
")",
"if",
"len",
"(",
"args",
")",
"!=",
"2",
":",
"sys",
".",
"ex... | %prog filterbedgraph a.bedgraph 1
Filter the bedGraph, typically from the gem-mappability pipeline. Unique
regions are 1, two copies .5, etc. | [
"%prog",
"filterbedgraph",
"a",
".",
"bedgraph",
"1"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/bed.py#L430-L460 | train | 200,998 |
tanghaibao/jcvi | jcvi/formats/bed.py | tiling | def tiling(args):
"""
%prog tiling bedfile
Compute minimum tiling path using as few clones as possible. Implemented
with dynamic programming. Greedy algorithm may also work according a
stackoverflow source.
"""
p = OptionParser(tiling.__doc__)
p.add_option("--overlap", default=3000, type="int",
help="Minimum amount of overlaps required")
p.set_verbose()
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
bedfile, = args
ov = opts.overlap
bed = Bed(bedfile)
inf = len(bed)
selected = Bed()
for seqid, sbed in bed.sub_beds():
g = Grouper()
current = sbed[0]
# Partition connected features
for a in sbed:
g.join(a)
# requires a real overlap
if a.start < current.end - ov:
g.join(a, current)
if a.end > current.end:
current = a
# Process per partition
for gbed in g:
end = max(x.end for x in gbed)
gbed.sort(key=lambda x: (x.start, -x.end))
entries = len(gbed)
counts = [inf] * entries
counts[0] = 1
traceback = [-1] * entries
for i, a in enumerate(gbed):
for j in xrange(i + 1, entries):
b = gbed[j]
if b.start >= a.end - ov:
break
# Two ranges overlap!
if counts[i] + 1 < counts[j]:
counts[j] = counts[i] + 1
traceback[j] = i
endi = [i for i, a in enumerate(gbed) if a.end == end]
last = min((traceback[i], i) for i in endi)[1]
chain = []
while last != -1:
chain.append(last)
last = traceback[last]
chain = chain[::-1]
selected.extend([gbed[x] for x in chain])
if opts.verbose:
print(counts)
print(traceback)
print(chain)
print("\n".join(str(x) for x in gbed))
print("*" * 30)
print("\n".join(str(gbed[x]) for x in chain))
print()
tilingbedfile = bedfile.rsplit(".", 1)[0] + ".tiling.bed"
selected.print_to_file(filename=tilingbedfile, sorted=True)
logging.debug("A total of {} tiling features written to `{}`"\
.format(len(selected), tilingbedfile)) | python | def tiling(args):
"""
%prog tiling bedfile
Compute minimum tiling path using as few clones as possible. Implemented
with dynamic programming. Greedy algorithm may also work according a
stackoverflow source.
"""
p = OptionParser(tiling.__doc__)
p.add_option("--overlap", default=3000, type="int",
help="Minimum amount of overlaps required")
p.set_verbose()
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
bedfile, = args
ov = opts.overlap
bed = Bed(bedfile)
inf = len(bed)
selected = Bed()
for seqid, sbed in bed.sub_beds():
g = Grouper()
current = sbed[0]
# Partition connected features
for a in sbed:
g.join(a)
# requires a real overlap
if a.start < current.end - ov:
g.join(a, current)
if a.end > current.end:
current = a
# Process per partition
for gbed in g:
end = max(x.end for x in gbed)
gbed.sort(key=lambda x: (x.start, -x.end))
entries = len(gbed)
counts = [inf] * entries
counts[0] = 1
traceback = [-1] * entries
for i, a in enumerate(gbed):
for j in xrange(i + 1, entries):
b = gbed[j]
if b.start >= a.end - ov:
break
# Two ranges overlap!
if counts[i] + 1 < counts[j]:
counts[j] = counts[i] + 1
traceback[j] = i
endi = [i for i, a in enumerate(gbed) if a.end == end]
last = min((traceback[i], i) for i in endi)[1]
chain = []
while last != -1:
chain.append(last)
last = traceback[last]
chain = chain[::-1]
selected.extend([gbed[x] for x in chain])
if opts.verbose:
print(counts)
print(traceback)
print(chain)
print("\n".join(str(x) for x in gbed))
print("*" * 30)
print("\n".join(str(gbed[x]) for x in chain))
print()
tilingbedfile = bedfile.rsplit(".", 1)[0] + ".tiling.bed"
selected.print_to_file(filename=tilingbedfile, sorted=True)
logging.debug("A total of {} tiling features written to `{}`"\
.format(len(selected), tilingbedfile)) | [
"def",
"tiling",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"tiling",
".",
"__doc__",
")",
"p",
".",
"add_option",
"(",
"\"--overlap\"",
",",
"default",
"=",
"3000",
",",
"type",
"=",
"\"int\"",
",",
"help",
"=",
"\"Minimum amount of overlaps re... | %prog tiling bedfile
Compute minimum tiling path using as few clones as possible. Implemented
with dynamic programming. Greedy algorithm may also work according a
stackoverflow source. | [
"%prog",
"tiling",
"bedfile"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/bed.py#L463-L536 | train | 200,999 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.