repository_name stringclasses 316 values | func_path_in_repository stringlengths 6 223 | func_name stringlengths 1 134 | language stringclasses 1 value | func_code_string stringlengths 57 65.5k | func_documentation_string stringlengths 1 46.3k | split_name stringclasses 1 value | func_code_url stringlengths 91 315 | called_functions listlengths 1 156 ⌀ | enclosing_scope stringlengths 2 1.48M |
|---|---|---|---|---|---|---|---|---|---|
Sliim/soundcloud-syncer | ssyncer/suser.py | suser.get_tracks | python | def get_tracks(self, offset=0, limit=50):
response = self.client.get(
self.client.USER_TRACKS % (self.name, offset, limit))
return self._parse_response(response, strack) | Get user's tracks. | train | https://github.com/Sliim/soundcloud-syncer/blob/f15142677bf8e5fb54f40b0eb9a36f21ba940ab6/ssyncer/suser.py#L44-L48 | [
"def get(self, uri):\n \"\"\" Send a request to given uri. \"\"\"\n return self.send_request(\n \"{0}://{1}:{2}{3}{4}\".format(\n self.get_protocol(),\n self.host,\n self.port,\n uri,\n self.client_id\n )\n )\n",
"def _parse_response(self, response, target_object=strack):\n \"\"\" Generic response parser method \"\"\"\n objects = json.loads(response.read().decode(\"utf-8\"))\n list = []\n for obj in objects:\n list.append(target_object(obj, client=self.client))\n return list\n"
] | class suser:
name = None
client = None
def __init__(self, username, **kwargs):
""" Initialize soundcloud's user object. """
self.name = username
if "client" in kwargs:
self.client = kwargs.get("client")
elif "client_id" in kwargs:
self.client = sclient(kwargs.get("client_id"))
else:
self.client = sclient()
def get_likes(self, offset=0, limit=50):
""" Get user's likes. """
response = self.client.get(
self.client.USER_LIKES % (self.name, offset, limit))
return self._parse_response(response, strack)
def get_playlists(self, offset=0, limit=50):
""" Get user's playlists. """
response = self.client.get(
self.client.USER_PLAYLISTS % (self.name, offset, limit))
return self._parse_response(response, splaylist)
return playlists
def _parse_response(self, response, target_object=strack):
""" Generic response parser method """
objects = json.loads(response.read().decode("utf-8"))
list = []
for obj in objects:
list.append(target_object(obj, client=self.client))
return list
|
Sliim/soundcloud-syncer | ssyncer/suser.py | suser.get_playlists | python | def get_playlists(self, offset=0, limit=50):
response = self.client.get(
self.client.USER_PLAYLISTS % (self.name, offset, limit))
return self._parse_response(response, splaylist)
return playlists | Get user's playlists. | train | https://github.com/Sliim/soundcloud-syncer/blob/f15142677bf8e5fb54f40b0eb9a36f21ba940ab6/ssyncer/suser.py#L50-L56 | [
"def get(self, uri):\n \"\"\" Send a request to given uri. \"\"\"\n return self.send_request(\n \"{0}://{1}:{2}{3}{4}\".format(\n self.get_protocol(),\n self.host,\n self.port,\n uri,\n self.client_id\n )\n )\n",
"def _parse_response(self, response, target_object=strack):\n \"\"\" Generic response parser method \"\"\"\n objects = json.loads(response.read().decode(\"utf-8\"))\n list = []\n for obj in objects:\n list.append(target_object(obj, client=self.client))\n return list\n"
] | class suser:
name = None
client = None
def __init__(self, username, **kwargs):
""" Initialize soundcloud's user object. """
self.name = username
if "client" in kwargs:
self.client = kwargs.get("client")
elif "client_id" in kwargs:
self.client = sclient(kwargs.get("client_id"))
else:
self.client = sclient()
def get_likes(self, offset=0, limit=50):
""" Get user's likes. """
response = self.client.get(
self.client.USER_LIKES % (self.name, offset, limit))
return self._parse_response(response, strack)
def get_tracks(self, offset=0, limit=50):
""" Get user's tracks. """
response = self.client.get(
self.client.USER_TRACKS % (self.name, offset, limit))
return self._parse_response(response, strack)
def _parse_response(self, response, target_object=strack):
""" Generic response parser method """
objects = json.loads(response.read().decode("utf-8"))
list = []
for obj in objects:
list.append(target_object(obj, client=self.client))
return list
|
Sliim/soundcloud-syncer | ssyncer/suser.py | suser._parse_response | python | def _parse_response(self, response, target_object=strack):
objects = json.loads(response.read().decode("utf-8"))
list = []
for obj in objects:
list.append(target_object(obj, client=self.client))
return list | Generic response parser method | train | https://github.com/Sliim/soundcloud-syncer/blob/f15142677bf8e5fb54f40b0eb9a36f21ba940ab6/ssyncer/suser.py#L58-L64 | null | class suser:
name = None
client = None
def __init__(self, username, **kwargs):
""" Initialize soundcloud's user object. """
self.name = username
if "client" in kwargs:
self.client = kwargs.get("client")
elif "client_id" in kwargs:
self.client = sclient(kwargs.get("client_id"))
else:
self.client = sclient()
def get_likes(self, offset=0, limit=50):
""" Get user's likes. """
response = self.client.get(
self.client.USER_LIKES % (self.name, offset, limit))
return self._parse_response(response, strack)
def get_tracks(self, offset=0, limit=50):
""" Get user's tracks. """
response = self.client.get(
self.client.USER_TRACKS % (self.name, offset, limit))
return self._parse_response(response, strack)
def get_playlists(self, offset=0, limit=50):
""" Get user's playlists. """
response = self.client.get(
self.client.USER_PLAYLISTS % (self.name, offset, limit))
return self._parse_response(response, splaylist)
return playlists
|
Sliim/soundcloud-syncer | ssyncer/strack.py | strack.get_download_link | python | def get_download_link(self):
url = None
if not self.get("downloadable"):
try:
url = self.client.get_location(
self.client.STREAM_URL % self.get("id"))
except serror as e:
print(e)
if not url:
try:
url = self.client.get_location(
self.client.DOWNLOAD_URL % self.get("id"))
except serror as e:
print(e)
return url | Get direct download link with soudcloud's redirect system. | train | https://github.com/Sliim/soundcloud-syncer/blob/f15142677bf8e5fb54f40b0eb9a36f21ba940ab6/ssyncer/strack.py#L82-L99 | [
"def get(self, key):\n \"\"\" Get track metadata value from a given key. \"\"\"\n if key in self.metadata:\n return self.metadata[key]\n return None\n"
] | class strack:
client = None
metadata = {}
downloaded = False
filename = None
def __init__(self, track_data, **kwargs):
""" Track object initialization, load track metadata. """
if "client" in kwargs:
self.client = kwargs.get("client")
elif "client_id" in kwargs:
self.client = sclient(kwargs.get("client_id"))
else:
self.client = sclient()
def map(key, data):
return str(data[key]) if key in data else ""
self.metadata = {
"id": track_data["id"],
"kind": map("kind", track_data),
"title": map("title", track_data),
"permalink": map("permalink", track_data),
"username": map("permalink", track_data["user"]),
"artist": map("username", track_data["user"]),
"user-url": map("permalink_url", track_data["user"]),
"downloadable": track_data["downloadable"],
"original-format": map("original_format", track_data),
"created-at": map("created_at", track_data),
"duration": map("duration", track_data),
"tags-list": map("tags_list", track_data),
"genre": map("genre", track_data),
"description": map("description", track_data),
"license": map("license", track_data),
"uri": map("uri", track_data),
"permalink-url": map("permalink_url", track_data),
"artwork-url": map("artwork_url",
track_data).replace("large", "crop"),
}
def get(self, key):
""" Get track metadata value from a given key. """
if key in self.metadata:
return self.metadata[key]
return None
def gen_filename(self):
""" Generate local filename for this track. """
return "{0}-{1}".format(
self.get("id"),
self.get("permalink"))
def get_file_extension(self, filepath):
"""
This method check mimetype to define file extension.
If it can't, it use original-format metadata.
"""
mtype = magic.from_file(filepath, mime=True)
if type(mtype) == bytes:
mtype = mtype.decode("utf-8")
if mtype == "audio/mpeg":
ext = ".mp3"
elif mtype == "audio/x-wav":
ext = ".wav"
else:
ext = "." + self.get("original-format")
return ext
def gen_artwork_filename(self):
""" Generate artwork filename for cover of this track. """
return "{0}-{1}.jpg".format(
self.get("id"),
self.get("permalink"))
def gen_localdir(self, localdir):
"""
Generate local directory where track will be saved.
Create it if not exists.
"""
directory = "{0}/{1}/".format(localdir, self.get("username"))
if not os.path.exists(directory):
os.makedirs(directory)
return directory
def track_exists(self, localdir):
""" Check if track exists in local directory. """
path = glob.glob(self.gen_localdir(localdir) +
self.gen_filename() + "*")
if len(path) > 0 and os.path.getsize(path[0]) > 0:
return True
return False
def get_ignored_tracks(self, localdir):
""" Get ignored tracks list. """
ignore_file = "%s/.ignore" % localdir
list = []
if os.path.exists(ignore_file):
f = open(ignore_file)
ignored = f.readlines()
f.close()
for i in ignored:
list.append("%s/%s" % (localdir, i.rstrip()))
return list
def download(self, localdir, max_retry):
""" Download a track in local directory. """
local_file = self.gen_localdir(localdir) + self.gen_filename()
if self.track_exists(localdir):
print("Track {0} already downloaded, skipping!".format(
self.get("id")))
return False
if local_file in self.get_ignored_tracks(localdir):
print("\033[93mTrack {0} ignored, skipping!!\033[0m".format(
self.get("id")))
return False
dlurl = self.get_download_link()
if not dlurl:
raise serror("Can't download track_id:%d|%s" % (
self.get("id"),
self.get("title")))
retry = max_retry
print("\nDownloading %s (%d).." % (self.get("title"), self.get("id")))
while True:
try:
urllib.request.urlretrieve(dlurl, local_file,
self._progress_hook)
break
except Exception as e:
if os.path.isfile(local_file):
os.unlink(local_file)
retry -= 1
if retry < 0:
raise serror("Can't download track-id %s, max retry "
"reached (%d). Error occured: %s" % (
self.get("id"), max_retry, type(e)))
else:
print("\033[93mError occured for track-id %s (%s). "
"Retrying.. (%d/%d) \033[0m" % (
self.get("id"),
type(e),
max_retry - retry,
max_retry))
except KeyboardInterrupt:
if os.path.isfile(local_file):
os.unlink(local_file)
raise serror("KeyBoard Interrupt: Incomplete file removed.")
self.filepath = local_file + self.get_file_extension(local_file)
os.rename(local_file, self.filepath)
print("Downloaded => %s" % self.filepath)
self.downloaded = True
return True
def process_tags(self, tag=None):
"""Process ID3 Tags for mp3 files."""
if self.downloaded is False:
raise serror("Track not downloaded, can't process tags..")
filetype = magic.from_file(self.filepath, mime=True)
if filetype != "audio/mpeg":
raise serror("Cannot process tags for file type %s." % filetype)
print("Processing tags for %s.." % self.filepath)
if tag is None:
tag = stag()
tag.load_id3(self)
tag.write_id3(self.filepath)
def convert(self):
"""Convert file in mp3 format."""
if self.downloaded is False:
raise serror("Track not downloaded, can't convert file..")
filetype = magic.from_file(self.filepath, mime=True)
if filetype == "audio/mpeg":
print("File is already in mp3 format. Skipping convert.")
return False
rootpath = os.path.dirname(os.path.dirname(self.filepath))
backupdir = rootpath + "/backups/" + self.get("username")
if not os.path.exists(backupdir):
os.makedirs(backupdir)
backupfile = "%s/%s%s" % (
backupdir,
self.gen_filename(),
self.get_file_extension(self.filepath))
newfile = "%s.mp3" % self.filename_without_extension()
os.rename(self.filepath, backupfile)
self.filepath = newfile
print("Converting to %s.." % newfile)
song = AudioSegment.from_file(backupfile)
return song.export(newfile, format="mp3")
def filename_without_extension(self):
"""Return filename without extension"""
return re.sub("\.\w+$", "", self.filepath)
def download_artwork(self, localdir, max_retry):
"""
Download track's artwork and return file path.
Artwork's path is saved in track's metadata as 'artwork-path' key.
"""
if self.get("artwork-url") == "None":
self.metadata["artwork-path"] = None
return None
artwork_dir = localdir + "/artworks"
if not os.path.isdir(artwork_dir):
if os.path.isfile(artwork_dir):
os.unlink(artwork_dir)
os.mkdir(artwork_dir)
artwork_filepath = artwork_dir + "/" + self.gen_artwork_filename()
retry = max_retry
while True:
try:
res = urllib.request.urlopen(self.get("artwork-url"))
with open(artwork_filepath, "wb") as file:
file.write(res.read())
break
except Exception as e:
retry -= 1
if retry < 0:
print(serror("Can't download track's artwork, max retry "
"reached (%d). Error occured: %s" % (
max_retry, type(e))))
return False
else:
print("\033[93mTrack's artwork download failed (%s). "
"Retrying.. (%d/%d) \033[0m" % (
type(e),
max_retry - retry,
max_retry))
self.metadata["artwork-path"] = artwork_filepath
def _progress_hook(self, blocknum, blocksize, totalsize):
""" Progress hook for urlretrieve. """
read = blocknum * blocksize
if totalsize > 0:
percent = read * 1e2 / totalsize
s = "\r%d%% %*d / %d" % (
percent, len(str(totalsize)), read, totalsize)
sys.stdout.write(s)
if read >= totalsize:
sys.stdout.write("\n")
else:
sys.stdout.write("read %d\n" % read)
|
Sliim/soundcloud-syncer | ssyncer/strack.py | strack.get_file_extension | python | def get_file_extension(self, filepath):
mtype = magic.from_file(filepath, mime=True)
if type(mtype) == bytes:
mtype = mtype.decode("utf-8")
if mtype == "audio/mpeg":
ext = ".mp3"
elif mtype == "audio/x-wav":
ext = ".wav"
else:
ext = "." + self.get("original-format")
return ext | This method check mimetype to define file extension.
If it can't, it use original-format metadata. | train | https://github.com/Sliim/soundcloud-syncer/blob/f15142677bf8e5fb54f40b0eb9a36f21ba940ab6/ssyncer/strack.py#L107-L122 | null | class strack:
client = None
metadata = {}
downloaded = False
filename = None
def __init__(self, track_data, **kwargs):
""" Track object initialization, load track metadata. """
if "client" in kwargs:
self.client = kwargs.get("client")
elif "client_id" in kwargs:
self.client = sclient(kwargs.get("client_id"))
else:
self.client = sclient()
def map(key, data):
return str(data[key]) if key in data else ""
self.metadata = {
"id": track_data["id"],
"kind": map("kind", track_data),
"title": map("title", track_data),
"permalink": map("permalink", track_data),
"username": map("permalink", track_data["user"]),
"artist": map("username", track_data["user"]),
"user-url": map("permalink_url", track_data["user"]),
"downloadable": track_data["downloadable"],
"original-format": map("original_format", track_data),
"created-at": map("created_at", track_data),
"duration": map("duration", track_data),
"tags-list": map("tags_list", track_data),
"genre": map("genre", track_data),
"description": map("description", track_data),
"license": map("license", track_data),
"uri": map("uri", track_data),
"permalink-url": map("permalink_url", track_data),
"artwork-url": map("artwork_url",
track_data).replace("large", "crop"),
}
def get(self, key):
""" Get track metadata value from a given key. """
if key in self.metadata:
return self.metadata[key]
return None
def get_download_link(self):
""" Get direct download link with soudcloud's redirect system. """
url = None
if not self.get("downloadable"):
try:
url = self.client.get_location(
self.client.STREAM_URL % self.get("id"))
except serror as e:
print(e)
if not url:
try:
url = self.client.get_location(
self.client.DOWNLOAD_URL % self.get("id"))
except serror as e:
print(e)
return url
def gen_filename(self):
""" Generate local filename for this track. """
return "{0}-{1}".format(
self.get("id"),
self.get("permalink"))
def gen_artwork_filename(self):
""" Generate artwork filename for cover of this track. """
return "{0}-{1}.jpg".format(
self.get("id"),
self.get("permalink"))
def gen_localdir(self, localdir):
"""
Generate local directory where track will be saved.
Create it if not exists.
"""
directory = "{0}/{1}/".format(localdir, self.get("username"))
if not os.path.exists(directory):
os.makedirs(directory)
return directory
def track_exists(self, localdir):
""" Check if track exists in local directory. """
path = glob.glob(self.gen_localdir(localdir) +
self.gen_filename() + "*")
if len(path) > 0 and os.path.getsize(path[0]) > 0:
return True
return False
def get_ignored_tracks(self, localdir):
""" Get ignored tracks list. """
ignore_file = "%s/.ignore" % localdir
list = []
if os.path.exists(ignore_file):
f = open(ignore_file)
ignored = f.readlines()
f.close()
for i in ignored:
list.append("%s/%s" % (localdir, i.rstrip()))
return list
def download(self, localdir, max_retry):
""" Download a track in local directory. """
local_file = self.gen_localdir(localdir) + self.gen_filename()
if self.track_exists(localdir):
print("Track {0} already downloaded, skipping!".format(
self.get("id")))
return False
if local_file in self.get_ignored_tracks(localdir):
print("\033[93mTrack {0} ignored, skipping!!\033[0m".format(
self.get("id")))
return False
dlurl = self.get_download_link()
if not dlurl:
raise serror("Can't download track_id:%d|%s" % (
self.get("id"),
self.get("title")))
retry = max_retry
print("\nDownloading %s (%d).." % (self.get("title"), self.get("id")))
while True:
try:
urllib.request.urlretrieve(dlurl, local_file,
self._progress_hook)
break
except Exception as e:
if os.path.isfile(local_file):
os.unlink(local_file)
retry -= 1
if retry < 0:
raise serror("Can't download track-id %s, max retry "
"reached (%d). Error occured: %s" % (
self.get("id"), max_retry, type(e)))
else:
print("\033[93mError occured for track-id %s (%s). "
"Retrying.. (%d/%d) \033[0m" % (
self.get("id"),
type(e),
max_retry - retry,
max_retry))
except KeyboardInterrupt:
if os.path.isfile(local_file):
os.unlink(local_file)
raise serror("KeyBoard Interrupt: Incomplete file removed.")
self.filepath = local_file + self.get_file_extension(local_file)
os.rename(local_file, self.filepath)
print("Downloaded => %s" % self.filepath)
self.downloaded = True
return True
def process_tags(self, tag=None):
"""Process ID3 Tags for mp3 files."""
if self.downloaded is False:
raise serror("Track not downloaded, can't process tags..")
filetype = magic.from_file(self.filepath, mime=True)
if filetype != "audio/mpeg":
raise serror("Cannot process tags for file type %s." % filetype)
print("Processing tags for %s.." % self.filepath)
if tag is None:
tag = stag()
tag.load_id3(self)
tag.write_id3(self.filepath)
def convert(self):
"""Convert file in mp3 format."""
if self.downloaded is False:
raise serror("Track not downloaded, can't convert file..")
filetype = magic.from_file(self.filepath, mime=True)
if filetype == "audio/mpeg":
print("File is already in mp3 format. Skipping convert.")
return False
rootpath = os.path.dirname(os.path.dirname(self.filepath))
backupdir = rootpath + "/backups/" + self.get("username")
if not os.path.exists(backupdir):
os.makedirs(backupdir)
backupfile = "%s/%s%s" % (
backupdir,
self.gen_filename(),
self.get_file_extension(self.filepath))
newfile = "%s.mp3" % self.filename_without_extension()
os.rename(self.filepath, backupfile)
self.filepath = newfile
print("Converting to %s.." % newfile)
song = AudioSegment.from_file(backupfile)
return song.export(newfile, format="mp3")
def filename_without_extension(self):
"""Return filename without extension"""
return re.sub("\.\w+$", "", self.filepath)
def download_artwork(self, localdir, max_retry):
"""
Download track's artwork and return file path.
Artwork's path is saved in track's metadata as 'artwork-path' key.
"""
if self.get("artwork-url") == "None":
self.metadata["artwork-path"] = None
return None
artwork_dir = localdir + "/artworks"
if not os.path.isdir(artwork_dir):
if os.path.isfile(artwork_dir):
os.unlink(artwork_dir)
os.mkdir(artwork_dir)
artwork_filepath = artwork_dir + "/" + self.gen_artwork_filename()
retry = max_retry
while True:
try:
res = urllib.request.urlopen(self.get("artwork-url"))
with open(artwork_filepath, "wb") as file:
file.write(res.read())
break
except Exception as e:
retry -= 1
if retry < 0:
print(serror("Can't download track's artwork, max retry "
"reached (%d). Error occured: %s" % (
max_retry, type(e))))
return False
else:
print("\033[93mTrack's artwork download failed (%s). "
"Retrying.. (%d/%d) \033[0m" % (
type(e),
max_retry - retry,
max_retry))
self.metadata["artwork-path"] = artwork_filepath
def _progress_hook(self, blocknum, blocksize, totalsize):
""" Progress hook for urlretrieve. """
read = blocknum * blocksize
if totalsize > 0:
percent = read * 1e2 / totalsize
s = "\r%d%% %*d / %d" % (
percent, len(str(totalsize)), read, totalsize)
sys.stdout.write(s)
if read >= totalsize:
sys.stdout.write("\n")
else:
sys.stdout.write("read %d\n" % read)
|
Sliim/soundcloud-syncer | ssyncer/strack.py | strack.gen_localdir | python | def gen_localdir(self, localdir):
directory = "{0}/{1}/".format(localdir, self.get("username"))
if not os.path.exists(directory):
os.makedirs(directory)
return directory | Generate local directory where track will be saved.
Create it if not exists. | train | https://github.com/Sliim/soundcloud-syncer/blob/f15142677bf8e5fb54f40b0eb9a36f21ba940ab6/ssyncer/strack.py#L130-L138 | [
"def get(self, key):\n \"\"\" Get track metadata value from a given key. \"\"\"\n if key in self.metadata:\n return self.metadata[key]\n return None\n"
] | class strack:
client = None
metadata = {}
downloaded = False
filename = None
def __init__(self, track_data, **kwargs):
""" Track object initialization, load track metadata. """
if "client" in kwargs:
self.client = kwargs.get("client")
elif "client_id" in kwargs:
self.client = sclient(kwargs.get("client_id"))
else:
self.client = sclient()
def map(key, data):
return str(data[key]) if key in data else ""
self.metadata = {
"id": track_data["id"],
"kind": map("kind", track_data),
"title": map("title", track_data),
"permalink": map("permalink", track_data),
"username": map("permalink", track_data["user"]),
"artist": map("username", track_data["user"]),
"user-url": map("permalink_url", track_data["user"]),
"downloadable": track_data["downloadable"],
"original-format": map("original_format", track_data),
"created-at": map("created_at", track_data),
"duration": map("duration", track_data),
"tags-list": map("tags_list", track_data),
"genre": map("genre", track_data),
"description": map("description", track_data),
"license": map("license", track_data),
"uri": map("uri", track_data),
"permalink-url": map("permalink_url", track_data),
"artwork-url": map("artwork_url",
track_data).replace("large", "crop"),
}
def get(self, key):
""" Get track metadata value from a given key. """
if key in self.metadata:
return self.metadata[key]
return None
def get_download_link(self):
""" Get direct download link with soudcloud's redirect system. """
url = None
if not self.get("downloadable"):
try:
url = self.client.get_location(
self.client.STREAM_URL % self.get("id"))
except serror as e:
print(e)
if not url:
try:
url = self.client.get_location(
self.client.DOWNLOAD_URL % self.get("id"))
except serror as e:
print(e)
return url
def gen_filename(self):
""" Generate local filename for this track. """
return "{0}-{1}".format(
self.get("id"),
self.get("permalink"))
def get_file_extension(self, filepath):
"""
This method check mimetype to define file extension.
If it can't, it use original-format metadata.
"""
mtype = magic.from_file(filepath, mime=True)
if type(mtype) == bytes:
mtype = mtype.decode("utf-8")
if mtype == "audio/mpeg":
ext = ".mp3"
elif mtype == "audio/x-wav":
ext = ".wav"
else:
ext = "." + self.get("original-format")
return ext
def gen_artwork_filename(self):
""" Generate artwork filename for cover of this track. """
return "{0}-{1}.jpg".format(
self.get("id"),
self.get("permalink"))
def track_exists(self, localdir):
""" Check if track exists in local directory. """
path = glob.glob(self.gen_localdir(localdir) +
self.gen_filename() + "*")
if len(path) > 0 and os.path.getsize(path[0]) > 0:
return True
return False
def get_ignored_tracks(self, localdir):
""" Get ignored tracks list. """
ignore_file = "%s/.ignore" % localdir
list = []
if os.path.exists(ignore_file):
f = open(ignore_file)
ignored = f.readlines()
f.close()
for i in ignored:
list.append("%s/%s" % (localdir, i.rstrip()))
return list
def download(self, localdir, max_retry):
""" Download a track in local directory. """
local_file = self.gen_localdir(localdir) + self.gen_filename()
if self.track_exists(localdir):
print("Track {0} already downloaded, skipping!".format(
self.get("id")))
return False
if local_file in self.get_ignored_tracks(localdir):
print("\033[93mTrack {0} ignored, skipping!!\033[0m".format(
self.get("id")))
return False
dlurl = self.get_download_link()
if not dlurl:
raise serror("Can't download track_id:%d|%s" % (
self.get("id"),
self.get("title")))
retry = max_retry
print("\nDownloading %s (%d).." % (self.get("title"), self.get("id")))
while True:
try:
urllib.request.urlretrieve(dlurl, local_file,
self._progress_hook)
break
except Exception as e:
if os.path.isfile(local_file):
os.unlink(local_file)
retry -= 1
if retry < 0:
raise serror("Can't download track-id %s, max retry "
"reached (%d). Error occured: %s" % (
self.get("id"), max_retry, type(e)))
else:
print("\033[93mError occured for track-id %s (%s). "
"Retrying.. (%d/%d) \033[0m" % (
self.get("id"),
type(e),
max_retry - retry,
max_retry))
except KeyboardInterrupt:
if os.path.isfile(local_file):
os.unlink(local_file)
raise serror("KeyBoard Interrupt: Incomplete file removed.")
self.filepath = local_file + self.get_file_extension(local_file)
os.rename(local_file, self.filepath)
print("Downloaded => %s" % self.filepath)
self.downloaded = True
return True
def process_tags(self, tag=None):
"""Process ID3 Tags for mp3 files."""
if self.downloaded is False:
raise serror("Track not downloaded, can't process tags..")
filetype = magic.from_file(self.filepath, mime=True)
if filetype != "audio/mpeg":
raise serror("Cannot process tags for file type %s." % filetype)
print("Processing tags for %s.." % self.filepath)
if tag is None:
tag = stag()
tag.load_id3(self)
tag.write_id3(self.filepath)
def convert(self):
"""Convert file in mp3 format."""
if self.downloaded is False:
raise serror("Track not downloaded, can't convert file..")
filetype = magic.from_file(self.filepath, mime=True)
if filetype == "audio/mpeg":
print("File is already in mp3 format. Skipping convert.")
return False
rootpath = os.path.dirname(os.path.dirname(self.filepath))
backupdir = rootpath + "/backups/" + self.get("username")
if not os.path.exists(backupdir):
os.makedirs(backupdir)
backupfile = "%s/%s%s" % (
backupdir,
self.gen_filename(),
self.get_file_extension(self.filepath))
newfile = "%s.mp3" % self.filename_without_extension()
os.rename(self.filepath, backupfile)
self.filepath = newfile
print("Converting to %s.." % newfile)
song = AudioSegment.from_file(backupfile)
return song.export(newfile, format="mp3")
def filename_without_extension(self):
"""Return filename without extension"""
return re.sub("\.\w+$", "", self.filepath)
def download_artwork(self, localdir, max_retry):
"""
Download track's artwork and return file path.
Artwork's path is saved in track's metadata as 'artwork-path' key.
"""
if self.get("artwork-url") == "None":
self.metadata["artwork-path"] = None
return None
artwork_dir = localdir + "/artworks"
if not os.path.isdir(artwork_dir):
if os.path.isfile(artwork_dir):
os.unlink(artwork_dir)
os.mkdir(artwork_dir)
artwork_filepath = artwork_dir + "/" + self.gen_artwork_filename()
retry = max_retry
while True:
try:
res = urllib.request.urlopen(self.get("artwork-url"))
with open(artwork_filepath, "wb") as file:
file.write(res.read())
break
except Exception as e:
retry -= 1
if retry < 0:
print(serror("Can't download track's artwork, max retry "
"reached (%d). Error occured: %s" % (
max_retry, type(e))))
return False
else:
print("\033[93mTrack's artwork download failed (%s). "
"Retrying.. (%d/%d) \033[0m" % (
type(e),
max_retry - retry,
max_retry))
self.metadata["artwork-path"] = artwork_filepath
def _progress_hook(self, blocknum, blocksize, totalsize):
""" Progress hook for urlretrieve. """
read = blocknum * blocksize
if totalsize > 0:
percent = read * 1e2 / totalsize
s = "\r%d%% %*d / %d" % (
percent, len(str(totalsize)), read, totalsize)
sys.stdout.write(s)
if read >= totalsize:
sys.stdout.write("\n")
else:
sys.stdout.write("read %d\n" % read)
|
Sliim/soundcloud-syncer | ssyncer/strack.py | strack.track_exists | python | def track_exists(self, localdir):
path = glob.glob(self.gen_localdir(localdir) +
self.gen_filename() + "*")
if len(path) > 0 and os.path.getsize(path[0]) > 0:
return True
return False | Check if track exists in local directory. | train | https://github.com/Sliim/soundcloud-syncer/blob/f15142677bf8e5fb54f40b0eb9a36f21ba940ab6/ssyncer/strack.py#L140-L146 | [
"def gen_filename(self):\n \"\"\" Generate local filename for this track. \"\"\"\n return \"{0}-{1}\".format(\n self.get(\"id\"),\n self.get(\"permalink\"))\n",
"def gen_localdir(self, localdir):\n \"\"\"\n Generate local directory where track will be saved.\n Create it if not exists.\n \"\"\"\n directory = \"{0}/{1}/\".format(localdir, self.get(\"username\"))\n if not os.path.exists(directory):\n os.makedirs(directory)\n return directory\n"
] | class strack:
client = None
metadata = {}
downloaded = False
filename = None
def __init__(self, track_data, **kwargs):
""" Track object initialization, load track metadata. """
if "client" in kwargs:
self.client = kwargs.get("client")
elif "client_id" in kwargs:
self.client = sclient(kwargs.get("client_id"))
else:
self.client = sclient()
def map(key, data):
return str(data[key]) if key in data else ""
self.metadata = {
"id": track_data["id"],
"kind": map("kind", track_data),
"title": map("title", track_data),
"permalink": map("permalink", track_data),
"username": map("permalink", track_data["user"]),
"artist": map("username", track_data["user"]),
"user-url": map("permalink_url", track_data["user"]),
"downloadable": track_data["downloadable"],
"original-format": map("original_format", track_data),
"created-at": map("created_at", track_data),
"duration": map("duration", track_data),
"tags-list": map("tags_list", track_data),
"genre": map("genre", track_data),
"description": map("description", track_data),
"license": map("license", track_data),
"uri": map("uri", track_data),
"permalink-url": map("permalink_url", track_data),
"artwork-url": map("artwork_url",
track_data).replace("large", "crop"),
}
def get(self, key):
""" Get track metadata value from a given key. """
if key in self.metadata:
return self.metadata[key]
return None
def get_download_link(self):
""" Get direct download link with soudcloud's redirect system. """
url = None
if not self.get("downloadable"):
try:
url = self.client.get_location(
self.client.STREAM_URL % self.get("id"))
except serror as e:
print(e)
if not url:
try:
url = self.client.get_location(
self.client.DOWNLOAD_URL % self.get("id"))
except serror as e:
print(e)
return url
def gen_filename(self):
""" Generate local filename for this track. """
return "{0}-{1}".format(
self.get("id"),
self.get("permalink"))
def get_file_extension(self, filepath):
"""
This method check mimetype to define file extension.
If it can't, it use original-format metadata.
"""
mtype = magic.from_file(filepath, mime=True)
if type(mtype) == bytes:
mtype = mtype.decode("utf-8")
if mtype == "audio/mpeg":
ext = ".mp3"
elif mtype == "audio/x-wav":
ext = ".wav"
else:
ext = "." + self.get("original-format")
return ext
def gen_artwork_filename(self):
""" Generate artwork filename for cover of this track. """
return "{0}-{1}.jpg".format(
self.get("id"),
self.get("permalink"))
def gen_localdir(self, localdir):
"""
Generate local directory where track will be saved.
Create it if not exists.
"""
directory = "{0}/{1}/".format(localdir, self.get("username"))
if not os.path.exists(directory):
os.makedirs(directory)
return directory
def get_ignored_tracks(self, localdir):
""" Get ignored tracks list. """
ignore_file = "%s/.ignore" % localdir
list = []
if os.path.exists(ignore_file):
f = open(ignore_file)
ignored = f.readlines()
f.close()
for i in ignored:
list.append("%s/%s" % (localdir, i.rstrip()))
return list
def download(self, localdir, max_retry):
""" Download a track in local directory. """
local_file = self.gen_localdir(localdir) + self.gen_filename()
if self.track_exists(localdir):
print("Track {0} already downloaded, skipping!".format(
self.get("id")))
return False
if local_file in self.get_ignored_tracks(localdir):
print("\033[93mTrack {0} ignored, skipping!!\033[0m".format(
self.get("id")))
return False
dlurl = self.get_download_link()
if not dlurl:
raise serror("Can't download track_id:%d|%s" % (
self.get("id"),
self.get("title")))
retry = max_retry
print("\nDownloading %s (%d).." % (self.get("title"), self.get("id")))
while True:
try:
urllib.request.urlretrieve(dlurl, local_file,
self._progress_hook)
break
except Exception as e:
if os.path.isfile(local_file):
os.unlink(local_file)
retry -= 1
if retry < 0:
raise serror("Can't download track-id %s, max retry "
"reached (%d). Error occured: %s" % (
self.get("id"), max_retry, type(e)))
else:
print("\033[93mError occured for track-id %s (%s). "
"Retrying.. (%d/%d) \033[0m" % (
self.get("id"),
type(e),
max_retry - retry,
max_retry))
except KeyboardInterrupt:
if os.path.isfile(local_file):
os.unlink(local_file)
raise serror("KeyBoard Interrupt: Incomplete file removed.")
self.filepath = local_file + self.get_file_extension(local_file)
os.rename(local_file, self.filepath)
print("Downloaded => %s" % self.filepath)
self.downloaded = True
return True
def process_tags(self, tag=None):
"""Process ID3 Tags for mp3 files."""
if self.downloaded is False:
raise serror("Track not downloaded, can't process tags..")
filetype = magic.from_file(self.filepath, mime=True)
if filetype != "audio/mpeg":
raise serror("Cannot process tags for file type %s." % filetype)
print("Processing tags for %s.." % self.filepath)
if tag is None:
tag = stag()
tag.load_id3(self)
tag.write_id3(self.filepath)
def convert(self):
"""Convert file in mp3 format."""
if self.downloaded is False:
raise serror("Track not downloaded, can't convert file..")
filetype = magic.from_file(self.filepath, mime=True)
if filetype == "audio/mpeg":
print("File is already in mp3 format. Skipping convert.")
return False
rootpath = os.path.dirname(os.path.dirname(self.filepath))
backupdir = rootpath + "/backups/" + self.get("username")
if not os.path.exists(backupdir):
os.makedirs(backupdir)
backupfile = "%s/%s%s" % (
backupdir,
self.gen_filename(),
self.get_file_extension(self.filepath))
newfile = "%s.mp3" % self.filename_without_extension()
os.rename(self.filepath, backupfile)
self.filepath = newfile
print("Converting to %s.." % newfile)
song = AudioSegment.from_file(backupfile)
return song.export(newfile, format="mp3")
def filename_without_extension(self):
"""Return filename without extension"""
return re.sub("\.\w+$", "", self.filepath)
def download_artwork(self, localdir, max_retry):
"""
Download track's artwork and return file path.
Artwork's path is saved in track's metadata as 'artwork-path' key.
"""
if self.get("artwork-url") == "None":
self.metadata["artwork-path"] = None
return None
artwork_dir = localdir + "/artworks"
if not os.path.isdir(artwork_dir):
if os.path.isfile(artwork_dir):
os.unlink(artwork_dir)
os.mkdir(artwork_dir)
artwork_filepath = artwork_dir + "/" + self.gen_artwork_filename()
retry = max_retry
while True:
try:
res = urllib.request.urlopen(self.get("artwork-url"))
with open(artwork_filepath, "wb") as file:
file.write(res.read())
break
except Exception as e:
retry -= 1
if retry < 0:
print(serror("Can't download track's artwork, max retry "
"reached (%d). Error occured: %s" % (
max_retry, type(e))))
return False
else:
print("\033[93mTrack's artwork download failed (%s). "
"Retrying.. (%d/%d) \033[0m" % (
type(e),
max_retry - retry,
max_retry))
self.metadata["artwork-path"] = artwork_filepath
def _progress_hook(self, blocknum, blocksize, totalsize):
""" Progress hook for urlretrieve. """
read = blocknum * blocksize
if totalsize > 0:
percent = read * 1e2 / totalsize
s = "\r%d%% %*d / %d" % (
percent, len(str(totalsize)), read, totalsize)
sys.stdout.write(s)
if read >= totalsize:
sys.stdout.write("\n")
else:
sys.stdout.write("read %d\n" % read)
|
Sliim/soundcloud-syncer | ssyncer/strack.py | strack.get_ignored_tracks | python | def get_ignored_tracks(self, localdir):
ignore_file = "%s/.ignore" % localdir
list = []
if os.path.exists(ignore_file):
f = open(ignore_file)
ignored = f.readlines()
f.close()
for i in ignored:
list.append("%s/%s" % (localdir, i.rstrip()))
return list | Get ignored tracks list. | train | https://github.com/Sliim/soundcloud-syncer/blob/f15142677bf8e5fb54f40b0eb9a36f21ba940ab6/ssyncer/strack.py#L148-L160 | null | class strack:
client = None
metadata = {}
downloaded = False
filename = None
def __init__(self, track_data, **kwargs):
""" Track object initialization, load track metadata. """
if "client" in kwargs:
self.client = kwargs.get("client")
elif "client_id" in kwargs:
self.client = sclient(kwargs.get("client_id"))
else:
self.client = sclient()
def map(key, data):
return str(data[key]) if key in data else ""
self.metadata = {
"id": track_data["id"],
"kind": map("kind", track_data),
"title": map("title", track_data),
"permalink": map("permalink", track_data),
"username": map("permalink", track_data["user"]),
"artist": map("username", track_data["user"]),
"user-url": map("permalink_url", track_data["user"]),
"downloadable": track_data["downloadable"],
"original-format": map("original_format", track_data),
"created-at": map("created_at", track_data),
"duration": map("duration", track_data),
"tags-list": map("tags_list", track_data),
"genre": map("genre", track_data),
"description": map("description", track_data),
"license": map("license", track_data),
"uri": map("uri", track_data),
"permalink-url": map("permalink_url", track_data),
"artwork-url": map("artwork_url",
track_data).replace("large", "crop"),
}
def get(self, key):
""" Get track metadata value from a given key. """
if key in self.metadata:
return self.metadata[key]
return None
def get_download_link(self):
""" Get direct download link with soudcloud's redirect system. """
url = None
if not self.get("downloadable"):
try:
url = self.client.get_location(
self.client.STREAM_URL % self.get("id"))
except serror as e:
print(e)
if not url:
try:
url = self.client.get_location(
self.client.DOWNLOAD_URL % self.get("id"))
except serror as e:
print(e)
return url
def gen_filename(self):
""" Generate local filename for this track. """
return "{0}-{1}".format(
self.get("id"),
self.get("permalink"))
def get_file_extension(self, filepath):
"""
This method check mimetype to define file extension.
If it can't, it use original-format metadata.
"""
mtype = magic.from_file(filepath, mime=True)
if type(mtype) == bytes:
mtype = mtype.decode("utf-8")
if mtype == "audio/mpeg":
ext = ".mp3"
elif mtype == "audio/x-wav":
ext = ".wav"
else:
ext = "." + self.get("original-format")
return ext
def gen_artwork_filename(self):
""" Generate artwork filename for cover of this track. """
return "{0}-{1}.jpg".format(
self.get("id"),
self.get("permalink"))
def gen_localdir(self, localdir):
"""
Generate local directory where track will be saved.
Create it if not exists.
"""
directory = "{0}/{1}/".format(localdir, self.get("username"))
if not os.path.exists(directory):
os.makedirs(directory)
return directory
def track_exists(self, localdir):
""" Check if track exists in local directory. """
path = glob.glob(self.gen_localdir(localdir) +
self.gen_filename() + "*")
if len(path) > 0 and os.path.getsize(path[0]) > 0:
return True
return False
def download(self, localdir, max_retry):
""" Download a track in local directory. """
local_file = self.gen_localdir(localdir) + self.gen_filename()
if self.track_exists(localdir):
print("Track {0} already downloaded, skipping!".format(
self.get("id")))
return False
if local_file in self.get_ignored_tracks(localdir):
print("\033[93mTrack {0} ignored, skipping!!\033[0m".format(
self.get("id")))
return False
dlurl = self.get_download_link()
if not dlurl:
raise serror("Can't download track_id:%d|%s" % (
self.get("id"),
self.get("title")))
retry = max_retry
print("\nDownloading %s (%d).." % (self.get("title"), self.get("id")))
while True:
try:
urllib.request.urlretrieve(dlurl, local_file,
self._progress_hook)
break
except Exception as e:
if os.path.isfile(local_file):
os.unlink(local_file)
retry -= 1
if retry < 0:
raise serror("Can't download track-id %s, max retry "
"reached (%d). Error occured: %s" % (
self.get("id"), max_retry, type(e)))
else:
print("\033[93mError occured for track-id %s (%s). "
"Retrying.. (%d/%d) \033[0m" % (
self.get("id"),
type(e),
max_retry - retry,
max_retry))
except KeyboardInterrupt:
if os.path.isfile(local_file):
os.unlink(local_file)
raise serror("KeyBoard Interrupt: Incomplete file removed.")
self.filepath = local_file + self.get_file_extension(local_file)
os.rename(local_file, self.filepath)
print("Downloaded => %s" % self.filepath)
self.downloaded = True
return True
def process_tags(self, tag=None):
"""Process ID3 Tags for mp3 files."""
if self.downloaded is False:
raise serror("Track not downloaded, can't process tags..")
filetype = magic.from_file(self.filepath, mime=True)
if filetype != "audio/mpeg":
raise serror("Cannot process tags for file type %s." % filetype)
print("Processing tags for %s.." % self.filepath)
if tag is None:
tag = stag()
tag.load_id3(self)
tag.write_id3(self.filepath)
def convert(self):
"""Convert file in mp3 format."""
if self.downloaded is False:
raise serror("Track not downloaded, can't convert file..")
filetype = magic.from_file(self.filepath, mime=True)
if filetype == "audio/mpeg":
print("File is already in mp3 format. Skipping convert.")
return False
rootpath = os.path.dirname(os.path.dirname(self.filepath))
backupdir = rootpath + "/backups/" + self.get("username")
if not os.path.exists(backupdir):
os.makedirs(backupdir)
backupfile = "%s/%s%s" % (
backupdir,
self.gen_filename(),
self.get_file_extension(self.filepath))
newfile = "%s.mp3" % self.filename_without_extension()
os.rename(self.filepath, backupfile)
self.filepath = newfile
print("Converting to %s.." % newfile)
song = AudioSegment.from_file(backupfile)
return song.export(newfile, format="mp3")
def filename_without_extension(self):
"""Return filename without extension"""
return re.sub("\.\w+$", "", self.filepath)
def download_artwork(self, localdir, max_retry):
"""
Download track's artwork and return file path.
Artwork's path is saved in track's metadata as 'artwork-path' key.
"""
if self.get("artwork-url") == "None":
self.metadata["artwork-path"] = None
return None
artwork_dir = localdir + "/artworks"
if not os.path.isdir(artwork_dir):
if os.path.isfile(artwork_dir):
os.unlink(artwork_dir)
os.mkdir(artwork_dir)
artwork_filepath = artwork_dir + "/" + self.gen_artwork_filename()
retry = max_retry
while True:
try:
res = urllib.request.urlopen(self.get("artwork-url"))
with open(artwork_filepath, "wb") as file:
file.write(res.read())
break
except Exception as e:
retry -= 1
if retry < 0:
print(serror("Can't download track's artwork, max retry "
"reached (%d). Error occured: %s" % (
max_retry, type(e))))
return False
else:
print("\033[93mTrack's artwork download failed (%s). "
"Retrying.. (%d/%d) \033[0m" % (
type(e),
max_retry - retry,
max_retry))
self.metadata["artwork-path"] = artwork_filepath
def _progress_hook(self, blocknum, blocksize, totalsize):
""" Progress hook for urlretrieve. """
read = blocknum * blocksize
if totalsize > 0:
percent = read * 1e2 / totalsize
s = "\r%d%% %*d / %d" % (
percent, len(str(totalsize)), read, totalsize)
sys.stdout.write(s)
if read >= totalsize:
sys.stdout.write("\n")
else:
sys.stdout.write("read %d\n" % read)
|
Sliim/soundcloud-syncer | ssyncer/strack.py | strack.download | python | def download(self, localdir, max_retry):
local_file = self.gen_localdir(localdir) + self.gen_filename()
if self.track_exists(localdir):
print("Track {0} already downloaded, skipping!".format(
self.get("id")))
return False
if local_file in self.get_ignored_tracks(localdir):
print("\033[93mTrack {0} ignored, skipping!!\033[0m".format(
self.get("id")))
return False
dlurl = self.get_download_link()
if not dlurl:
raise serror("Can't download track_id:%d|%s" % (
self.get("id"),
self.get("title")))
retry = max_retry
print("\nDownloading %s (%d).." % (self.get("title"), self.get("id")))
while True:
try:
urllib.request.urlretrieve(dlurl, local_file,
self._progress_hook)
break
except Exception as e:
if os.path.isfile(local_file):
os.unlink(local_file)
retry -= 1
if retry < 0:
raise serror("Can't download track-id %s, max retry "
"reached (%d). Error occured: %s" % (
self.get("id"), max_retry, type(e)))
else:
print("\033[93mError occured for track-id %s (%s). "
"Retrying.. (%d/%d) \033[0m" % (
self.get("id"),
type(e),
max_retry - retry,
max_retry))
except KeyboardInterrupt:
if os.path.isfile(local_file):
os.unlink(local_file)
raise serror("KeyBoard Interrupt: Incomplete file removed.")
self.filepath = local_file + self.get_file_extension(local_file)
os.rename(local_file, self.filepath)
print("Downloaded => %s" % self.filepath)
self.downloaded = True
return True | Download a track in local directory. | train | https://github.com/Sliim/soundcloud-syncer/blob/f15142677bf8e5fb54f40b0eb9a36f21ba940ab6/ssyncer/strack.py#L162-L217 | [
"def gen_filename(self):\n \"\"\" Generate local filename for this track. \"\"\"\n return \"{0}-{1}\".format(\n self.get(\"id\"),\n self.get(\"permalink\"))\n",
"def gen_localdir(self, localdir):\n \"\"\"\n Generate local directory where track will be saved.\n Create it if not exists.\n \"\"\"\n directory = \"{0}/{1}/\".format(localdir, self.get(\"username\"))\n if not os.path.exists(directory):\n os.makedirs(directory)\n return directory\n",
"def track_exists(self, localdir):\n \"\"\" Check if track exists in local directory. \"\"\"\n path = glob.glob(self.gen_localdir(localdir) +\n self.gen_filename() + \"*\")\n if len(path) > 0 and os.path.getsize(path[0]) > 0:\n return True\n return False\n"
] | class strack:
client = None
metadata = {}
downloaded = False
filename = None
def __init__(self, track_data, **kwargs):
""" Track object initialization, load track metadata. """
if "client" in kwargs:
self.client = kwargs.get("client")
elif "client_id" in kwargs:
self.client = sclient(kwargs.get("client_id"))
else:
self.client = sclient()
def map(key, data):
return str(data[key]) if key in data else ""
self.metadata = {
"id": track_data["id"],
"kind": map("kind", track_data),
"title": map("title", track_data),
"permalink": map("permalink", track_data),
"username": map("permalink", track_data["user"]),
"artist": map("username", track_data["user"]),
"user-url": map("permalink_url", track_data["user"]),
"downloadable": track_data["downloadable"],
"original-format": map("original_format", track_data),
"created-at": map("created_at", track_data),
"duration": map("duration", track_data),
"tags-list": map("tags_list", track_data),
"genre": map("genre", track_data),
"description": map("description", track_data),
"license": map("license", track_data),
"uri": map("uri", track_data),
"permalink-url": map("permalink_url", track_data),
"artwork-url": map("artwork_url",
track_data).replace("large", "crop"),
}
def get(self, key):
""" Get track metadata value from a given key. """
if key in self.metadata:
return self.metadata[key]
return None
def get_download_link(self):
""" Get direct download link with soudcloud's redirect system. """
url = None
if not self.get("downloadable"):
try:
url = self.client.get_location(
self.client.STREAM_URL % self.get("id"))
except serror as e:
print(e)
if not url:
try:
url = self.client.get_location(
self.client.DOWNLOAD_URL % self.get("id"))
except serror as e:
print(e)
return url
def gen_filename(self):
""" Generate local filename for this track. """
return "{0}-{1}".format(
self.get("id"),
self.get("permalink"))
def get_file_extension(self, filepath):
"""
This method check mimetype to define file extension.
If it can't, it use original-format metadata.
"""
mtype = magic.from_file(filepath, mime=True)
if type(mtype) == bytes:
mtype = mtype.decode("utf-8")
if mtype == "audio/mpeg":
ext = ".mp3"
elif mtype == "audio/x-wav":
ext = ".wav"
else:
ext = "." + self.get("original-format")
return ext
def gen_artwork_filename(self):
""" Generate artwork filename for cover of this track. """
return "{0}-{1}.jpg".format(
self.get("id"),
self.get("permalink"))
def gen_localdir(self, localdir):
"""
Generate local directory where track will be saved.
Create it if not exists.
"""
directory = "{0}/{1}/".format(localdir, self.get("username"))
if not os.path.exists(directory):
os.makedirs(directory)
return directory
def track_exists(self, localdir):
""" Check if track exists in local directory. """
path = glob.glob(self.gen_localdir(localdir) +
self.gen_filename() + "*")
if len(path) > 0 and os.path.getsize(path[0]) > 0:
return True
return False
def get_ignored_tracks(self, localdir):
""" Get ignored tracks list. """
ignore_file = "%s/.ignore" % localdir
list = []
if os.path.exists(ignore_file):
f = open(ignore_file)
ignored = f.readlines()
f.close()
for i in ignored:
list.append("%s/%s" % (localdir, i.rstrip()))
return list
def process_tags(self, tag=None):
"""Process ID3 Tags for mp3 files."""
if self.downloaded is False:
raise serror("Track not downloaded, can't process tags..")
filetype = magic.from_file(self.filepath, mime=True)
if filetype != "audio/mpeg":
raise serror("Cannot process tags for file type %s." % filetype)
print("Processing tags for %s.." % self.filepath)
if tag is None:
tag = stag()
tag.load_id3(self)
tag.write_id3(self.filepath)
def convert(self):
"""Convert file in mp3 format."""
if self.downloaded is False:
raise serror("Track not downloaded, can't convert file..")
filetype = magic.from_file(self.filepath, mime=True)
if filetype == "audio/mpeg":
print("File is already in mp3 format. Skipping convert.")
return False
rootpath = os.path.dirname(os.path.dirname(self.filepath))
backupdir = rootpath + "/backups/" + self.get("username")
if not os.path.exists(backupdir):
os.makedirs(backupdir)
backupfile = "%s/%s%s" % (
backupdir,
self.gen_filename(),
self.get_file_extension(self.filepath))
newfile = "%s.mp3" % self.filename_without_extension()
os.rename(self.filepath, backupfile)
self.filepath = newfile
print("Converting to %s.." % newfile)
song = AudioSegment.from_file(backupfile)
return song.export(newfile, format="mp3")
def filename_without_extension(self):
"""Return filename without extension"""
return re.sub("\.\w+$", "", self.filepath)
def download_artwork(self, localdir, max_retry):
"""
Download track's artwork and return file path.
Artwork's path is saved in track's metadata as 'artwork-path' key.
"""
if self.get("artwork-url") == "None":
self.metadata["artwork-path"] = None
return None
artwork_dir = localdir + "/artworks"
if not os.path.isdir(artwork_dir):
if os.path.isfile(artwork_dir):
os.unlink(artwork_dir)
os.mkdir(artwork_dir)
artwork_filepath = artwork_dir + "/" + self.gen_artwork_filename()
retry = max_retry
while True:
try:
res = urllib.request.urlopen(self.get("artwork-url"))
with open(artwork_filepath, "wb") as file:
file.write(res.read())
break
except Exception as e:
retry -= 1
if retry < 0:
print(serror("Can't download track's artwork, max retry "
"reached (%d). Error occured: %s" % (
max_retry, type(e))))
return False
else:
print("\033[93mTrack's artwork download failed (%s). "
"Retrying.. (%d/%d) \033[0m" % (
type(e),
max_retry - retry,
max_retry))
self.metadata["artwork-path"] = artwork_filepath
def _progress_hook(self, blocknum, blocksize, totalsize):
""" Progress hook for urlretrieve. """
read = blocknum * blocksize
if totalsize > 0:
percent = read * 1e2 / totalsize
s = "\r%d%% %*d / %d" % (
percent, len(str(totalsize)), read, totalsize)
sys.stdout.write(s)
if read >= totalsize:
sys.stdout.write("\n")
else:
sys.stdout.write("read %d\n" % read)
|
Sliim/soundcloud-syncer | ssyncer/strack.py | strack.process_tags | python | def process_tags(self, tag=None):
if self.downloaded is False:
raise serror("Track not downloaded, can't process tags..")
filetype = magic.from_file(self.filepath, mime=True)
if filetype != "audio/mpeg":
raise serror("Cannot process tags for file type %s." % filetype)
print("Processing tags for %s.." % self.filepath)
if tag is None:
tag = stag()
tag.load_id3(self)
tag.write_id3(self.filepath) | Process ID3 Tags for mp3 files. | train | https://github.com/Sliim/soundcloud-syncer/blob/f15142677bf8e5fb54f40b0eb9a36f21ba940ab6/ssyncer/strack.py#L219-L231 | null | class strack:
client = None
metadata = {}
downloaded = False
filename = None
def __init__(self, track_data, **kwargs):
""" Track object initialization, load track metadata. """
if "client" in kwargs:
self.client = kwargs.get("client")
elif "client_id" in kwargs:
self.client = sclient(kwargs.get("client_id"))
else:
self.client = sclient()
def map(key, data):
return str(data[key]) if key in data else ""
self.metadata = {
"id": track_data["id"],
"kind": map("kind", track_data),
"title": map("title", track_data),
"permalink": map("permalink", track_data),
"username": map("permalink", track_data["user"]),
"artist": map("username", track_data["user"]),
"user-url": map("permalink_url", track_data["user"]),
"downloadable": track_data["downloadable"],
"original-format": map("original_format", track_data),
"created-at": map("created_at", track_data),
"duration": map("duration", track_data),
"tags-list": map("tags_list", track_data),
"genre": map("genre", track_data),
"description": map("description", track_data),
"license": map("license", track_data),
"uri": map("uri", track_data),
"permalink-url": map("permalink_url", track_data),
"artwork-url": map("artwork_url",
track_data).replace("large", "crop"),
}
def get(self, key):
""" Get track metadata value from a given key. """
if key in self.metadata:
return self.metadata[key]
return None
def get_download_link(self):
""" Get direct download link with soudcloud's redirect system. """
url = None
if not self.get("downloadable"):
try:
url = self.client.get_location(
self.client.STREAM_URL % self.get("id"))
except serror as e:
print(e)
if not url:
try:
url = self.client.get_location(
self.client.DOWNLOAD_URL % self.get("id"))
except serror as e:
print(e)
return url
def gen_filename(self):
""" Generate local filename for this track. """
return "{0}-{1}".format(
self.get("id"),
self.get("permalink"))
def get_file_extension(self, filepath):
"""
This method check mimetype to define file extension.
If it can't, it use original-format metadata.
"""
mtype = magic.from_file(filepath, mime=True)
if type(mtype) == bytes:
mtype = mtype.decode("utf-8")
if mtype == "audio/mpeg":
ext = ".mp3"
elif mtype == "audio/x-wav":
ext = ".wav"
else:
ext = "." + self.get("original-format")
return ext
def gen_artwork_filename(self):
""" Generate artwork filename for cover of this track. """
return "{0}-{1}.jpg".format(
self.get("id"),
self.get("permalink"))
def gen_localdir(self, localdir):
"""
Generate local directory where track will be saved.
Create it if not exists.
"""
directory = "{0}/{1}/".format(localdir, self.get("username"))
if not os.path.exists(directory):
os.makedirs(directory)
return directory
def track_exists(self, localdir):
""" Check if track exists in local directory. """
path = glob.glob(self.gen_localdir(localdir) +
self.gen_filename() + "*")
if len(path) > 0 and os.path.getsize(path[0]) > 0:
return True
return False
def get_ignored_tracks(self, localdir):
""" Get ignored tracks list. """
ignore_file = "%s/.ignore" % localdir
list = []
if os.path.exists(ignore_file):
f = open(ignore_file)
ignored = f.readlines()
f.close()
for i in ignored:
list.append("%s/%s" % (localdir, i.rstrip()))
return list
def download(self, localdir, max_retry):
""" Download a track in local directory. """
local_file = self.gen_localdir(localdir) + self.gen_filename()
if self.track_exists(localdir):
print("Track {0} already downloaded, skipping!".format(
self.get("id")))
return False
if local_file in self.get_ignored_tracks(localdir):
print("\033[93mTrack {0} ignored, skipping!!\033[0m".format(
self.get("id")))
return False
dlurl = self.get_download_link()
if not dlurl:
raise serror("Can't download track_id:%d|%s" % (
self.get("id"),
self.get("title")))
retry = max_retry
print("\nDownloading %s (%d).." % (self.get("title"), self.get("id")))
while True:
try:
urllib.request.urlretrieve(dlurl, local_file,
self._progress_hook)
break
except Exception as e:
if os.path.isfile(local_file):
os.unlink(local_file)
retry -= 1
if retry < 0:
raise serror("Can't download track-id %s, max retry "
"reached (%d). Error occured: %s" % (
self.get("id"), max_retry, type(e)))
else:
print("\033[93mError occured for track-id %s (%s). "
"Retrying.. (%d/%d) \033[0m" % (
self.get("id"),
type(e),
max_retry - retry,
max_retry))
except KeyboardInterrupt:
if os.path.isfile(local_file):
os.unlink(local_file)
raise serror("KeyBoard Interrupt: Incomplete file removed.")
self.filepath = local_file + self.get_file_extension(local_file)
os.rename(local_file, self.filepath)
print("Downloaded => %s" % self.filepath)
self.downloaded = True
return True
def convert(self):
"""Convert file in mp3 format."""
if self.downloaded is False:
raise serror("Track not downloaded, can't convert file..")
filetype = magic.from_file(self.filepath, mime=True)
if filetype == "audio/mpeg":
print("File is already in mp3 format. Skipping convert.")
return False
rootpath = os.path.dirname(os.path.dirname(self.filepath))
backupdir = rootpath + "/backups/" + self.get("username")
if not os.path.exists(backupdir):
os.makedirs(backupdir)
backupfile = "%s/%s%s" % (
backupdir,
self.gen_filename(),
self.get_file_extension(self.filepath))
newfile = "%s.mp3" % self.filename_without_extension()
os.rename(self.filepath, backupfile)
self.filepath = newfile
print("Converting to %s.." % newfile)
song = AudioSegment.from_file(backupfile)
return song.export(newfile, format="mp3")
def filename_without_extension(self):
"""Return filename without extension"""
return re.sub("\.\w+$", "", self.filepath)
def download_artwork(self, localdir, max_retry):
"""
Download track's artwork and return file path.
Artwork's path is saved in track's metadata as 'artwork-path' key.
"""
if self.get("artwork-url") == "None":
self.metadata["artwork-path"] = None
return None
artwork_dir = localdir + "/artworks"
if not os.path.isdir(artwork_dir):
if os.path.isfile(artwork_dir):
os.unlink(artwork_dir)
os.mkdir(artwork_dir)
artwork_filepath = artwork_dir + "/" + self.gen_artwork_filename()
retry = max_retry
while True:
try:
res = urllib.request.urlopen(self.get("artwork-url"))
with open(artwork_filepath, "wb") as file:
file.write(res.read())
break
except Exception as e:
retry -= 1
if retry < 0:
print(serror("Can't download track's artwork, max retry "
"reached (%d). Error occured: %s" % (
max_retry, type(e))))
return False
else:
print("\033[93mTrack's artwork download failed (%s). "
"Retrying.. (%d/%d) \033[0m" % (
type(e),
max_retry - retry,
max_retry))
self.metadata["artwork-path"] = artwork_filepath
def _progress_hook(self, blocknum, blocksize, totalsize):
""" Progress hook for urlretrieve. """
read = blocknum * blocksize
if totalsize > 0:
percent = read * 1e2 / totalsize
s = "\r%d%% %*d / %d" % (
percent, len(str(totalsize)), read, totalsize)
sys.stdout.write(s)
if read >= totalsize:
sys.stdout.write("\n")
else:
sys.stdout.write("read %d\n" % read)
|
Sliim/soundcloud-syncer | ssyncer/strack.py | strack.convert | python | def convert(self):
if self.downloaded is False:
raise serror("Track not downloaded, can't convert file..")
filetype = magic.from_file(self.filepath, mime=True)
if filetype == "audio/mpeg":
print("File is already in mp3 format. Skipping convert.")
return False
rootpath = os.path.dirname(os.path.dirname(self.filepath))
backupdir = rootpath + "/backups/" + self.get("username")
if not os.path.exists(backupdir):
os.makedirs(backupdir)
backupfile = "%s/%s%s" % (
backupdir,
self.gen_filename(),
self.get_file_extension(self.filepath))
newfile = "%s.mp3" % self.filename_without_extension()
os.rename(self.filepath, backupfile)
self.filepath = newfile
print("Converting to %s.." % newfile)
song = AudioSegment.from_file(backupfile)
return song.export(newfile, format="mp3") | Convert file in mp3 format. | train | https://github.com/Sliim/soundcloud-syncer/blob/f15142677bf8e5fb54f40b0eb9a36f21ba940ab6/ssyncer/strack.py#L233-L258 | null | class strack:
client = None
metadata = {}
downloaded = False
filename = None
def __init__(self, track_data, **kwargs):
""" Track object initialization, load track metadata. """
if "client" in kwargs:
self.client = kwargs.get("client")
elif "client_id" in kwargs:
self.client = sclient(kwargs.get("client_id"))
else:
self.client = sclient()
def map(key, data):
return str(data[key]) if key in data else ""
self.metadata = {
"id": track_data["id"],
"kind": map("kind", track_data),
"title": map("title", track_data),
"permalink": map("permalink", track_data),
"username": map("permalink", track_data["user"]),
"artist": map("username", track_data["user"]),
"user-url": map("permalink_url", track_data["user"]),
"downloadable": track_data["downloadable"],
"original-format": map("original_format", track_data),
"created-at": map("created_at", track_data),
"duration": map("duration", track_data),
"tags-list": map("tags_list", track_data),
"genre": map("genre", track_data),
"description": map("description", track_data),
"license": map("license", track_data),
"uri": map("uri", track_data),
"permalink-url": map("permalink_url", track_data),
"artwork-url": map("artwork_url",
track_data).replace("large", "crop"),
}
def get(self, key):
""" Get track metadata value from a given key. """
if key in self.metadata:
return self.metadata[key]
return None
def get_download_link(self):
""" Get direct download link with soudcloud's redirect system. """
url = None
if not self.get("downloadable"):
try:
url = self.client.get_location(
self.client.STREAM_URL % self.get("id"))
except serror as e:
print(e)
if not url:
try:
url = self.client.get_location(
self.client.DOWNLOAD_URL % self.get("id"))
except serror as e:
print(e)
return url
def gen_filename(self):
""" Generate local filename for this track. """
return "{0}-{1}".format(
self.get("id"),
self.get("permalink"))
def get_file_extension(self, filepath):
"""
This method check mimetype to define file extension.
If it can't, it use original-format metadata.
"""
mtype = magic.from_file(filepath, mime=True)
if type(mtype) == bytes:
mtype = mtype.decode("utf-8")
if mtype == "audio/mpeg":
ext = ".mp3"
elif mtype == "audio/x-wav":
ext = ".wav"
else:
ext = "." + self.get("original-format")
return ext
def gen_artwork_filename(self):
""" Generate artwork filename for cover of this track. """
return "{0}-{1}.jpg".format(
self.get("id"),
self.get("permalink"))
def gen_localdir(self, localdir):
"""
Generate local directory where track will be saved.
Create it if not exists.
"""
directory = "{0}/{1}/".format(localdir, self.get("username"))
if not os.path.exists(directory):
os.makedirs(directory)
return directory
def track_exists(self, localdir):
""" Check if track exists in local directory. """
path = glob.glob(self.gen_localdir(localdir) +
self.gen_filename() + "*")
if len(path) > 0 and os.path.getsize(path[0]) > 0:
return True
return False
def get_ignored_tracks(self, localdir):
""" Get ignored tracks list. """
ignore_file = "%s/.ignore" % localdir
list = []
if os.path.exists(ignore_file):
f = open(ignore_file)
ignored = f.readlines()
f.close()
for i in ignored:
list.append("%s/%s" % (localdir, i.rstrip()))
return list
def download(self, localdir, max_retry):
""" Download a track in local directory. """
local_file = self.gen_localdir(localdir) + self.gen_filename()
if self.track_exists(localdir):
print("Track {0} already downloaded, skipping!".format(
self.get("id")))
return False
if local_file in self.get_ignored_tracks(localdir):
print("\033[93mTrack {0} ignored, skipping!!\033[0m".format(
self.get("id")))
return False
dlurl = self.get_download_link()
if not dlurl:
raise serror("Can't download track_id:%d|%s" % (
self.get("id"),
self.get("title")))
retry = max_retry
print("\nDownloading %s (%d).." % (self.get("title"), self.get("id")))
while True:
try:
urllib.request.urlretrieve(dlurl, local_file,
self._progress_hook)
break
except Exception as e:
if os.path.isfile(local_file):
os.unlink(local_file)
retry -= 1
if retry < 0:
raise serror("Can't download track-id %s, max retry "
"reached (%d). Error occured: %s" % (
self.get("id"), max_retry, type(e)))
else:
print("\033[93mError occured for track-id %s (%s). "
"Retrying.. (%d/%d) \033[0m" % (
self.get("id"),
type(e),
max_retry - retry,
max_retry))
except KeyboardInterrupt:
if os.path.isfile(local_file):
os.unlink(local_file)
raise serror("KeyBoard Interrupt: Incomplete file removed.")
self.filepath = local_file + self.get_file_extension(local_file)
os.rename(local_file, self.filepath)
print("Downloaded => %s" % self.filepath)
self.downloaded = True
return True
def process_tags(self, tag=None):
"""Process ID3 Tags for mp3 files."""
if self.downloaded is False:
raise serror("Track not downloaded, can't process tags..")
filetype = magic.from_file(self.filepath, mime=True)
if filetype != "audio/mpeg":
raise serror("Cannot process tags for file type %s." % filetype)
print("Processing tags for %s.." % self.filepath)
if tag is None:
tag = stag()
tag.load_id3(self)
tag.write_id3(self.filepath)
def filename_without_extension(self):
"""Return filename without extension"""
return re.sub("\.\w+$", "", self.filepath)
def download_artwork(self, localdir, max_retry):
"""
Download track's artwork and return file path.
Artwork's path is saved in track's metadata as 'artwork-path' key.
"""
if self.get("artwork-url") == "None":
self.metadata["artwork-path"] = None
return None
artwork_dir = localdir + "/artworks"
if not os.path.isdir(artwork_dir):
if os.path.isfile(artwork_dir):
os.unlink(artwork_dir)
os.mkdir(artwork_dir)
artwork_filepath = artwork_dir + "/" + self.gen_artwork_filename()
retry = max_retry
while True:
try:
res = urllib.request.urlopen(self.get("artwork-url"))
with open(artwork_filepath, "wb") as file:
file.write(res.read())
break
except Exception as e:
retry -= 1
if retry < 0:
print(serror("Can't download track's artwork, max retry "
"reached (%d). Error occured: %s" % (
max_retry, type(e))))
return False
else:
print("\033[93mTrack's artwork download failed (%s). "
"Retrying.. (%d/%d) \033[0m" % (
type(e),
max_retry - retry,
max_retry))
self.metadata["artwork-path"] = artwork_filepath
def _progress_hook(self, blocknum, blocksize, totalsize):
""" Progress hook for urlretrieve. """
read = blocknum * blocksize
if totalsize > 0:
percent = read * 1e2 / totalsize
s = "\r%d%% %*d / %d" % (
percent, len(str(totalsize)), read, totalsize)
sys.stdout.write(s)
if read >= totalsize:
sys.stdout.write("\n")
else:
sys.stdout.write("read %d\n" % read)
|
Sliim/soundcloud-syncer | ssyncer/strack.py | strack.download_artwork | python | def download_artwork(self, localdir, max_retry):
if self.get("artwork-url") == "None":
self.metadata["artwork-path"] = None
return None
artwork_dir = localdir + "/artworks"
if not os.path.isdir(artwork_dir):
if os.path.isfile(artwork_dir):
os.unlink(artwork_dir)
os.mkdir(artwork_dir)
artwork_filepath = artwork_dir + "/" + self.gen_artwork_filename()
retry = max_retry
while True:
try:
res = urllib.request.urlopen(self.get("artwork-url"))
with open(artwork_filepath, "wb") as file:
file.write(res.read())
break
except Exception as e:
retry -= 1
if retry < 0:
print(serror("Can't download track's artwork, max retry "
"reached (%d). Error occured: %s" % (
max_retry, type(e))))
return False
else:
print("\033[93mTrack's artwork download failed (%s). "
"Retrying.. (%d/%d) \033[0m" % (
type(e),
max_retry - retry,
max_retry))
self.metadata["artwork-path"] = artwork_filepath | Download track's artwork and return file path.
Artwork's path is saved in track's metadata as 'artwork-path' key. | train | https://github.com/Sliim/soundcloud-syncer/blob/f15142677bf8e5fb54f40b0eb9a36f21ba940ab6/ssyncer/strack.py#L264-L302 | [
"def get(self, key):\n \"\"\" Get track metadata value from a given key. \"\"\"\n if key in self.metadata:\n return self.metadata[key]\n return None\n"
] | class strack:
client = None
metadata = {}
downloaded = False
filename = None
def __init__(self, track_data, **kwargs):
""" Track object initialization, load track metadata. """
if "client" in kwargs:
self.client = kwargs.get("client")
elif "client_id" in kwargs:
self.client = sclient(kwargs.get("client_id"))
else:
self.client = sclient()
def map(key, data):
return str(data[key]) if key in data else ""
self.metadata = {
"id": track_data["id"],
"kind": map("kind", track_data),
"title": map("title", track_data),
"permalink": map("permalink", track_data),
"username": map("permalink", track_data["user"]),
"artist": map("username", track_data["user"]),
"user-url": map("permalink_url", track_data["user"]),
"downloadable": track_data["downloadable"],
"original-format": map("original_format", track_data),
"created-at": map("created_at", track_data),
"duration": map("duration", track_data),
"tags-list": map("tags_list", track_data),
"genre": map("genre", track_data),
"description": map("description", track_data),
"license": map("license", track_data),
"uri": map("uri", track_data),
"permalink-url": map("permalink_url", track_data),
"artwork-url": map("artwork_url",
track_data).replace("large", "crop"),
}
def get(self, key):
""" Get track metadata value from a given key. """
if key in self.metadata:
return self.metadata[key]
return None
def get_download_link(self):
""" Get direct download link with soudcloud's redirect system. """
url = None
if not self.get("downloadable"):
try:
url = self.client.get_location(
self.client.STREAM_URL % self.get("id"))
except serror as e:
print(e)
if not url:
try:
url = self.client.get_location(
self.client.DOWNLOAD_URL % self.get("id"))
except serror as e:
print(e)
return url
def gen_filename(self):
""" Generate local filename for this track. """
return "{0}-{1}".format(
self.get("id"),
self.get("permalink"))
def get_file_extension(self, filepath):
"""
This method check mimetype to define file extension.
If it can't, it use original-format metadata.
"""
mtype = magic.from_file(filepath, mime=True)
if type(mtype) == bytes:
mtype = mtype.decode("utf-8")
if mtype == "audio/mpeg":
ext = ".mp3"
elif mtype == "audio/x-wav":
ext = ".wav"
else:
ext = "." + self.get("original-format")
return ext
def gen_artwork_filename(self):
""" Generate artwork filename for cover of this track. """
return "{0}-{1}.jpg".format(
self.get("id"),
self.get("permalink"))
def gen_localdir(self, localdir):
"""
Generate local directory where track will be saved.
Create it if not exists.
"""
directory = "{0}/{1}/".format(localdir, self.get("username"))
if not os.path.exists(directory):
os.makedirs(directory)
return directory
def track_exists(self, localdir):
""" Check if track exists in local directory. """
path = glob.glob(self.gen_localdir(localdir) +
self.gen_filename() + "*")
if len(path) > 0 and os.path.getsize(path[0]) > 0:
return True
return False
def get_ignored_tracks(self, localdir):
""" Get ignored tracks list. """
ignore_file = "%s/.ignore" % localdir
list = []
if os.path.exists(ignore_file):
f = open(ignore_file)
ignored = f.readlines()
f.close()
for i in ignored:
list.append("%s/%s" % (localdir, i.rstrip()))
return list
def download(self, localdir, max_retry):
""" Download a track in local directory. """
local_file = self.gen_localdir(localdir) + self.gen_filename()
if self.track_exists(localdir):
print("Track {0} already downloaded, skipping!".format(
self.get("id")))
return False
if local_file in self.get_ignored_tracks(localdir):
print("\033[93mTrack {0} ignored, skipping!!\033[0m".format(
self.get("id")))
return False
dlurl = self.get_download_link()
if not dlurl:
raise serror("Can't download track_id:%d|%s" % (
self.get("id"),
self.get("title")))
retry = max_retry
print("\nDownloading %s (%d).." % (self.get("title"), self.get("id")))
while True:
try:
urllib.request.urlretrieve(dlurl, local_file,
self._progress_hook)
break
except Exception as e:
if os.path.isfile(local_file):
os.unlink(local_file)
retry -= 1
if retry < 0:
raise serror("Can't download track-id %s, max retry "
"reached (%d). Error occured: %s" % (
self.get("id"), max_retry, type(e)))
else:
print("\033[93mError occured for track-id %s (%s). "
"Retrying.. (%d/%d) \033[0m" % (
self.get("id"),
type(e),
max_retry - retry,
max_retry))
except KeyboardInterrupt:
if os.path.isfile(local_file):
os.unlink(local_file)
raise serror("KeyBoard Interrupt: Incomplete file removed.")
self.filepath = local_file + self.get_file_extension(local_file)
os.rename(local_file, self.filepath)
print("Downloaded => %s" % self.filepath)
self.downloaded = True
return True
def process_tags(self, tag=None):
"""Process ID3 Tags for mp3 files."""
if self.downloaded is False:
raise serror("Track not downloaded, can't process tags..")
filetype = magic.from_file(self.filepath, mime=True)
if filetype != "audio/mpeg":
raise serror("Cannot process tags for file type %s." % filetype)
print("Processing tags for %s.." % self.filepath)
if tag is None:
tag = stag()
tag.load_id3(self)
tag.write_id3(self.filepath)
def convert(self):
"""Convert file in mp3 format."""
if self.downloaded is False:
raise serror("Track not downloaded, can't convert file..")
filetype = magic.from_file(self.filepath, mime=True)
if filetype == "audio/mpeg":
print("File is already in mp3 format. Skipping convert.")
return False
rootpath = os.path.dirname(os.path.dirname(self.filepath))
backupdir = rootpath + "/backups/" + self.get("username")
if not os.path.exists(backupdir):
os.makedirs(backupdir)
backupfile = "%s/%s%s" % (
backupdir,
self.gen_filename(),
self.get_file_extension(self.filepath))
newfile = "%s.mp3" % self.filename_without_extension()
os.rename(self.filepath, backupfile)
self.filepath = newfile
print("Converting to %s.." % newfile)
song = AudioSegment.from_file(backupfile)
return song.export(newfile, format="mp3")
def filename_without_extension(self):
"""Return filename without extension"""
return re.sub("\.\w+$", "", self.filepath)
def _progress_hook(self, blocknum, blocksize, totalsize):
""" Progress hook for urlretrieve. """
read = blocknum * blocksize
if totalsize > 0:
percent = read * 1e2 / totalsize
s = "\r%d%% %*d / %d" % (
percent, len(str(totalsize)), read, totalsize)
sys.stdout.write(s)
if read >= totalsize:
sys.stdout.write("\n")
else:
sys.stdout.write("read %d\n" % read)
|
Sliim/soundcloud-syncer | ssyncer/strack.py | strack._progress_hook | python | def _progress_hook(self, blocknum, blocksize, totalsize):
read = blocknum * blocksize
if totalsize > 0:
percent = read * 1e2 / totalsize
s = "\r%d%% %*d / %d" % (
percent, len(str(totalsize)), read, totalsize)
sys.stdout.write(s)
if read >= totalsize:
sys.stdout.write("\n")
else:
sys.stdout.write("read %d\n" % read) | Progress hook for urlretrieve. | train | https://github.com/Sliim/soundcloud-syncer/blob/f15142677bf8e5fb54f40b0eb9a36f21ba940ab6/ssyncer/strack.py#L304-L316 | null | class strack:
client = None
metadata = {}
downloaded = False
filename = None
def __init__(self, track_data, **kwargs):
""" Track object initialization, load track metadata. """
if "client" in kwargs:
self.client = kwargs.get("client")
elif "client_id" in kwargs:
self.client = sclient(kwargs.get("client_id"))
else:
self.client = sclient()
def map(key, data):
return str(data[key]) if key in data else ""
self.metadata = {
"id": track_data["id"],
"kind": map("kind", track_data),
"title": map("title", track_data),
"permalink": map("permalink", track_data),
"username": map("permalink", track_data["user"]),
"artist": map("username", track_data["user"]),
"user-url": map("permalink_url", track_data["user"]),
"downloadable": track_data["downloadable"],
"original-format": map("original_format", track_data),
"created-at": map("created_at", track_data),
"duration": map("duration", track_data),
"tags-list": map("tags_list", track_data),
"genre": map("genre", track_data),
"description": map("description", track_data),
"license": map("license", track_data),
"uri": map("uri", track_data),
"permalink-url": map("permalink_url", track_data),
"artwork-url": map("artwork_url",
track_data).replace("large", "crop"),
}
def get(self, key):
""" Get track metadata value from a given key. """
if key in self.metadata:
return self.metadata[key]
return None
def get_download_link(self):
""" Get direct download link with soudcloud's redirect system. """
url = None
if not self.get("downloadable"):
try:
url = self.client.get_location(
self.client.STREAM_URL % self.get("id"))
except serror as e:
print(e)
if not url:
try:
url = self.client.get_location(
self.client.DOWNLOAD_URL % self.get("id"))
except serror as e:
print(e)
return url
def gen_filename(self):
""" Generate local filename for this track. """
return "{0}-{1}".format(
self.get("id"),
self.get("permalink"))
def get_file_extension(self, filepath):
    """Pick a file extension for *filepath*.

    The MIME type reported by libmagic selects ".mp3" or ".wav"; any
    other type falls back to the track's original-format metadata.
    """
    mime = magic.from_file(filepath, mime=True)
    if type(mime) is bytes:
        mime = mime.decode("utf-8")
    known = {"audio/mpeg": ".mp3", "audio/x-wav": ".wav"}
    if mime in known:
        return known[mime]
    return "." + self.get("original-format")
def gen_artwork_filename(self):
""" Generate artwork filename for cover of this track. """
return "{0}-{1}.jpg".format(
self.get("id"),
self.get("permalink"))
def gen_localdir(self, localdir):
"""
Generate local directory where track will be saved.
Create it if not exists.
"""
directory = "{0}/{1}/".format(localdir, self.get("username"))
if not os.path.exists(directory):
os.makedirs(directory)
return directory
def track_exists(self, localdir):
""" Check if track exists in local directory. """
path = glob.glob(self.gen_localdir(localdir) +
self.gen_filename() + "*")
if len(path) > 0 and os.path.getsize(path[0]) > 0:
return True
return False
def get_ignored_tracks(self, localdir):
""" Get ignored tracks list. """
ignore_file = "%s/.ignore" % localdir
list = []
if os.path.exists(ignore_file):
f = open(ignore_file)
ignored = f.readlines()
f.close()
for i in ignored:
list.append("%s/%s" % (localdir, i.rstrip()))
return list
def download(self, localdir, max_retry):
    """Download this track into *localdir*, retrying on failure.

    Skips (returning False) when the track is already present or is
    listed in the ".ignore" file. On success, renames the temporary
    file to its final name with the detected extension, records it in
    ``self.filepath``, sets ``self.downloaded`` and returns True.

    :param localdir: root directory for downloads
    :param max_retry: number of retries before giving up
    :raises serror: when no download URL can be resolved, when all
        retries are exhausted, or on keyboard interrupt
    """
    # Extension is appended only after the download succeeds.
    local_file = self.gen_localdir(localdir) + self.gen_filename()
    if self.track_exists(localdir):
        print("Track {0} already downloaded, skipping!".format(
            self.get("id")))
        return False
    if local_file in self.get_ignored_tracks(localdir):
        print("\033[93mTrack {0} ignored, skipping!!\033[0m".format(
            self.get("id")))
        return False
    dlurl = self.get_download_link()
    if not dlurl:
        raise serror("Can't download track_id:%d|%s" % (
            self.get("id"),
            self.get("title")))
    retry = max_retry
    print("\nDownloading %s (%d).." % (self.get("title"), self.get("id")))
    while True:
        try:
            urllib.request.urlretrieve(dlurl, local_file,
                                       self._progress_hook)
            break
        except Exception as e:
            # Remove the partial file before deciding whether to retry.
            if os.path.isfile(local_file):
                os.unlink(local_file)
            retry -= 1
            if retry < 0:
                raise serror("Can't download track-id %s, max retry "
                             "reached (%d). Error occured: %s" % (
                                 self.get("id"), max_retry, type(e)))
            else:
                print("\033[93mError occured for track-id %s (%s). "
                      "Retrying.. (%d/%d) \033[0m" % (
                          self.get("id"),
                          type(e),
                          max_retry - retry,
                          max_retry))
        except KeyboardInterrupt:
            # Reached despite the broad handler above: KeyboardInterrupt
            # derives from BaseException, not Exception, in Python 3.
            if os.path.isfile(local_file):
                os.unlink(local_file)
            raise serror("KeyBoard Interrupt: Incomplete file removed.")
    # Success: give the file its real extension and record the result.
    self.filepath = local_file + self.get_file_extension(local_file)
    os.rename(local_file, self.filepath)
    print("Downloaded => %s" % self.filepath)
    self.downloaded = True
    return True
def process_tags(self, tag=None):
    """Write ID3 tags into the downloaded mp3 file.

    A fresh stag instance is used unless *tag* is supplied.

    :raises serror: when the track has not been downloaded yet, or when
        the file is not an mp3
    """
    if self.downloaded is False:
        raise serror("Track not downloaded, can't process tags..")
    mime = magic.from_file(self.filepath, mime=True)
    if mime != "audio/mpeg":
        raise serror("Cannot process tags for file type %s." % mime)
    print("Processing tags for %s.." % self.filepath)
    tagger = stag() if tag is None else tag
    tagger.load_id3(self)
    tagger.write_id3(self.filepath)
def convert(self):
    """Convert the downloaded file to mp3 format.

    The original file is moved to "<root>/backups/<username>/" and a
    new ".mp3" file is written next to the old location via pydub.

    :returns: False when the file is already an mp3, otherwise the
        result of AudioSegment.export
    :raises serror: when the track has not been downloaded yet
    """
    if self.downloaded is False:
        raise serror("Track not downloaded, can't convert file..")
    filetype = magic.from_file(self.filepath, mime=True)
    if filetype == "audio/mpeg":
        print("File is already in mp3 format. Skipping convert.")
        return False
    # Root is two levels up from the file (files live in <root>/<user>/).
    rootpath = os.path.dirname(os.path.dirname(self.filepath))
    backupdir = rootpath + "/backups/" + self.get("username")
    if not os.path.exists(backupdir):
        os.makedirs(backupdir)
    backupfile = "%s/%s%s" % (
        backupdir,
        self.gen_filename(),
        self.get_file_extension(self.filepath))
    newfile = "%s.mp3" % self.filename_without_extension()
    os.rename(self.filepath, backupfile)
    # NOTE(review): self.filepath is repointed at the mp3 before the
    # conversion runs — if export fails, filepath names a missing file.
    self.filepath = newfile
    print("Converting to %s.." % newfile)
    song = AudioSegment.from_file(backupfile)
    return song.export(newfile, format="mp3")
def filename_without_extension(self):
"""Return filename without extension"""
return re.sub("\.\w+$", "", self.filepath)
def download_artwork(self, localdir, max_retry):
    """
    Download track's artwork and return file path.
    Artwork's path is saved in track's metadata as 'artwork-path' key.

    :param localdir: root directory for downloads
    :param max_retry: number of retries before giving up
    :returns: the artwork file path on success, None when the track has
        no artwork, False when every retry failed
    """
    if self.get("artwork-url") == "None":
        self.metadata["artwork-path"] = None
        return None
    artwork_dir = localdir + "/artworks"
    if not os.path.isdir(artwork_dir):
        # A stale regular file with the same name would make mkdir fail.
        if os.path.isfile(artwork_dir):
            os.unlink(artwork_dir)
        os.mkdir(artwork_dir)
    artwork_filepath = artwork_dir + "/" + self.gen_artwork_filename()
    retry = max_retry
    while True:
        try:
            res = urllib.request.urlopen(self.get("artwork-url"))
            with open(artwork_filepath, "wb") as file:
                file.write(res.read())
            break
        except Exception as e:
            retry -= 1
            if retry < 0:
                print(serror("Can't download track's artwork, max retry "
                             "reached (%d). Error occured: %s" % (
                                 max_retry, type(e))))
                return False
            else:
                print("\033[93mTrack's artwork download failed (%s). "
                      "Retrying.. (%d/%d) \033[0m" % (
                          type(e),
                          max_retry - retry,
                          max_retry))
    self.metadata["artwork-path"] = artwork_filepath
    # Return the path as documented; the success path previously fell
    # through and returned None, indistinguishable from "no artwork".
    return artwork_filepath
|
Sliim/soundcloud-syncer | ssyncer/strack.py | stag.load_id3 | python | def load_id3(self, track):
if not isinstance(track, strack):
raise TypeError('strack object required')
timestamp = calendar.timegm(parse(track.get("created-at")).timetuple())
self.mapper[TIT1] = TIT1(text=track.get("description"))
self.mapper[TIT2] = TIT2(text=track.get("title"))
self.mapper[TIT3] = TIT3(text=track.get("tags-list"))
self.mapper[TDOR] = TDOR(text=str(timestamp))
self.mapper[TLEN] = TLEN(text=track.get("duration"))
self.mapper[TOFN] = TOFN(text=track.get("permalink"))
self.mapper[TCON] = TCON(text=track.get("genre"))
self.mapper[TCOP] = TCOP(text=track.get("license"))
self.mapper[WOAS] = WOAS(url=track.get("permalink-url"))
self.mapper[WOAF] = WOAF(url=track.get("uri"))
self.mapper[TPUB] = TPUB(text=track.get("username"))
self.mapper[WOAR] = WOAR(url=track.get("user-url"))
self.mapper[TPE1] = TPE1(text=track.get("artist"))
self.mapper[TALB] = TALB(text="%s Soundcloud tracks"
% track.get("artist"))
if track.get("artwork-path") is not None:
self.mapper[APIC] = APIC(value=track.get("artwork-path")) | Load id3 tags from strack metadata | train | https://github.com/Sliim/soundcloud-syncer/blob/f15142677bf8e5fb54f40b0eb9a36f21ba940ab6/ssyncer/strack.py#L324-L348 | null | class stag:
def __init__(self):
self.tmpdir = tempfile.mkdtemp()
self.mapper = Tag24()
def write_id3(self, filename):
""" Write id3 tags """
if not os.path.exists(filename):
raise ValueError("File doesn't exists.")
self.mapper.write(filename)
|
Sliim/soundcloud-syncer | ssyncer/strack.py | stag.write_id3 | python | def write_id3(self, filename):
if not os.path.exists(filename):
raise ValueError("File doesn't exists.")
self.mapper.write(filename) | Write id3 tags | train | https://github.com/Sliim/soundcloud-syncer/blob/f15142677bf8e5fb54f40b0eb9a36f21ba940ab6/ssyncer/strack.py#L350-L355 | null | class stag:
def __init__(self):
self.tmpdir = tempfile.mkdtemp()
self.mapper = Tag24()
def load_id3(self, track):
    """Populate the tag mapper with ID3 frames built from a strack's
    metadata.

    :param track: the strack whose metadata is mapped onto ID3 frames
    :raises TypeError: if *track* is not a strack instance
    """
    if not isinstance(track, strack):
        raise TypeError('strack object required')
    # Release date as a Unix timestamp (UTC) parsed from "created-at".
    timestamp = calendar.timegm(parse(track.get("created-at")).timetuple())
    self.mapper[TIT1] = TIT1(text=track.get("description"))  # content group
    self.mapper[TIT2] = TIT2(text=track.get("title"))        # title
    self.mapper[TIT3] = TIT3(text=track.get("tags-list"))    # subtitle
    self.mapper[TDOR] = TDOR(text=str(timestamp))            # orig. release
    self.mapper[TLEN] = TLEN(text=track.get("duration"))     # length
    self.mapper[TOFN] = TOFN(text=track.get("permalink"))    # orig. filename
    self.mapper[TCON] = TCON(text=track.get("genre"))        # genre
    self.mapper[TCOP] = TCOP(text=track.get("license"))      # copyright
    self.mapper[WOAS] = WOAS(url=track.get("permalink-url")) # source page
    self.mapper[WOAF] = WOAF(url=track.get("uri"))           # file URL
    self.mapper[TPUB] = TPUB(text=track.get("username"))     # publisher
    self.mapper[WOAR] = WOAR(url=track.get("user-url"))      # artist page
    self.mapper[TPE1] = TPE1(text=track.get("artist"))       # lead artist
    self.mapper[TALB] = TALB(text="%s Soundcloud tracks"
                             % track.get("artist"))          # album name
    # Attach cover art only when an artwork file was downloaded.
    if track.get("artwork-path") is not None:
        self.mapper[APIC] = APIC(value=track.get("artwork-path"))
|
jreinhardt/constraining-order | src/constrainingorder/solver.py | ac3 | python | def ac3(space):
#determine arcs
arcs = {}
for name in space.variables:
arcs[name] = set([])
for const in space.constraints:
for vname1,vname2 in product(const.vnames,const.vnames):
if vname1 != vname2:
#this is pessimistic, we assume that each constraint
#pairwisely couples all variables it affects
arcs[vname1].add(vname2)
#enforce node consistency
for vname in space.variables:
for const in space.constraints:
_unary(space,const,vname)
#assemble work list
worklist = set([])
for v1 in space.variables:
for v2 in space.variables:
for const in space.constraints:
if _binary(space,const,v1,v2):
for name in arcs[v1]:
worklist.add((v1,name))
#work through work list
while worklist:
v1,v2 = worklist.pop()
for const in space.constraints:
if _binary(space,const,v1,v2):
for vname in arcs[v1]:
worklist.add((v1,vname)) | AC-3 algorithm. This reduces the domains of the variables by
propagating constraints to ensure arc consistency.
:param Space space: The space to reduce | train | https://github.com/jreinhardt/constraining-order/blob/04d00e4cad0fa9bedf15f2e89b8fd667c0495edc/src/constrainingorder/solver.py#L32-L70 | [
"def _binary(space,const,name1,name2):\n \"\"\"\n reduce the domain of variable name1 to be two-consistent (arc-consistent)\n with this constraint, i.e. remove those values for the variable name1,\n for which no values for name2 exist such that this pair is consistent\n with the constraint\n\n returns True if the domain of name1 was modified\n \"\"\"\n if not (name1 in const.vnames and name2 in const.vnames):\n return False\n remove = set([])\n for v1 in space.domains[name1].iter_members():\n for v2 in space.domains[name2].iter_members():\n if const.consistent({name1 : v1, name2 : v2}):\n break\n else:\n remove.add(v1)\n\n if len(remove) > 0:\n if space.variables[name1].discrete:\n remove = DiscreteSet(remove)\n else:\n remove = IntervalSet.from_values(remove)\n\n space.domains[name1] = space.domains[name1].difference(remove)\n return True\n else:\n return False\n",
"def _unary(space,const,name):\n \"\"\"\n Reduce the domain of variable name to be node-consistent with this\n constraint, i.e. remove those values for the variable that are not\n consistent with the constraint.\n\n returns True if the domain of name was modified\n \"\"\"\n if not name in const.vnames:\n return False\n if space.variables[name].discrete:\n values = const.domains[name]\n else:\n values = const.domains[name]\n\n space.domains[name] = space.domains[name].intersection(values)\n return True\n"
] | #Constraining Order - a simple constraint satisfaction library
#
#Copyright (c) 2015 Johannes Reinhardt <jreinhardt@ist-dein-freund.de>
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
"""
This module contains functions for solving and reducing CSPs
"""
from __future__ import unicode_literals
from itertools import product
from constrainingorder import Space
from constrainingorder.constraints import FixedValue
from constrainingorder.sets import DiscreteSet, IntervalSet
def _unary(space,const,name):
"""
Reduce the domain of variable name to be node-consistent with this
constraint, i.e. remove those values for the variable that are not
consistent with the constraint.
returns True if the domain of name was modified
"""
if not name in const.vnames:
return False
if space.variables[name].discrete:
values = const.domains[name]
else:
values = const.domains[name]
space.domains[name] = space.domains[name].intersection(values)
return True
def _binary(space,const,name1,name2):
"""
reduce the domain of variable name1 to be two-consistent (arc-consistent)
with this constraint, i.e. remove those values for the variable name1,
for which no values for name2 exist such that this pair is consistent
with the constraint
returns True if the domain of name1 was modified
"""
if not (name1 in const.vnames and name2 in const.vnames):
return False
remove = set([])
for v1 in space.domains[name1].iter_members():
for v2 in space.domains[name2].iter_members():
if const.consistent({name1 : v1, name2 : v2}):
break
else:
remove.add(v1)
if len(remove) > 0:
if space.variables[name1].discrete:
remove = DiscreteSet(remove)
else:
remove = IntervalSet.from_values(remove)
space.domains[name1] = space.domains[name1].difference(remove)
return True
else:
return False
def solve(space, method='backtrack', ordering=None):
    """
    Generator for all solutions.

    :param str method: the solution method to employ
    :param ordering: an optional parameter ordering
    :type ordering: sequence of parameter names

    Methods:

    :"backtrack": simple chronological backtracking
    :"ac-lookahead": full lookahead
    """
    order = list(space.variables.keys()) if ordering is None else ordering
    if not space.is_discrete():
        raise ValueError("Can not backtrack on non-discrete space")
    if method == 'backtrack':
        solutions = _backtrack(space, {}, order)
    elif method == 'ac-lookahead':
        solutions = _lookahead(space, {}, order)
    else:
        raise ValueError("Unknown solution method: %s" % method)
    for label in solutions:
        yield label
def _backtrack(space,label,ordering):
level = len(label)
if level == len(space.variables):
if space.satisfied(label):
yield label
elif space.consistent(label):
vname = ordering[level]
newlabel = label.copy()
for val in space.domains[vname].iter_members():
newlabel[vname] = val
for sol in _backtrack(space,newlabel,ordering):
yield sol
def _lookahead(space, label, ordering):
    """
    Full lookahead search: after each tentative assignment the
    remaining domains are pruned to arc consistency (AC-3) before
    descending.

    Yields every complete, satisfying assignment extending *label*.
    """
    level = len(label)
    if level == len(space.variables):
        if space.satisfied(label):
            yield label
    elif space.consistent(label):
        vname = ordering[level]
        var = space.variables[vname]
        for val in space.domains[vname].iter_members():
            # Pin the candidate value via an extra constraint and prune
            # the resulting subspace before recursing.
            nspace = Space(list(space.variables.values()),
                           space.constraints + [FixedValue(var, val)])
            # Fresh dict per candidate: the previous code mutated one
            # shared dict, aliasing every yielded solution.
            newlabel = dict(label)
            newlabel[vname] = val
            ac3(nspace)
            for sol in _lookahead(nspace, newlabel, ordering):
                yield sol
|
jreinhardt/constraining-order | src/constrainingorder/solver.py | _unary | python | def _unary(space,const,name):
if not name in const.vnames:
return False
if space.variables[name].discrete:
values = const.domains[name]
else:
values = const.domains[name]
space.domains[name] = space.domains[name].intersection(values)
return True | Reduce the domain of variable name to be node-consistent with this
constraint, i.e. remove those values for the variable that are not
consistent with the constraint.
returns True if the domain of name was modified | train | https://github.com/jreinhardt/constraining-order/blob/04d00e4cad0fa9bedf15f2e89b8fd667c0495edc/src/constrainingorder/solver.py#L72-L88 | null | #Constraining Order - a simple constraint satisfaction library
#
#Copyright (c) 2015 Johannes Reinhardt <jreinhardt@ist-dein-freund.de>
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
"""
This module contains functions for solving and reducing CSPs
"""
from __future__ import unicode_literals
from itertools import product
from constrainingorder import Space
from constrainingorder.constraints import FixedValue
from constrainingorder.sets import DiscreteSet, IntervalSet
def ac3(space):
    """
    AC-3 algorithm. This reduces the domains of the variables by
    propagating constraints to ensure arc consistency.

    :param Space space: The space to reduce
    """
    # Build the arc structure: for every constraint, pessimistically
    # assume it pairwisely couples all variables it mentions.
    arcs = {vname: set() for vname in space.variables}
    for const in space.constraints:
        for left, right in product(const.vnames, const.vnames):
            if left != right:
                arcs[left].add(right)
    # Node consistency: prune each domain against every constraint.
    for vname in space.variables:
        for const in space.constraints:
            _unary(space, const, vname)
    # Seed the worklist with every arc whose tail domain was narrowed.
    worklist = set()
    for tail in space.variables:
        for head in space.variables:
            for const in space.constraints:
                if _binary(space, const, tail, head):
                    for neighbour in arcs[tail]:
                        worklist.add((tail, neighbour))
    # Propagate until no domain changes any more.
    while worklist:
        tail, head = worklist.pop()
        for const in space.constraints:
            if _binary(space, const, tail, head):
                for neighbour in arcs[tail]:
                    worklist.add((tail, neighbour))
def _binary(space,const,name1,name2):
    """
    reduce the domain of variable name1 to be two-consistent (arc-consistent)
    with this constraint, i.e. drop every value of name1 for which no
    value of name2 exists such that the pair is consistent with the
    constraint

    returns True if the domain of name1 was modified
    """
    if name1 not in const.vnames or name2 not in const.vnames:
        #constraint does not couple these two variables
        return False
    #collect all values of name1 that have no supporting value in name2
    unsupported = set(
        v1 for v1 in space.domains[name1].iter_members()
        if not any(const.consistent({name1: v1, name2: v2})
                   for v2 in space.domains[name2].iter_members())
    )
    if not unsupported:
        return False
    #wrap the removed values in the set type matching the variable kind
    if space.variables[name1].discrete:
        removal = DiscreteSet(unsupported)
    else:
        removal = IntervalSet.from_values(unsupported)
    space.domains[name1] = space.domains[name1].difference(removal)
    return True
def solve(space,method='backtrack',ordering=None):
    """
    Generator for all solutions.

    :param Space space: the (discrete) space to search
    :param str method: the solution method to employ
    :param ordering: an optional parameter ordering
    :type ordering: sequence of parameter names

    Methods:

    :"backtrack": simple chronological backtracking
    :"ac-lookahead": full lookahead
    """
    #both search strategies enumerate domain members, which is only
    #possible on a fully discrete space
    if not space.is_discrete():
        raise ValueError("Can not backtrack on non-discrete space")
    if ordering is None:
        ordering = list(space.variables.keys())
    #select the search strategy, then delegate to it
    if method == 'backtrack':
        search = _backtrack
    elif method == 'ac-lookahead':
        search = _lookahead
    else:
        raise ValueError("Unknown solution method: %s" % method)
    for solution in search(space,{},ordering):
        yield solution
def _backtrack(space,label,ordering):
    """
    Recursively enumerate all solutions by chronological backtracking.

    :param Space space: the (discrete) space to search
    :param dict label: partial labeling built so far (variable name -> value)
    :param ordering: sequence of variable names fixing the search order
    """
    level = len(label)
    if level == len(space.variables):
        if space.satisfied(label):
            #yield a snapshot: the caller keeps mutating its own dict
            #between values, so yielding the shared object would make
            #all previously yielded solutions change under the consumer
            yield dict(label)
    elif space.consistent(label):
        vname = ordering[level]
        for val in space.domains[vname].iter_members():
            #fresh copy per value so sibling branches cannot interfere
            newlabel = dict(label)
            newlabel[vname] = val
            for sol in _backtrack(space,newlabel,ordering):
                yield sol
def _lookahead(space,label,ordering):
    """
    Recursively enumerate all solutions with full (arc consistency)
    lookahead: after each tentative assignment the remaining domains
    are pruned by ac3 before descending further.

    :param Space space: the (discrete) space to search
    :param dict label: partial labeling built so far (variable name -> value)
    :param ordering: sequence of variable names fixing the search order
    """
    level = len(label)
    if level == len(space.variables):
        if space.satisfied(label):
            #yield a snapshot: the caller keeps mutating its own dict
            #between values, so yielding the shared object would make
            #all previously yielded solutions change under the consumer
            yield dict(label)
    elif space.consistent(label):
        vname = ordering[level]
        var = space.variables[vname]
        for val in space.domains[vname].iter_members():
            #pin the tentative value via an extra constraint and
            #propagate its consequences before recursing
            nspace = Space(list(space.variables.values()),
                           space.constraints + [FixedValue(var,val)])
            ac3(nspace)
            #fresh copy per value so sibling branches cannot interfere
            newlabel = dict(label)
            newlabel[vname] = val
            for sol in _lookahead(nspace,newlabel,ordering):
                yield sol
|
jreinhardt/constraining-order | src/constrainingorder/solver.py | _binary | python | def _binary(space,const,name1,name2):
if not (name1 in const.vnames and name2 in const.vnames):
return False
remove = set([])
for v1 in space.domains[name1].iter_members():
for v2 in space.domains[name2].iter_members():
if const.consistent({name1 : v1, name2 : v2}):
break
else:
remove.add(v1)
if len(remove) > 0:
if space.variables[name1].discrete:
remove = DiscreteSet(remove)
else:
remove = IntervalSet.from_values(remove)
space.domains[name1] = space.domains[name1].difference(remove)
return True
else:
return False | reduce the domain of variable name1 to be two-consistent (arc-consistent)
with this constraint, i.e. remove those values for the variable name1,
for which no values for name2 exist such that this pair is consistent
with the constraint
returns True if the domain of name1 was modified | train | https://github.com/jreinhardt/constraining-order/blob/04d00e4cad0fa9bedf15f2e89b8fd667c0495edc/src/constrainingorder/solver.py#L90-L118 | [
"def consistent(self,lab):\n for v1,v2 in product(self.vnames,repeat=2):\n if v1 not in lab or v2 not in lab or v1 == v2:\n continue\n if lab[v1] == lab[v2]:\n return False\n return True\n",
"def consistent(self,lab):\n incomplete = False\n for v in self.vnames:\n if v not in lab:\n incomplete = True\n continue\n elif not lab[v] in self.domains[v]:\n return False\n if incomplete:\n return True\n return self.relation(lab[self.v1],lab[self.v2])\n",
"def consistent(self,lab):\n incomplete = False\n for v in self.vnames:\n if v not in lab:\n incomplete = True\n continue\n elif not lab[v] in self.domains[v]:\n return False\n if incomplete:\n return True\n return (lab[self.v1],lab[self.v2]) in self.tuples\n",
"def from_values(cls,values):\n \"\"\"\n Create a new IntervalSet representing a set of isolated real numbers.\n\n :param sequence values: The values for this IntervalSet\n \"\"\"\n return cls([Interval.from_value(v) for v in values])\n"
] | #Constraining Order - a simple constraint satisfaction library
#
#Copyright (c) 2015 Johannes Reinhardt <jreinhardt@ist-dein-freund.de>
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
"""
This module contains functions for solving and reducing CSPs
"""
from __future__ import unicode_literals
from itertools import product
from constrainingorder import Space
from constrainingorder.constraints import FixedValue
from constrainingorder.sets import DiscreteSet, IntervalSet
def ac3(space):
"""
AC-3 algorithm. This reduces the domains of the variables by
propagating constraints to ensure arc consistency.
:param Space space: The space to reduce
"""
#determine arcs
arcs = {}
for name in space.variables:
arcs[name] = set([])
for const in space.constraints:
for vname1,vname2 in product(const.vnames,const.vnames):
if vname1 != vname2:
#this is pessimistic, we assume that each constraint
#pairwisely couples all variables it affects
arcs[vname1].add(vname2)
#enforce node consistency
for vname in space.variables:
for const in space.constraints:
_unary(space,const,vname)
#assemble work list
worklist = set([])
for v1 in space.variables:
for v2 in space.variables:
for const in space.constraints:
if _binary(space,const,v1,v2):
for name in arcs[v1]:
worklist.add((v1,name))
#work through work list
while worklist:
v1,v2 = worklist.pop()
for const in space.constraints:
if _binary(space,const,v1,v2):
for vname in arcs[v1]:
worklist.add((v1,vname))
def _unary(space,const,name):
"""
Reduce the domain of variable name to be node-consistent with this
constraint, i.e. remove those values for the variable that are not
consistent with the constraint.
returns True if the domain of name was modified
"""
if not name in const.vnames:
return False
if space.variables[name].discrete:
values = const.domains[name]
else:
values = const.domains[name]
space.domains[name] = space.domains[name].intersection(values)
return True
def solve(space,method='backtrack',ordering=None):
"""
Generator for all solutions.
:param str method: the solution method to employ
:param ordering: an optional parameter ordering
:type ordering: sequence of parameter names
Methods:
:"backtrack": simple chronological backtracking
:"ac-lookahead": full lookahead
"""
if ordering is None:
ordering = list(space.variables.keys())
if not space.is_discrete():
raise ValueError("Can not backtrack on non-discrete space")
if method=='backtrack':
for label in _backtrack(space,{},ordering):
yield label
elif method=='ac-lookahead':
for label in _lookahead(space,{},ordering):
yield label
else:
raise ValueError("Unknown solution method: %s" % method)
def _backtrack(space,label,ordering):
level = len(label)
if level == len(space.variables):
if space.satisfied(label):
yield label
elif space.consistent(label):
vname = ordering[level]
newlabel = label.copy()
for val in space.domains[vname].iter_members():
newlabel[vname] = val
for sol in _backtrack(space,newlabel,ordering):
yield sol
def _lookahead(space,label,ordering):
level = len(label)
if len(label) == len(space.variables):
if space.satisfied(label):
yield label
elif space.consistent(label):
vname = ordering[level]
var = space.variables[vname]
newlabel = label.copy()
for val in space.domains[vname].iter_members():
nspace = Space(list(space.variables.values()),
space.constraints + [FixedValue(var,val)])
newlabel[vname] = val
ac3(nspace)
for sol in _lookahead(nspace,newlabel,ordering):
yield sol
|
jreinhardt/constraining-order | src/constrainingorder/solver.py | solve | python | def solve(space,method='backtrack',ordering=None):
if ordering is None:
ordering = list(space.variables.keys())
if not space.is_discrete():
raise ValueError("Can not backtrack on non-discrete space")
if method=='backtrack':
for label in _backtrack(space,{},ordering):
yield label
elif method=='ac-lookahead':
for label in _lookahead(space,{},ordering):
yield label
else:
raise ValueError("Unknown solution method: %s" % method) | Generator for all solutions.
:param str method: the solution method to employ
:param ordering: an optional parameter ordering
:type ordering: sequence of parameter names
Methods:
:"backtrack": simple chronological backtracking
:"ac-lookahead": full lookahead | train | https://github.com/jreinhardt/constraining-order/blob/04d00e4cad0fa9bedf15f2e89b8fd667c0495edc/src/constrainingorder/solver.py#L120-L146 | [
"def _backtrack(space,label,ordering):\n level = len(label)\n if level == len(space.variables):\n if space.satisfied(label):\n yield label\n elif space.consistent(label):\n vname = ordering[level]\n newlabel = label.copy()\n for val in space.domains[vname].iter_members():\n newlabel[vname] = val\n for sol in _backtrack(space,newlabel,ordering):\n yield sol\n",
"def _lookahead(space,label,ordering):\n level = len(label)\n if len(label) == len(space.variables):\n if space.satisfied(label):\n yield label\n elif space.consistent(label):\n vname = ordering[level]\n var = space.variables[vname]\n newlabel = label.copy()\n for val in space.domains[vname].iter_members():\n nspace = Space(list(space.variables.values()),\n space.constraints + [FixedValue(var,val)])\n newlabel[vname] = val\n ac3(nspace)\n for sol in _lookahead(nspace,newlabel,ordering):\n yield sol\n",
"def is_discrete(self):\n \"\"\"\n Return whether this space is discrete\n \"\"\"\n for domain in self.domains.values():\n if not domain.is_discrete():\n return False\n return True\n"
] | #Constraining Order - a simple constraint satisfaction library
#
#Copyright (c) 2015 Johannes Reinhardt <jreinhardt@ist-dein-freund.de>
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
"""
This module contains functions for solving and reducing CSPs
"""
from __future__ import unicode_literals
from itertools import product
from constrainingorder import Space
from constrainingorder.constraints import FixedValue
from constrainingorder.sets import DiscreteSet, IntervalSet
def ac3(space):
"""
AC-3 algorithm. This reduces the domains of the variables by
propagating constraints to ensure arc consistency.
:param Space space: The space to reduce
"""
#determine arcs
arcs = {}
for name in space.variables:
arcs[name] = set([])
for const in space.constraints:
for vname1,vname2 in product(const.vnames,const.vnames):
if vname1 != vname2:
#this is pessimistic, we assume that each constraint
#pairwisely couples all variables it affects
arcs[vname1].add(vname2)
#enforce node consistency
for vname in space.variables:
for const in space.constraints:
_unary(space,const,vname)
#assemble work list
worklist = set([])
for v1 in space.variables:
for v2 in space.variables:
for const in space.constraints:
if _binary(space,const,v1,v2):
for name in arcs[v1]:
worklist.add((v1,name))
#work through work list
while worklist:
v1,v2 = worklist.pop()
for const in space.constraints:
if _binary(space,const,v1,v2):
for vname in arcs[v1]:
worklist.add((v1,vname))
def _unary(space,const,name):
"""
Reduce the domain of variable name to be node-consistent with this
constraint, i.e. remove those values for the variable that are not
consistent with the constraint.
returns True if the domain of name was modified
"""
if not name in const.vnames:
return False
if space.variables[name].discrete:
values = const.domains[name]
else:
values = const.domains[name]
space.domains[name] = space.domains[name].intersection(values)
return True
def _binary(space,const,name1,name2):
"""
reduce the domain of variable name1 to be two-consistent (arc-consistent)
with this constraint, i.e. remove those values for the variable name1,
for which no values for name2 exist such that this pair is consistent
with the constraint
returns True if the domain of name1 was modified
"""
if not (name1 in const.vnames and name2 in const.vnames):
return False
remove = set([])
for v1 in space.domains[name1].iter_members():
for v2 in space.domains[name2].iter_members():
if const.consistent({name1 : v1, name2 : v2}):
break
else:
remove.add(v1)
if len(remove) > 0:
if space.variables[name1].discrete:
remove = DiscreteSet(remove)
else:
remove = IntervalSet.from_values(remove)
space.domains[name1] = space.domains[name1].difference(remove)
return True
else:
return False
def _backtrack(space,label,ordering):
level = len(label)
if level == len(space.variables):
if space.satisfied(label):
yield label
elif space.consistent(label):
vname = ordering[level]
newlabel = label.copy()
for val in space.domains[vname].iter_members():
newlabel[vname] = val
for sol in _backtrack(space,newlabel,ordering):
yield sol
def _lookahead(space,label,ordering):
level = len(label)
if len(label) == len(space.variables):
if space.satisfied(label):
yield label
elif space.consistent(label):
vname = ordering[level]
var = space.variables[vname]
newlabel = label.copy()
for val in space.domains[vname].iter_members():
nspace = Space(list(space.variables.values()),
space.constraints + [FixedValue(var,val)])
newlabel[vname] = val
ac3(nspace)
for sol in _lookahead(nspace,newlabel,ordering):
yield sol
|
jreinhardt/constraining-order | src/constrainingorder/__init__.py | Space.is_discrete | python | def is_discrete(self):
for domain in self.domains.values():
if not domain.is_discrete():
return False
return True | Return whether this space is discrete | train | https://github.com/jreinhardt/constraining-order/blob/04d00e4cad0fa9bedf15f2e89b8fd667c0495edc/src/constrainingorder/__init__.py#L53-L60 | null | class Space(object):
"""
A space is a description of the computation space for a specific CSP.
"""
def __init__(self,variables, constraints):
"""
Create a new Space for a CSP
:param variables: The variables of the CSP
:type variables: sequence of Variables
:param constraints: The constraints of the CSP
:type constraints: sequence of Constraints
"""
self.constraints = constraints
"list of constraints"
self.variables = {}
"dictionary of variable names to variable instances"
self.domains = {}
"dictionary of variable names to DiscreteSet/IntervalSet with admissible values"
for var in variables:
self.variables[var.name] = var
self.domains[var.name] = var.domain
def consistent(self,lab):
"""
Check whether the labeling is consistent with all constraints
"""
for const in self.constraints:
if not const.consistent(lab):
return False
return True
def satisfied(self,lab):
"""
Check whether the labeling satisfies all constraints
"""
for const in self.constraints:
if not const.satisfied(lab):
return False
return True
|
jreinhardt/constraining-order | src/constrainingorder/__init__.py | Space.consistent | python | def consistent(self,lab):
for const in self.constraints:
if not const.consistent(lab):
return False
return True | Check whether the labeling is consistent with all constraints | train | https://github.com/jreinhardt/constraining-order/blob/04d00e4cad0fa9bedf15f2e89b8fd667c0495edc/src/constrainingorder/__init__.py#L61-L68 | null | class Space(object):
"""
A space is a description of the computation space for a specific CSP.
"""
def __init__(self,variables, constraints):
"""
Create a new Space for a CSP
:param variables: The variables of the CSP
:type variables: sequence of Variables
:param constraints: The constraints of the CSP
:type constraints: sequence of Constraints
"""
self.constraints = constraints
"list of constraints"
self.variables = {}
"dictionary of variable names to variable instances"
self.domains = {}
"dictionary of variable names to DiscreteSet/IntervalSet with admissible values"
for var in variables:
self.variables[var.name] = var
self.domains[var.name] = var.domain
def is_discrete(self):
"""
Return whether this space is discrete
"""
for domain in self.domains.values():
if not domain.is_discrete():
return False
return True
def satisfied(self,lab):
"""
Check whether the labeling satisfies all constraints
"""
for const in self.constraints:
if not const.satisfied(lab):
return False
return True
|
jreinhardt/constraining-order | src/constrainingorder/__init__.py | Space.satisfied | python | def satisfied(self,lab):
for const in self.constraints:
if not const.satisfied(lab):
return False
return True | Check whether the labeling satisfies all constraints | train | https://github.com/jreinhardt/constraining-order/blob/04d00e4cad0fa9bedf15f2e89b8fd667c0495edc/src/constrainingorder/__init__.py#L69-L76 | null | class Space(object):
"""
A space is a description of the computation space for a specific CSP.
"""
def __init__(self,variables, constraints):
"""
Create a new Space for a CSP
:param variables: The variables of the CSP
:type variables: sequence of Variables
:param constraints: The constraints of the CSP
:type constraints: sequence of Constraints
"""
self.constraints = constraints
"list of constraints"
self.variables = {}
"dictionary of variable names to variable instances"
self.domains = {}
"dictionary of variable names to DiscreteSet/IntervalSet with admissible values"
for var in variables:
self.variables[var.name] = var
self.domains[var.name] = var.domain
def is_discrete(self):
"""
Return whether this space is discrete
"""
for domain in self.domains.values():
if not domain.is_discrete():
return False
return True
def consistent(self,lab):
"""
Check whether the labeling is consistent with all constraints
"""
for const in self.constraints:
if not const.consistent(lab):
return False
return True
|
jreinhardt/constraining-order | src/constrainingorder/sets.py | Interval.is_disjoint | python | def is_disjoint(self,other):
if self.is_empty() or other.is_empty():
return True
if self.bounds[0] < other.bounds[0]:
i1,i2 = self,other
elif self.bounds[0] > other.bounds[0]:
i2,i1 = self,other
else:
#coincident lower bounds
if self.is_discrete() and not other.included[0]:
return True
elif other.is_discrete() and not self.included[0]:
return True
else:
return False
return not i2.bounds[0] in i1 | Check whether two Intervals are disjoint.
:param Interval other: The Interval to check disjointedness with. | train | https://github.com/jreinhardt/constraining-order/blob/04d00e4cad0fa9bedf15f2e89b8fd667c0495edc/src/constrainingorder/sets.py#L111-L133 | [
"def is_empty(self):\n \"\"\"\n Check whether this interval is empty.\n\n :rtype: bool\n \"\"\"\n if self.bounds[1] < self.bounds[0]:\n return True\n if self.bounds[1] == self.bounds[0]:\n return not (self.included[0] and self.included[1])\n"
] | class Interval(object):
"""
An interval on the real axis.
"""
def __init__(self,bounds,included):
"""
Create a new Interval with bounds. If the right bound is larger than
the left bound, the interval is assumed to be empty.
:param sequence bounds: left and right bounds
:param sequence included: bools indicating whether the bounds are
included in the interval.
"""
self.bounds = tuple(bounds)
self.included = tuple(included)
@classmethod
def everything(cls):
"""
Create a new Interval representing the full real axis
"""
return cls((-float("inf"),float("inf")),(True,True))
@classmethod
def from_value(cls,value):
"""
Create a new Interval representing a single real number.
:param float value: The member of the Interval
"""
return cls((value,value),(True,True))
@classmethod
def open(cls,a,b):
"""
Create a new open Interval.
:param float a: Left bound
:param float b: Right bound
"""
return cls((a,b),(False,False))
@classmethod
def closed(cls,a,b):
"""
Create a new closed Interval.
:param float a: Left bound
:param float b: Right bound
"""
return cls((a,b),(True,True))
@classmethod
def leftopen(cls,a,b):
"""
Create a new halfopen Interval (left bound is excluded, right bound
included).
:param float a: Left bound
:param float b: Right bound
"""
return cls((a,b),(False,True))
@classmethod
def rightopen(cls,a,b):
"""
Create a new halfopen Interval (right bound is excluded, left bound
included).
:param float a: Left bound
:param float b: Right bound
"""
return cls((a,b),(True,False))
def _difference(self,other):
#the set of intervals is not closed w.r.t the difference, as it might
#yield zeor,one or two intervals as a result. Therefore this method
#is only used as a utility function for IntervalSet.
if self.is_empty():
return []
if other.is_empty() or self.is_disjoint(other):
return [self]
b1 = (self.bounds[0],other.bounds[0])
i1 = (self.included[0],not other.included[0])
int1 = Interval(b1,i1)
b2 = (other.bounds[1],self.bounds[1])
i2 = (not other.included[1],self.included[1])
int2 = Interval(b2,i2)
if other.bounds[0] in self and other.bounds[1] in self:
#-------
# ***
return [int1,int2]
elif other.bounds[0] in self:
bounds = (self.bounds[0],other.bounds[0])
include = (self.included[0],not other.included[0])
#-------
# *********
return [int1]
elif other.bounds[1] in self:
# -------
#*******
return [int2]
else:
raise RuntimeError("This should not happen")
def _union(self,other):
#the set of intervals is not closed w.r.t the union, as it might
#yield one or two intervals as a result. Therefore this method
#is only used as a utility function for IntervalSet.
if self.is_empty() and other.is_empty():
return []
elif self.is_empty():
return [other]
elif other.is_empty():
return [self]
if self.bounds[0] < other.bounds[0]:
i1,i2 = self,other
elif self.bounds[0] > other.bounds[0]:
i2,i1 = self,other
else:
if self.included[0]:
i1,i2 = self,other
else:
i2,i1 = self,other
if i1.is_disjoint(i2):
return [i1,i2]
elif i2.bounds[0] in i1 and i2.bounds[1] in i1:
#-------
# ***
return [i1]
elif i2.bounds[0] in i1:
bounds = (i1.bounds[0],i2.bounds[1])
include = (i1.included[0],i2.included[1])
#-------
# *********
return [Interval(bounds,include)]
else:
raise RuntimeError("This should not happen")
def intersection(self,other):
"""
Return a new Interval with the intersection of the two intervals,
i.e. all elements that are in both self and other.
:param Interval other: Interval to intersect with
:rtype: Interval
"""
if self.bounds[0] < other.bounds[0]:
i1,i2 = self,other
else:
i2,i1 = self,other
if self.is_disjoint(other):
return Interval((1,0),(True,True))
bounds = [None,None]
included = [None,None]
#sets are not disjoint, so i2.bounds[0] in i1:
bounds[0] = i2.bounds[0]
included[0] = i2.included[0]
if i2.bounds[1] in i1:
bounds[1] = i2.bounds[1]
included[1] = i2.included[1]
else:
bounds[1] = i1.bounds[1]
included[1] = i1.included[1]
return Interval(bounds,included)
def is_empty(self):
"""
Check whether this interval is empty.
:rtype: bool
"""
if self.bounds[1] < self.bounds[0]:
return True
if self.bounds[1] == self.bounds[0]:
return not (self.included[0] and self.included[1])
def is_discrete(self):
"""
Check whether this interval contains exactly one number
:rtype: bool
"""
return self.bounds[1] == self.bounds[0] and\
self.included == (True,True)
def get_point(self):
"""
Return the number contained in this interval.
:rtype: float
:raises ValueError: if Interval contains more than exactly one number.
"""
if not self.is_discrete():
raise ValueError("Interval doesn't contain exactly one value")
return self.bounds[0]
def __contains__(self,x):
"""
Check membership of the element.
:param float x: Element to check membership of
:rtype: bool
"""
if self.is_empty():
return False
if self.included[0]:
if not (x >= self.bounds[0]):
return False
else:
if not (x > self.bounds[0]):
return False
if self.included[1]:
if not (x <= self.bounds[1]):
return False
else:
if not (x < self.bounds[1]):
return False
return True
def __repr__(self):
if self.is_empty():
return "Interval((1,0),(False,False))"
return "Interval(%s,%s)" % (self.bounds,self.included)
def __str__(self):
if self.is_empty():
return "<empty set>"
else:
left = ["(","["]
right = [")","]"]
bnd = "%s,%s" % self.bounds
brk = (left[self.included[0]],right[self.included[1]])
return "%s%s%s" % (brk[0],bnd,brk[1])
|
jreinhardt/constraining-order | src/constrainingorder/sets.py | Interval.intersection | python | def intersection(self,other):
if self.bounds[0] < other.bounds[0]:
i1,i2 = self,other
else:
i2,i1 = self,other
if self.is_disjoint(other):
return Interval((1,0),(True,True))
bounds = [None,None]
included = [None,None]
#sets are not disjoint, so i2.bounds[0] in i1:
bounds[0] = i2.bounds[0]
included[0] = i2.included[0]
if i2.bounds[1] in i1:
bounds[1] = i2.bounds[1]
included[1] = i2.included[1]
else:
bounds[1] = i1.bounds[1]
included[1] = i1.included[1]
return Interval(bounds,included) | Return a new Interval with the intersection of the two intervals,
i.e. all elements that are in both self and other.
:param Interval other: Interval to intersect with
:rtype: Interval | train | https://github.com/jreinhardt/constraining-order/blob/04d00e4cad0fa9bedf15f2e89b8fd667c0495edc/src/constrainingorder/sets.py#L209-L238 | [
"def is_disjoint(self,other):\n \"\"\"\n Check whether two Intervals are disjoint.\n\n :param Interval other: The Interval to check disjointedness with.\n \"\"\"\n if self.is_empty() or other.is_empty():\n return True\n\n if self.bounds[0] < other.bounds[0]:\n i1,i2 = self,other\n elif self.bounds[0] > other.bounds[0]:\n i2,i1 = self,other\n else:\n #coincident lower bounds\n if self.is_discrete() and not other.included[0]:\n return True\n elif other.is_discrete() and not self.included[0]:\n return True\n else:\n return False\n\n return not i2.bounds[0] in i1\n"
] | class Interval(object):
"""
An interval on the real axis.
"""
def __init__(self,bounds,included):
"""
Create a new Interval with bounds. If the right bound is larger than
the left bound, the interval is assumed to be empty.
:param sequence bounds: left and right bounds
:param sequence included: bools indicating whether the bounds are
included in the interval.
"""
self.bounds = tuple(bounds)
self.included = tuple(included)
@classmethod
def everything(cls):
"""
Create a new Interval representing the full real axis
"""
return cls((-float("inf"),float("inf")),(True,True))
@classmethod
def from_value(cls,value):
"""
Create a new Interval representing a single real number.
:param float value: The member of the Interval
"""
return cls((value,value),(True,True))
@classmethod
def open(cls,a,b):
"""
Create a new open Interval.
:param float a: Left bound
:param float b: Right bound
"""
return cls((a,b),(False,False))
@classmethod
def closed(cls,a,b):
"""
Create a new closed Interval.
:param float a: Left bound
:param float b: Right bound
"""
return cls((a,b),(True,True))
@classmethod
def leftopen(cls,a,b):
"""
Create a new halfopen Interval (left bound is excluded, right bound
included).
:param float a: Left bound
:param float b: Right bound
"""
return cls((a,b),(False,True))
@classmethod
def rightopen(cls,a,b):
"""
Create a new halfopen Interval (right bound is excluded, left bound
included).
:param float a: Left bound
:param float b: Right bound
"""
return cls((a,b),(True,False))
def is_disjoint(self,other):
"""
Check whether two Intervals are disjoint.
:param Interval other: The Interval to check disjointedness with.
"""
if self.is_empty() or other.is_empty():
return True
if self.bounds[0] < other.bounds[0]:
i1,i2 = self,other
elif self.bounds[0] > other.bounds[0]:
i2,i1 = self,other
else:
#coincident lower bounds
if self.is_discrete() and not other.included[0]:
return True
elif other.is_discrete() and not self.included[0]:
return True
else:
return False
return not i2.bounds[0] in i1
def _difference(self,other):
#the set of intervals is not closed w.r.t the difference, as it might
#yield zeor,one or two intervals as a result. Therefore this method
#is only used as a utility function for IntervalSet.
if self.is_empty():
return []
if other.is_empty() or self.is_disjoint(other):
return [self]
b1 = (self.bounds[0],other.bounds[0])
i1 = (self.included[0],not other.included[0])
int1 = Interval(b1,i1)
b2 = (other.bounds[1],self.bounds[1])
i2 = (not other.included[1],self.included[1])
int2 = Interval(b2,i2)
if other.bounds[0] in self and other.bounds[1] in self:
#-------
# ***
return [int1,int2]
elif other.bounds[0] in self:
bounds = (self.bounds[0],other.bounds[0])
include = (self.included[0],not other.included[0])
#-------
# *********
return [int1]
elif other.bounds[1] in self:
# -------
#*******
return [int2]
else:
raise RuntimeError("This should not happen")
def _union(self,other):
#the set of intervals is not closed w.r.t the union, as it might
#yield one or two intervals as a result. Therefore this method
#is only used as a utility function for IntervalSet.
if self.is_empty() and other.is_empty():
return []
elif self.is_empty():
return [other]
elif other.is_empty():
return [self]
if self.bounds[0] < other.bounds[0]:
i1,i2 = self,other
elif self.bounds[0] > other.bounds[0]:
i2,i1 = self,other
else:
if self.included[0]:
i1,i2 = self,other
else:
i2,i1 = self,other
if i1.is_disjoint(i2):
return [i1,i2]
elif i2.bounds[0] in i1 and i2.bounds[1] in i1:
#-------
# ***
return [i1]
elif i2.bounds[0] in i1:
bounds = (i1.bounds[0],i2.bounds[1])
include = (i1.included[0],i2.included[1])
#-------
# *********
return [Interval(bounds,include)]
else:
raise RuntimeError("This should not happen")
def is_empty(self):
"""
Check whether this interval is empty.
:rtype: bool
"""
if self.bounds[1] < self.bounds[0]:
return True
if self.bounds[1] == self.bounds[0]:
return not (self.included[0] and self.included[1])
def is_discrete(self):
"""
Check whether this interval contains exactly one number
:rtype: bool
"""
return self.bounds[1] == self.bounds[0] and\
self.included == (True,True)
def get_point(self):
"""
Return the number contained in this interval.
:rtype: float
:raises ValueError: if Interval contains more than exactly one number.
"""
if not self.is_discrete():
raise ValueError("Interval doesn't contain exactly one value")
return self.bounds[0]
def __contains__(self,x):
"""
Check membership of the element.
:param float x: Element to check membership of
:rtype: bool
"""
if self.is_empty():
return False
if self.included[0]:
if not (x >= self.bounds[0]):
return False
else:
if not (x > self.bounds[0]):
return False
if self.included[1]:
if not (x <= self.bounds[1]):
return False
else:
if not (x < self.bounds[1]):
return False
return True
def __repr__(self):
if self.is_empty():
return "Interval((1,0),(False,False))"
return "Interval(%s,%s)" % (self.bounds,self.included)
def __str__(self):
if self.is_empty():
return "<empty set>"
else:
left = ["(","["]
right = [")","]"]
bnd = "%s,%s" % self.bounds
brk = (left[self.included[0]],right[self.included[1]])
return "%s%s%s" % (brk[0],bnd,brk[1])
|
jreinhardt/constraining-order | src/constrainingorder/sets.py | Interval.is_empty | python | def is_empty(self):
if self.bounds[1] < self.bounds[0]:
return True
if self.bounds[1] == self.bounds[0]:
return not (self.included[0] and self.included[1]) | Check whether this interval is empty.
:rtype: bool | train | https://github.com/jreinhardt/constraining-order/blob/04d00e4cad0fa9bedf15f2e89b8fd667c0495edc/src/constrainingorder/sets.py#L240-L249 | null | class Interval(object):
"""
An interval on the real axis.
"""
def __init__(self,bounds,included):
"""
Create a new Interval with bounds. If the right bound is larger than
the left bound, the interval is assumed to be empty.
:param sequence bounds: left and right bounds
:param sequence included: bools indicating whether the bounds are
included in the interval.
"""
self.bounds = tuple(bounds)
self.included = tuple(included)
@classmethod
def everything(cls):
"""
Create a new Interval representing the full real axis
"""
return cls((-float("inf"),float("inf")),(True,True))
@classmethod
def from_value(cls,value):
"""
Create a new Interval representing a single real number.
:param float value: The member of the Interval
"""
return cls((value,value),(True,True))
@classmethod
def open(cls,a,b):
"""
Create a new open Interval.
:param float a: Left bound
:param float b: Right bound
"""
return cls((a,b),(False,False))
@classmethod
def closed(cls,a,b):
"""
Create a new closed Interval.
:param float a: Left bound
:param float b: Right bound
"""
return cls((a,b),(True,True))
@classmethod
def leftopen(cls,a,b):
"""
Create a new halfopen Interval (left bound is excluded, right bound
included).
:param float a: Left bound
:param float b: Right bound
"""
return cls((a,b),(False,True))
@classmethod
def rightopen(cls,a,b):
"""
Create a new halfopen Interval (right bound is excluded, left bound
included).
:param float a: Left bound
:param float b: Right bound
"""
return cls((a,b),(True,False))
def is_disjoint(self,other):
"""
Check whether two Intervals are disjoint.
:param Interval other: The Interval to check disjointedness with.
"""
if self.is_empty() or other.is_empty():
return True
if self.bounds[0] < other.bounds[0]:
i1,i2 = self,other
elif self.bounds[0] > other.bounds[0]:
i2,i1 = self,other
else:
#coincident lower bounds
if self.is_discrete() and not other.included[0]:
return True
elif other.is_discrete() and not self.included[0]:
return True
else:
return False
return not i2.bounds[0] in i1
def _difference(self,other):
#the set of intervals is not closed w.r.t the difference, as it might
#yield zeor,one or two intervals as a result. Therefore this method
#is only used as a utility function for IntervalSet.
if self.is_empty():
return []
if other.is_empty() or self.is_disjoint(other):
return [self]
b1 = (self.bounds[0],other.bounds[0])
i1 = (self.included[0],not other.included[0])
int1 = Interval(b1,i1)
b2 = (other.bounds[1],self.bounds[1])
i2 = (not other.included[1],self.included[1])
int2 = Interval(b2,i2)
if other.bounds[0] in self and other.bounds[1] in self:
#-------
# ***
return [int1,int2]
elif other.bounds[0] in self:
bounds = (self.bounds[0],other.bounds[0])
include = (self.included[0],not other.included[0])
#-------
# *********
return [int1]
elif other.bounds[1] in self:
# -------
#*******
return [int2]
else:
raise RuntimeError("This should not happen")
def _union(self,other):
#the set of intervals is not closed w.r.t the union, as it might
#yield one or two intervals as a result. Therefore this method
#is only used as a utility function for IntervalSet.
if self.is_empty() and other.is_empty():
return []
elif self.is_empty():
return [other]
elif other.is_empty():
return [self]
if self.bounds[0] < other.bounds[0]:
i1,i2 = self,other
elif self.bounds[0] > other.bounds[0]:
i2,i1 = self,other
else:
if self.included[0]:
i1,i2 = self,other
else:
i2,i1 = self,other
if i1.is_disjoint(i2):
return [i1,i2]
elif i2.bounds[0] in i1 and i2.bounds[1] in i1:
#-------
# ***
return [i1]
elif i2.bounds[0] in i1:
bounds = (i1.bounds[0],i2.bounds[1])
include = (i1.included[0],i2.included[1])
#-------
# *********
return [Interval(bounds,include)]
else:
raise RuntimeError("This should not happen")
def intersection(self,other):
"""
Return a new Interval with the intersection of the two intervals,
i.e. all elements that are in both self and other.
:param Interval other: Interval to intersect with
:rtype: Interval
"""
if self.bounds[0] < other.bounds[0]:
i1,i2 = self,other
else:
i2,i1 = self,other
if self.is_disjoint(other):
return Interval((1,0),(True,True))
bounds = [None,None]
included = [None,None]
#sets are not disjoint, so i2.bounds[0] in i1:
bounds[0] = i2.bounds[0]
included[0] = i2.included[0]
if i2.bounds[1] in i1:
bounds[1] = i2.bounds[1]
included[1] = i2.included[1]
else:
bounds[1] = i1.bounds[1]
included[1] = i1.included[1]
return Interval(bounds,included)
def is_discrete(self):
"""
Check whether this interval contains exactly one number
:rtype: bool
"""
return self.bounds[1] == self.bounds[0] and\
self.included == (True,True)
def get_point(self):
"""
Return the number contained in this interval.
:rtype: float
:raises ValueError: if Interval contains more than exactly one number.
"""
if not self.is_discrete():
raise ValueError("Interval doesn't contain exactly one value")
return self.bounds[0]
def __contains__(self,x):
"""
Check membership of the element.
:param float x: Element to check membership of
:rtype: bool
"""
if self.is_empty():
return False
if self.included[0]:
if not (x >= self.bounds[0]):
return False
else:
if not (x > self.bounds[0]):
return False
if self.included[1]:
if not (x <= self.bounds[1]):
return False
else:
if not (x < self.bounds[1]):
return False
return True
def __repr__(self):
if self.is_empty():
return "Interval((1,0),(False,False))"
return "Interval(%s,%s)" % (self.bounds,self.included)
def __str__(self):
if self.is_empty():
return "<empty set>"
else:
left = ["(","["]
right = [")","]"]
bnd = "%s,%s" % self.bounds
brk = (left[self.included[0]],right[self.included[1]])
return "%s%s%s" % (brk[0],bnd,brk[1])
|
jreinhardt/constraining-order | src/constrainingorder/sets.py | Interval.is_discrete | python | def is_discrete(self):
return self.bounds[1] == self.bounds[0] and\
self.included == (True,True) | Check whether this interval contains exactly one number
:rtype: bool | train | https://github.com/jreinhardt/constraining-order/blob/04d00e4cad0fa9bedf15f2e89b8fd667c0495edc/src/constrainingorder/sets.py#L251-L258 | null | class Interval(object):
"""
An interval on the real axis.
"""
def __init__(self,bounds,included):
"""
Create a new Interval with bounds. If the right bound is larger than
the left bound, the interval is assumed to be empty.
:param sequence bounds: left and right bounds
:param sequence included: bools indicating whether the bounds are
included in the interval.
"""
self.bounds = tuple(bounds)
self.included = tuple(included)
@classmethod
def everything(cls):
"""
Create a new Interval representing the full real axis
"""
return cls((-float("inf"),float("inf")),(True,True))
@classmethod
def from_value(cls,value):
"""
Create a new Interval representing a single real number.
:param float value: The member of the Interval
"""
return cls((value,value),(True,True))
@classmethod
def open(cls,a,b):
"""
Create a new open Interval.
:param float a: Left bound
:param float b: Right bound
"""
return cls((a,b),(False,False))
@classmethod
def closed(cls,a,b):
"""
Create a new closed Interval.
:param float a: Left bound
:param float b: Right bound
"""
return cls((a,b),(True,True))
@classmethod
def leftopen(cls,a,b):
"""
Create a new halfopen Interval (left bound is excluded, right bound
included).
:param float a: Left bound
:param float b: Right bound
"""
return cls((a,b),(False,True))
@classmethod
def rightopen(cls,a,b):
"""
Create a new halfopen Interval (right bound is excluded, left bound
included).
:param float a: Left bound
:param float b: Right bound
"""
return cls((a,b),(True,False))
    def is_disjoint(self,other):
        """
        Check whether two Intervals are disjoint.

        :param Interval other: The Interval to check disjointedness with.
        :rtype: bool
        """
        # The empty interval is disjoint from everything.
        if self.is_empty() or other.is_empty():
            return True
        # Order the intervals so that i1 is the one with the smaller lower
        # bound; only then does the final containment test below make sense.
        if self.bounds[0] < other.bounds[0]:
            i1,i2 = self,other
        elif self.bounds[0] > other.bounds[0]:
            i2,i1 = self,other
        else:
            #coincident lower bounds
            # With a shared lower bound the intervals can only be disjoint
            # when one of them is a single point that the other excludes.
            if self.is_discrete() and not other.included[0]:
                return True
            elif other.is_discrete() and not self.included[0]:
                return True
            else:
                return False
        # Disjoint iff i2's start lies beyond the reach of i1.
        return not i2.bounds[0] in i1
def _difference(self,other):
#the set of intervals is not closed w.r.t the difference, as it might
#yield zeor,one or two intervals as a result. Therefore this method
#is only used as a utility function for IntervalSet.
if self.is_empty():
return []
if other.is_empty() or self.is_disjoint(other):
return [self]
b1 = (self.bounds[0],other.bounds[0])
i1 = (self.included[0],not other.included[0])
int1 = Interval(b1,i1)
b2 = (other.bounds[1],self.bounds[1])
i2 = (not other.included[1],self.included[1])
int2 = Interval(b2,i2)
if other.bounds[0] in self and other.bounds[1] in self:
#-------
# ***
return [int1,int2]
elif other.bounds[0] in self:
bounds = (self.bounds[0],other.bounds[0])
include = (self.included[0],not other.included[0])
#-------
# *********
return [int1]
elif other.bounds[1] in self:
# -------
#*******
return [int2]
else:
raise RuntimeError("This should not happen")
    def _union(self,other):
        #the set of intervals is not closed w.r.t the union, as it might
        #yield one or two intervals as a result. Therefore this method
        #is only used as a utility function for IntervalSet.
        # Empty operands contribute nothing to the union.
        if self.is_empty() and other.is_empty():
            return []
        elif self.is_empty():
            return [other]
        elif other.is_empty():
            return [self]
        # Order the intervals so that i1 starts first; on coincident lower
        # bounds the interval that includes the bound goes first.
        if self.bounds[0] < other.bounds[0]:
            i1,i2 = self,other
        elif self.bounds[0] > other.bounds[0]:
            i2,i1 = self,other
        else:
            if self.included[0]:
                i1,i2 = self,other
            else:
                i2,i1 = self,other
        if i1.is_disjoint(i2):
            # No overlap: the union remains two separate intervals.
            return [i1,i2]
        elif i2.bounds[0] in i1 and i2.bounds[1] in i1:
            # i2 lies completely inside i1.
            #-------
            #  ***
            return [i1]
        elif i2.bounds[0] in i1:
            # Proper overlap: merge into one interval spanning both.
            bounds = (i1.bounds[0],i2.bounds[1])
            include = (i1.included[0],i2.included[1])
            #-------
            #   *********
            return [Interval(bounds,include)]
        else:
            raise RuntimeError("This should not happen")
def intersection(self,other):
"""
Return a new Interval with the intersection of the two intervals,
i.e. all elements that are in both self and other.
:param Interval other: Interval to intersect with
:rtype: Interval
"""
if self.bounds[0] < other.bounds[0]:
i1,i2 = self,other
else:
i2,i1 = self,other
if self.is_disjoint(other):
return Interval((1,0),(True,True))
bounds = [None,None]
included = [None,None]
#sets are not disjoint, so i2.bounds[0] in i1:
bounds[0] = i2.bounds[0]
included[0] = i2.included[0]
if i2.bounds[1] in i1:
bounds[1] = i2.bounds[1]
included[1] = i2.included[1]
else:
bounds[1] = i1.bounds[1]
included[1] = i1.included[1]
return Interval(bounds,included)
def is_empty(self):
"""
Check whether this interval is empty.
:rtype: bool
"""
if self.bounds[1] < self.bounds[0]:
return True
if self.bounds[1] == self.bounds[0]:
return not (self.included[0] and self.included[1])
def get_point(self):
"""
Return the number contained in this interval.
:rtype: float
:raises ValueError: if Interval contains more than exactly one number.
"""
if not self.is_discrete():
raise ValueError("Interval doesn't contain exactly one value")
return self.bounds[0]
def __contains__(self,x):
"""
Check membership of the element.
:param float x: Element to check membership of
:rtype: bool
"""
if self.is_empty():
return False
if self.included[0]:
if not (x >= self.bounds[0]):
return False
else:
if not (x > self.bounds[0]):
return False
if self.included[1]:
if not (x <= self.bounds[1]):
return False
else:
if not (x < self.bounds[1]):
return False
return True
def __repr__(self):
if self.is_empty():
return "Interval((1,0),(False,False))"
return "Interval(%s,%s)" % (self.bounds,self.included)
def __str__(self):
if self.is_empty():
return "<empty set>"
else:
left = ["(","["]
right = [")","]"]
bnd = "%s,%s" % self.bounds
brk = (left[self.included[0]],right[self.included[1]])
return "%s%s%s" % (brk[0],bnd,brk[1])
|
jreinhardt/constraining-order | src/constrainingorder/sets.py | IntervalSet.iter_members | python | def iter_members(self):
if not self.is_discrete():
raise ValueError("non-discrete IntervalSet can not be iterated")
for i in self.ints:
yield i.get_point() | Iterate over all elements of the set.
:raises ValueError: if self is a set of everything | train | https://github.com/jreinhardt/constraining-order/blob/04d00e4cad0fa9bedf15f2e89b8fd667c0495edc/src/constrainingorder/sets.py#L370-L379 | [
"def is_discrete(self):\n \"\"\"\n Check whether this IntervalSet contains only isolated numbers.\n\n :rtype: bool\n \"\"\"\n for i in self.ints:\n if not i.is_discrete():\n return False\n return True\n"
] | class IntervalSet(object):
"""
A set of intervals to represent quite general sets in R
"""
def __init__(self,ints):
"""
Create a new IntervalSet.
:param sequence ints: Intervals for this IntervalSet
"""
self.ints = []
for i in sorted(ints,key=lambda x: x.bounds[0]):
if i.is_empty():
continue
if len(self.ints) > 0 and not i.is_disjoint(self.ints[-1]):
i2 = self.ints.pop(-1)
self.ints.extend(i2._union(i))
else:
self.ints.append(i)
for i1,i2 in pairwise(self.ints):
if not i1.is_disjoint(i2):
raise ValueError('Intervals are not disjoint')
@classmethod
def everything(cls):
"""
Create a new IntervalSet representing the full real axis.
"""
return cls([Interval.everything()])
@classmethod
def from_values(cls,values):
"""
Create a new IntervalSet representing a set of isolated real numbers.
:param sequence values: The values for this IntervalSet
"""
return cls([Interval.from_value(v) for v in values])
def is_empty(self):
"""
Check whether this IntervalSet is empty.
:rtype: bool
"""
return len(self.ints) == 0
def is_discrete(self):
"""
Check whether this IntervalSet contains only isolated numbers.
:rtype: bool
"""
for i in self.ints:
if not i.is_discrete():
return False
return True
def intersection(self,other):
"""
Return a new IntervalSet with the intersection of the two sets, i.e.
all elements that are both in self and other.
:param IntervalSet other: Set to intersect with
:rtype: IntervalSet
"""
res = []
for i1 in self.ints:
for i2 in other.ints:
res.append(i1.intersection(i2))
return IntervalSet(res)
def union(self,other):
"""
Return a new IntervalSet with the union of the two sets, i.e.
all elements that are in self or other.
:param IntervalSet other: Set to intersect with
:rtype: IntervalSet
"""
return IntervalSet(self.ints + other.ints)
def difference(self,other):
"""
Return a new IntervalSet with the difference of the two sets, i.e.
all elements that are in self but not in other.
:param IntervalSet other: Set to subtract
:rtype: IntervalSet
"""
res = IntervalSet.everything()
for j in other.ints:
tmp = []
for i in self.ints:
tmp.extend(i._difference(j))
res = res.intersection(IntervalSet(tmp))
return res
def __contains__(self,x):
"""
Check membership of the element.
:param element: Element to check membership of
:rtype: bool
"""
for interval in self.ints:
if x in interval:
return True
return False
def __str__(self):
if self.is_empty():
return "<empty interval set>"
else:
return " u ".join(str(i) for i in self.ints)
def __repr__(self):
return "IntervalSet([%s])" % ",".join(i.__repr__() for i in self.ints)
|
jreinhardt/constraining-order | src/constrainingorder/sets.py | IntervalSet.intersection | python | def intersection(self,other):
res = []
for i1 in self.ints:
for i2 in other.ints:
res.append(i1.intersection(i2))
return IntervalSet(res) | Return a new IntervalSet with the intersection of the two sets, i.e.
all elements that are both in self and other.
:param IntervalSet other: Set to intersect with
:rtype: IntervalSet | train | https://github.com/jreinhardt/constraining-order/blob/04d00e4cad0fa9bedf15f2e89b8fd667c0495edc/src/constrainingorder/sets.py#L381-L394 | null | class IntervalSet(object):
"""
A set of intervals to represent quite general sets in R
"""
def __init__(self,ints):
"""
Create a new IntervalSet.
:param sequence ints: Intervals for this IntervalSet
"""
self.ints = []
for i in sorted(ints,key=lambda x: x.bounds[0]):
if i.is_empty():
continue
if len(self.ints) > 0 and not i.is_disjoint(self.ints[-1]):
i2 = self.ints.pop(-1)
self.ints.extend(i2._union(i))
else:
self.ints.append(i)
for i1,i2 in pairwise(self.ints):
if not i1.is_disjoint(i2):
raise ValueError('Intervals are not disjoint')
@classmethod
def everything(cls):
"""
Create a new IntervalSet representing the full real axis.
"""
return cls([Interval.everything()])
@classmethod
def from_values(cls,values):
"""
Create a new IntervalSet representing a set of isolated real numbers.
:param sequence values: The values for this IntervalSet
"""
return cls([Interval.from_value(v) for v in values])
def is_empty(self):
"""
Check whether this IntervalSet is empty.
:rtype: bool
"""
return len(self.ints) == 0
def is_discrete(self):
"""
Check whether this IntervalSet contains only isolated numbers.
:rtype: bool
"""
for i in self.ints:
if not i.is_discrete():
return False
return True
def iter_members(self):
"""
Iterate over all elements of the set.
:raises ValueError: if self is a set of everything
"""
if not self.is_discrete():
raise ValueError("non-discrete IntervalSet can not be iterated")
for i in self.ints:
yield i.get_point()
def union(self,other):
"""
Return a new IntervalSet with the union of the two sets, i.e.
all elements that are in self or other.
:param IntervalSet other: Set to intersect with
:rtype: IntervalSet
"""
return IntervalSet(self.ints + other.ints)
def difference(self,other):
"""
Return a new IntervalSet with the difference of the two sets, i.e.
all elements that are in self but not in other.
:param IntervalSet other: Set to subtract
:rtype: IntervalSet
"""
res = IntervalSet.everything()
for j in other.ints:
tmp = []
for i in self.ints:
tmp.extend(i._difference(j))
res = res.intersection(IntervalSet(tmp))
return res
def __contains__(self,x):
"""
Check membership of the element.
:param element: Element to check membership of
:rtype: bool
"""
for interval in self.ints:
if x in interval:
return True
return False
def __str__(self):
if self.is_empty():
return "<empty interval set>"
else:
return " u ".join(str(i) for i in self.ints)
def __repr__(self):
return "IntervalSet([%s])" % ",".join(i.__repr__() for i in self.ints)
|
jreinhardt/constraining-order | src/constrainingorder/sets.py | IntervalSet.difference | python | def difference(self,other):
res = IntervalSet.everything()
for j in other.ints:
tmp = []
for i in self.ints:
tmp.extend(i._difference(j))
res = res.intersection(IntervalSet(tmp))
return res | Return a new IntervalSet with the difference of the two sets, i.e.
all elements that are in self but not in other.
:param IntervalSet other: Set to subtract
:rtype: IntervalSet | train | https://github.com/jreinhardt/constraining-order/blob/04d00e4cad0fa9bedf15f2e89b8fd667c0495edc/src/constrainingorder/sets.py#L406-L420 | [
"def everything(cls):\n \"\"\"\n Create a new IntervalSet representing the full real axis.\n \"\"\"\n return cls([Interval.everything()])\n",
"def intersection(self,other):\n \"\"\"\n Return a new IntervalSet with the intersection of the two sets, i.e.\n all elements that are both in self and other.\n\n :param IntervalSet other: Set to intersect with\n :rtype: IntervalSet\n \"\"\"\n res = []\n for i1 in self.ints:\n for i2 in other.ints:\n res.append(i1.intersection(i2))\n\n return IntervalSet(res)\n"
] | class IntervalSet(object):
"""
A set of intervals to represent quite general sets in R
"""
def __init__(self,ints):
"""
Create a new IntervalSet.
:param sequence ints: Intervals for this IntervalSet
"""
self.ints = []
for i in sorted(ints,key=lambda x: x.bounds[0]):
if i.is_empty():
continue
if len(self.ints) > 0 and not i.is_disjoint(self.ints[-1]):
i2 = self.ints.pop(-1)
self.ints.extend(i2._union(i))
else:
self.ints.append(i)
for i1,i2 in pairwise(self.ints):
if not i1.is_disjoint(i2):
raise ValueError('Intervals are not disjoint')
@classmethod
def everything(cls):
"""
Create a new IntervalSet representing the full real axis.
"""
return cls([Interval.everything()])
@classmethod
def from_values(cls,values):
"""
Create a new IntervalSet representing a set of isolated real numbers.
:param sequence values: The values for this IntervalSet
"""
return cls([Interval.from_value(v) for v in values])
def is_empty(self):
"""
Check whether this IntervalSet is empty.
:rtype: bool
"""
return len(self.ints) == 0
def is_discrete(self):
"""
Check whether this IntervalSet contains only isolated numbers.
:rtype: bool
"""
for i in self.ints:
if not i.is_discrete():
return False
return True
def iter_members(self):
"""
Iterate over all elements of the set.
:raises ValueError: if self is a set of everything
"""
if not self.is_discrete():
raise ValueError("non-discrete IntervalSet can not be iterated")
for i in self.ints:
yield i.get_point()
def intersection(self,other):
"""
Return a new IntervalSet with the intersection of the two sets, i.e.
all elements that are both in self and other.
:param IntervalSet other: Set to intersect with
:rtype: IntervalSet
"""
res = []
for i1 in self.ints:
for i2 in other.ints:
res.append(i1.intersection(i2))
return IntervalSet(res)
def union(self,other):
"""
Return a new IntervalSet with the union of the two sets, i.e.
all elements that are in self or other.
:param IntervalSet other: Set to intersect with
:rtype: IntervalSet
"""
return IntervalSet(self.ints + other.ints)
def __contains__(self,x):
"""
Check membership of the element.
:param element: Element to check membership of
:rtype: bool
"""
for interval in self.ints:
if x in interval:
return True
return False
def __str__(self):
if self.is_empty():
return "<empty interval set>"
else:
return " u ".join(str(i) for i in self.ints)
def __repr__(self):
return "IntervalSet([%s])" % ",".join(i.__repr__() for i in self.ints)
|
jreinhardt/constraining-order | src/constrainingorder/sets.py | DiscreteSet.intersection | python | def intersection(self,other):
if self.everything:
if other.everything:
return DiscreteSet()
else:
return DiscreteSet(other.elements)
else:
if other.everything:
return DiscreteSet(self.elements)
else:
return DiscreteSet(self.elements.intersection(other.elements)) | Return a new DiscreteSet with the intersection of the two sets, i.e.
all elements that are in both self and other.
:param DiscreteSet other: Set to intersect with
:rtype: DiscreteSet | train | https://github.com/jreinhardt/constraining-order/blob/04d00e4cad0fa9bedf15f2e89b8fd667c0495edc/src/constrainingorder/sets.py#L492-L509 | null | class DiscreteSet(object):
"""
A set data structure for hashable elements
This is a wrapper around pythons set type, which additionally provides
the possibility to express the set of everything (which only makes sense
sometimes).
"""
def __init__(self,elements):
"""
Create a new DiscreteSet
:param sequence elements: The elements of the newly created set
"""
self.everything = False
self.elements = frozenset(elements)
@classmethod
def everything(cls):
"""
Create a new set of everything.
One can not iterate over the elements of this set, but many
operations are actually well defined and useful.
"""
res = cls([])
res.everything = True
return res
def is_empty(self):
"""
Check whether the set is empty
:rtype: bool
"""
if self.everything:
return False
return len(self.elements) == 0
def is_discrete(self):
"""
Check whether the set is discrete, i.e. if :meth:`iter_members` can
be used.
:rtype: bool
"""
return not self.everything
def difference(self,other):
"""
Return a new DiscreteSet with the difference of the two sets, i.e.
all elements that are in self but not in other.
:param DiscreteSet other: Set to subtract
:rtype: DiscreteSet
:raises ValueError: if self is a set of everything
"""
if self.everything:
raise ValueError("Can not remove from everything")
elif other.everything:
return DiscreteSet([])
else:
return DiscreteSet(self.elements.difference(other.elements))
def union(self,other):
"""
Return a new DiscreteSet with the union of the two sets, i.e.
all elements that are in self or in other.
:param DiscreteSet other: Set to unite with
:rtype: DiscreteSet
"""
if self.everything:
return self
elif other.everything:
return other
else:
return DiscreteSet(self.elements.union(other.elements))
def iter_members(self):
"""
Iterate over all elements of the set.
:raises ValueError: if self is a set of everything
"""
if self.everything:
raise ValueError("Can not iterate everything")
for coord in sorted(self.elements):
yield coord
def __contains__(self,element):
"""
Check membership of the element.
:param element: Element to check membership of
:rtype: bool
"""
if self.everything:
return True
return element in self.elements
def __str__(self):
if self.is_empty():
return "<empty discrete set>"
else:
return "{%s}" % ",".join(str(e) for e in sorted(self.elements))
def __repr__(self):
if self.everything:
return "DiscreteSet.everything()"
return "DiscreteSet([%s])" % ",".join(i.__repr__() for i in sorted(self.elements))
|
jreinhardt/constraining-order | src/constrainingorder/sets.py | DiscreteSet.difference | python | def difference(self,other):
if self.everything:
raise ValueError("Can not remove from everything")
elif other.everything:
return DiscreteSet([])
else:
return DiscreteSet(self.elements.difference(other.elements)) | Return a new DiscreteSet with the difference of the two sets, i.e.
all elements that are in self but not in other.
:param DiscreteSet other: Set to subtract
:rtype: DiscreteSet
:raises ValueError: if self is a set of everything | train | https://github.com/jreinhardt/constraining-order/blob/04d00e4cad0fa9bedf15f2e89b8fd667c0495edc/src/constrainingorder/sets.py#L511-L525 | null | class DiscreteSet(object):
"""
A set data structure for hashable elements
This is a wrapper around pythons set type, which additionally provides
the possibility to express the set of everything (which only makes sense
sometimes).
"""
def __init__(self,elements):
"""
Create a new DiscreteSet
:param sequence elements: The elements of the newly created set
"""
self.everything = False
self.elements = frozenset(elements)
@classmethod
def everything(cls):
"""
Create a new set of everything.
One can not iterate over the elements of this set, but many
operations are actually well defined and useful.
"""
res = cls([])
res.everything = True
return res
def is_empty(self):
"""
Check whether the set is empty
:rtype: bool
"""
if self.everything:
return False
return len(self.elements) == 0
def is_discrete(self):
"""
Check whether the set is discrete, i.e. if :meth:`iter_members` can
be used.
:rtype: bool
"""
return not self.everything
def intersection(self,other):
"""
Return a new DiscreteSet with the intersection of the two sets, i.e.
all elements that are in both self and other.
:param DiscreteSet other: Set to intersect with
:rtype: DiscreteSet
"""
if self.everything:
if other.everything:
return DiscreteSet()
else:
return DiscreteSet(other.elements)
else:
if other.everything:
return DiscreteSet(self.elements)
else:
return DiscreteSet(self.elements.intersection(other.elements))
def union(self,other):
"""
Return a new DiscreteSet with the union of the two sets, i.e.
all elements that are in self or in other.
:param DiscreteSet other: Set to unite with
:rtype: DiscreteSet
"""
if self.everything:
return self
elif other.everything:
return other
else:
return DiscreteSet(self.elements.union(other.elements))
def iter_members(self):
"""
Iterate over all elements of the set.
:raises ValueError: if self is a set of everything
"""
if self.everything:
raise ValueError("Can not iterate everything")
for coord in sorted(self.elements):
yield coord
def __contains__(self,element):
"""
Check membership of the element.
:param element: Element to check membership of
:rtype: bool
"""
if self.everything:
return True
return element in self.elements
def __str__(self):
if self.is_empty():
return "<empty discrete set>"
else:
return "{%s}" % ",".join(str(e) for e in sorted(self.elements))
def __repr__(self):
if self.everything:
return "DiscreteSet.everything()"
return "DiscreteSet([%s])" % ",".join(i.__repr__() for i in sorted(self.elements))
|
jreinhardt/constraining-order | src/constrainingorder/sets.py | DiscreteSet.union | python | def union(self,other):
if self.everything:
return self
elif other.everything:
return other
else:
return DiscreteSet(self.elements.union(other.elements)) | Return a new DiscreteSet with the union of the two sets, i.e.
all elements that are in self or in other.
:param DiscreteSet other: Set to unite with
:rtype: DiscreteSet | train | https://github.com/jreinhardt/constraining-order/blob/04d00e4cad0fa9bedf15f2e89b8fd667c0495edc/src/constrainingorder/sets.py#L527-L540 | null | class DiscreteSet(object):
"""
A set data structure for hashable elements
This is a wrapper around pythons set type, which additionally provides
the possibility to express the set of everything (which only makes sense
sometimes).
"""
def __init__(self,elements):
"""
Create a new DiscreteSet
:param sequence elements: The elements of the newly created set
"""
self.everything = False
self.elements = frozenset(elements)
@classmethod
def everything(cls):
"""
Create a new set of everything.
One can not iterate over the elements of this set, but many
operations are actually well defined and useful.
"""
res = cls([])
res.everything = True
return res
def is_empty(self):
"""
Check whether the set is empty
:rtype: bool
"""
if self.everything:
return False
return len(self.elements) == 0
def is_discrete(self):
"""
Check whether the set is discrete, i.e. if :meth:`iter_members` can
be used.
:rtype: bool
"""
return not self.everything
def intersection(self,other):
"""
Return a new DiscreteSet with the intersection of the two sets, i.e.
all elements that are in both self and other.
:param DiscreteSet other: Set to intersect with
:rtype: DiscreteSet
"""
if self.everything:
if other.everything:
return DiscreteSet()
else:
return DiscreteSet(other.elements)
else:
if other.everything:
return DiscreteSet(self.elements)
else:
return DiscreteSet(self.elements.intersection(other.elements))
def difference(self,other):
"""
Return a new DiscreteSet with the difference of the two sets, i.e.
all elements that are in self but not in other.
:param DiscreteSet other: Set to subtract
:rtype: DiscreteSet
:raises ValueError: if self is a set of everything
"""
if self.everything:
raise ValueError("Can not remove from everything")
elif other.everything:
return DiscreteSet([])
else:
return DiscreteSet(self.elements.difference(other.elements))
def iter_members(self):
"""
Iterate over all elements of the set.
:raises ValueError: if self is a set of everything
"""
if self.everything:
raise ValueError("Can not iterate everything")
for coord in sorted(self.elements):
yield coord
def __contains__(self,element):
"""
Check membership of the element.
:param element: Element to check membership of
:rtype: bool
"""
if self.everything:
return True
return element in self.elements
def __str__(self):
if self.is_empty():
return "<empty discrete set>"
else:
return "{%s}" % ",".join(str(e) for e in sorted(self.elements))
def __repr__(self):
if self.everything:
return "DiscreteSet.everything()"
return "DiscreteSet([%s])" % ",".join(i.__repr__() for i in sorted(self.elements))
|
jreinhardt/constraining-order | src/constrainingorder/sets.py | DiscreteSet.iter_members | python | def iter_members(self):
if self.everything:
raise ValueError("Can not iterate everything")
for coord in sorted(self.elements):
yield coord | Iterate over all elements of the set.
:raises ValueError: if self is a set of everything | train | https://github.com/jreinhardt/constraining-order/blob/04d00e4cad0fa9bedf15f2e89b8fd667c0495edc/src/constrainingorder/sets.py#L542-L551 | null | class DiscreteSet(object):
"""
A set data structure for hashable elements
This is a wrapper around pythons set type, which additionally provides
the possibility to express the set of everything (which only makes sense
sometimes).
"""
def __init__(self,elements):
"""
Create a new DiscreteSet
:param sequence elements: The elements of the newly created set
"""
self.everything = False
self.elements = frozenset(elements)
@classmethod
def everything(cls):
"""
Create a new set of everything.
One can not iterate over the elements of this set, but many
operations are actually well defined and useful.
"""
res = cls([])
res.everything = True
return res
def is_empty(self):
"""
Check whether the set is empty
:rtype: bool
"""
if self.everything:
return False
return len(self.elements) == 0
def is_discrete(self):
"""
Check whether the set is discrete, i.e. if :meth:`iter_members` can
be used.
:rtype: bool
"""
return not self.everything
def intersection(self,other):
"""
Return a new DiscreteSet with the intersection of the two sets, i.e.
all elements that are in both self and other.
:param DiscreteSet other: Set to intersect with
:rtype: DiscreteSet
"""
if self.everything:
if other.everything:
return DiscreteSet()
else:
return DiscreteSet(other.elements)
else:
if other.everything:
return DiscreteSet(self.elements)
else:
return DiscreteSet(self.elements.intersection(other.elements))
def difference(self,other):
"""
Return a new DiscreteSet with the difference of the two sets, i.e.
all elements that are in self but not in other.
:param DiscreteSet other: Set to subtract
:rtype: DiscreteSet
:raises ValueError: if self is a set of everything
"""
if self.everything:
raise ValueError("Can not remove from everything")
elif other.everything:
return DiscreteSet([])
else:
return DiscreteSet(self.elements.difference(other.elements))
def union(self,other):
"""
Return a new DiscreteSet with the union of the two sets, i.e.
all elements that are in self or in other.
:param DiscreteSet other: Set to unite with
:rtype: DiscreteSet
"""
if self.everything:
return self
elif other.everything:
return other
else:
return DiscreteSet(self.elements.union(other.elements))
def __contains__(self,element):
"""
Check membership of the element.
:param element: Element to check membership of
:rtype: bool
"""
if self.everything:
return True
return element in self.elements
def __str__(self):
if self.is_empty():
return "<empty discrete set>"
else:
return "{%s}" % ",".join(str(e) for e in sorted(self.elements))
def __repr__(self):
if self.everything:
return "DiscreteSet.everything()"
return "DiscreteSet([%s])" % ",".join(i.__repr__() for i in sorted(self.elements))
|
jreinhardt/constraining-order | src/constrainingorder/sets.py | Patch.intersection | python | def intersection(self,other):
"intersection with another patch"
res = {}
if set(self.sets.keys()) != set(other.sets.keys()):
raise KeyError('Incompatible patches in intersection')
for name,s1 in self.sets.items():
s2 = other.sets[name]
res[name] = s1.intersection(s2)
return Patch(res) | intersection with another patch | train | https://github.com/jreinhardt/constraining-order/blob/04d00e4cad0fa9bedf15f2e89b8fd667c0495edc/src/constrainingorder/sets.py#L602-L610 | null | class Patch(object):
def __init__(self,sets):
"""
A patch of multidimensional parameter space
sets is a dict of names to DiscreteSet or IntervalSets of feasible
values and represents the cartesion product of these
"""
self.sets = sets
self.discrete = True
self.empty = False
for s in sets.values():
if isinstance(s,IntervalSet) and not s.is_discrete():
self.discrete = False
if s.is_empty():
self.empty = True
def is_empty(self):
return self.empty
def is_discrete(self):
return self.discrete
def iter_points(self):
"returns a list of tuples of names and values"
if not self.is_discrete():
raise ValueError("Patch is not discrete")
names = sorted(self.sets.keys())
icoords = [self.sets[name].iter_members() for name in names]
for coordinates in product(*icoords):
yield tuple(zip(names,coordinates))
def __contains__(self,point):
for name, coord in point.items():
if not coord in self.sets[name]:
return False
return True
def __str__(self):
if self.is_empty():
return "<empty patch>"
else:
sets = ["%s:%s" % (n,str(i)) for n,i in self.sets.items()]
return " x ".join(sets)
|
jreinhardt/constraining-order | src/constrainingorder/sets.py | Patch.iter_points | python | def iter_points(self):
"returns a list of tuples of names and values"
if not self.is_discrete():
raise ValueError("Patch is not discrete")
names = sorted(self.sets.keys())
icoords = [self.sets[name].iter_members() for name in names]
for coordinates in product(*icoords):
yield tuple(zip(names,coordinates)) | returns a list of tuples of names and values | train | https://github.com/jreinhardt/constraining-order/blob/04d00e4cad0fa9bedf15f2e89b8fd667c0495edc/src/constrainingorder/sets.py#L612-L619 | [
"def is_discrete(self):\n return self.discrete\n"
] | class Patch(object):
def __init__(self,sets):
"""
A patch of multidimensional parameter space
sets is a dict of names to DiscreteSet or IntervalSets of feasible
values and represents the cartesion product of these
"""
self.sets = sets
self.discrete = True
self.empty = False
for s in sets.values():
if isinstance(s,IntervalSet) and not s.is_discrete():
self.discrete = False
if s.is_empty():
self.empty = True
def is_empty(self):
return self.empty
def is_discrete(self):
return self.discrete
def intersection(self,other):
"intersection with another patch"
res = {}
if set(self.sets.keys()) != set(other.sets.keys()):
raise KeyError('Incompatible patches in intersection')
for name,s1 in self.sets.items():
s2 = other.sets[name]
res[name] = s1.intersection(s2)
return Patch(res)
def __contains__(self,point):
for name, coord in point.items():
if not coord in self.sets[name]:
return False
return True
def __str__(self):
if self.is_empty():
return "<empty patch>"
else:
sets = ["%s:%s" % (n,str(i)) for n,i in self.sets.items()]
return " x ".join(sets)
|
NerdWalletOSS/savage | src/savage/api/data.py | delete | python | def delete(table, session, conds):
with session.begin_nested():
archive_conds_list = _get_conditions_list(table, conds)
session.execute(
sa.delete(table.ArchiveTable, whereclause=_get_conditions(archive_conds_list))
)
conds_list = _get_conditions_list(table, conds, archive=False)
session.execute(
sa.delete(table, whereclause=_get_conditions(conds_list))
) | Performs a hard delete on a row, which means the row is deleted from the Savage
table as well as the archive table.
:param table: the model class which inherits from
:class:`~savage.models.user_table.SavageModelMixin` and specifies the model
of the user table from which we are querying
:param session: a sqlalchemy session with connections to the database
:param conds: a list of dictionary of key value pairs where keys are columns in the table
and values are values the column should take on. If specified, this query will
only return rows where the columns meet all the conditions. The columns specified
in this dictionary must be exactly the unique columns that versioning pivots around. | train | https://github.com/NerdWalletOSS/savage/blob/54f64ac1c912528710365107952967d31d56e60d/src/savage/api/data.py#L8-L29 | [
"def _get_conditions_list(table, conds, archive=True):\n \"\"\"This function returns a list of list of == conditions on sqlalchemy columns given conds.\n This should be treated as an or of ands.\n\n :param table: the user table model class which inherits from\n savage.models.SavageModelMixin\n :param conds: a list of dictionaries of key value pairs where keys are column names and\n values are conditions to be placed on the column.\n :param archive: If true, the condition is with columns from the archive table. Else its from\n the user table.\n \"\"\"\n if conds is None:\n conds = []\n\n all_conditions = []\n for cond in conds:\n if len(cond) != len(table.version_columns):\n raise ValueError('Conditions must specify all unique constraints.')\n\n conditions = []\n t = table.ArchiveTable if archive else table\n\n for col_name, value in cond.iteritems():\n if col_name not in table.version_columns:\n raise ValueError('{} is not one of the unique columns <{}>'.format(\n col_name, ','.join(table.version_columns)\n ))\n conditions.append(getattr(t, col_name) == value)\n all_conditions.append(conditions)\n return all_conditions\n",
"def _get_conditions(pk_conds, and_conds=None):\n \"\"\"If and_conds = [a1, a2, ..., an] and pk_conds = [[b11, b12, ..., b1m], ... [bk1, ..., bkm]],\n this function will return the mysql condition clause:\n a1 & a2 & ... an & ((b11 and ... b1m) or ... (b11 and ... b1m))\n\n :param pk_conds: a list of list of primary key constraints returned by _get_conditions_list\n :param and_conds: additional and conditions to be placed on the query\n \"\"\"\n if and_conds is None:\n and_conds = []\n\n if len(and_conds) == 0 and len(pk_conds) == 0:\n return sa.and_()\n\n condition1 = sa.and_(*and_conds)\n condition2 = sa.or_(*[sa.and_(*cond) for cond in pk_conds])\n return sa.and_(condition1, condition2)\n"
] | from datetime import datetime
import sqlalchemy as sa
from savage import utils
def get(
table,
session,
version_id=None,
t1=None,
t2=None,
fields=None,
conds=None,
include_deleted=True,
page=1,
page_size=100,
):
"""
:param table: the model class which inherits from
:class:`~savage.models.user_table.SavageModelMixin` and specifies the model of
the user table from which we are querying
:param session: a sqlalchemy session with connections to the database
:param version_id: if specified, the value of t1 and t2 will be ignored. If specified, this will
return all records after the specified version_id.
:param t1: lower bound time for this query; if None or unspecified,
defaults to the unix epoch. If this is specified and t2 is not, this query
will simply return the time slice of data at t1. This must either be a valid
sql time string or a datetime.datetime object.
:param t2: upper bound time for this query; if both t1 and t2 are none or unspecified,
this will return the latest data (i.e. time slice of data now). This must either be a
valid sql time string or a datetime.datetime object.
:param fields: a list of strings which corresponds to columns in the table; If
None or unspecified, returns all fields in the table.
:param conds: a list of dictionary of key value pairs where keys are columns in the table
and values are values the column should take on. If specified, this query will
only return rows where the columns meet all the conditions. The columns specified
in this dictionary must be exactly the unique columns that versioning pivots around.
:param include_deleted: if ``True``, the response will include deleted changes. Else it will
only include changes where ``deleted = 0`` i.e. the data was in the user table.
:param page: the offset of the result set (1-indexed); i.e. if page_size is 100 and page is 2,
the result set will contain results 100 - 199
:param page_size: upper bound on number of results to display. Note the actual returned result
set may be smaller than this due to the roll up.
"""
limit, offset = _get_limit_and_offset(page, page_size)
version_col_names = table.version_columns
if fields is None:
fields = [name for name in utils.get_column_names(table) if name != 'version_id']
if version_id is not None:
return _format_response(utils.result_to_dict(session.execute(
sa.select([table.ArchiveTable])
.where(table.ArchiveTable.version_id > version_id)
.order_by(*_get_order_clause(table.ArchiveTable))
.limit(page_size)
.offset(offset)
)), fields, version_col_names)
if t1 is None and t2 is None:
rows = _get_latest_time_slice(table, session, conds, include_deleted, limit, offset)
return _format_response(rows, fields, version_col_names)
if t2 is None: # return a historical time slice
rows = _get_historical_time_slice(
table, session, t1, conds, include_deleted, limit, offset
)
return _format_response(rows, fields, version_col_names)
if t1 is None:
t1 = datetime.utcfromtimestamp(0)
rows = _get_historical_changes(
table, session, conds, t1, t2, include_deleted, limit, offset
)
return _format_response(rows, fields, version_col_names)
def _format_response(rows, fields, unique_col_names):
"""This function will look at the data column of rows and extract the specified fields. It
will also dedup changes where the specified fields have not changed. The list of rows should
be ordered by the compound primary key which versioning pivots around and be in ascending
version order.
This function will return a list of dictionaries where each dictionary has the following
schema:
{
'updated_at': timestamp of the change,
'version': version number for the change,
'data': a nested dictionary containing all keys specified in fields and values
corresponding to values in the user table.
}
Note that some versions may be omitted in the output for the same key if the specified fields
were not changed between versions.
:param rows: a list of dictionaries representing rows from the ArchiveTable.
:param fields: a list of strings of fields to be extracted from the archived row.
"""
output = []
old_id = None
for row in rows:
id_ = {k: row[k] for k in unique_col_names}
formatted = {k: row[k] for k in row if k != 'data'}
if id_ != old_id: # new unique versioned row
data = row['data']
formatted['data'] = {k: data.get(k) for k in fields}
output.append(formatted)
else:
data = row['data']
pruned_data = {k: data.get(k) for k in fields}
if (
pruned_data != output[-1]['data'] or
row['deleted'] != output[-1]['deleted']
):
formatted['data'] = pruned_data
output.append(formatted)
old_id = id_
return output
def _get_conditions(pk_conds, and_conds=None):
"""If and_conds = [a1, a2, ..., an] and pk_conds = [[b11, b12, ..., b1m], ... [bk1, ..., bkm]],
this function will return the mysql condition clause:
a1 & a2 & ... an & ((b11 and ... b1m) or ... (b11 and ... b1m))
:param pk_conds: a list of list of primary key constraints returned by _get_conditions_list
:param and_conds: additional and conditions to be placed on the query
"""
if and_conds is None:
and_conds = []
if len(and_conds) == 0 and len(pk_conds) == 0:
return sa.and_()
condition1 = sa.and_(*and_conds)
condition2 = sa.or_(*[sa.and_(*cond) for cond in pk_conds])
return sa.and_(condition1, condition2)
def _get_conditions_list(table, conds, archive=True):
"""This function returns a list of list of == conditions on sqlalchemy columns given conds.
This should be treated as an or of ands.
:param table: the user table model class which inherits from
savage.models.SavageModelMixin
:param conds: a list of dictionaries of key value pairs where keys are column names and
values are conditions to be placed on the column.
:param archive: If true, the condition is with columns from the archive table. Else its from
the user table.
"""
if conds is None:
conds = []
all_conditions = []
for cond in conds:
if len(cond) != len(table.version_columns):
raise ValueError('Conditions must specify all unique constraints.')
conditions = []
t = table.ArchiveTable if archive else table
for col_name, value in cond.iteritems():
if col_name not in table.version_columns:
raise ValueError('{} is not one of the unique columns <{}>'.format(
col_name, ','.join(table.version_columns)
))
conditions.append(getattr(t, col_name) == value)
all_conditions.append(conditions)
return all_conditions
def _get_historical_changes(table, session, conds, t1, t2, include_deleted, limit, offset):
pk_conditions = _get_conditions_list(table, conds)
and_clause = _get_conditions(
pk_conditions,
[table.ArchiveTable.updated_at >= t1, table.ArchiveTable.updated_at < t2] +
[] if include_deleted else [table.ArchiveTable.deleted.is_(False)],
)
return utils.result_to_dict(session.execute(
sa.select([table.ArchiveTable])
.where(and_clause)
.order_by(*_get_order_clause(table.ArchiveTable))
.limit(limit)
.offset(offset)
))
def _get_historical_time_slice(table, session, t, conds, include_deleted, limit, offset):
at = table.ArchiveTable
vc = table.version_columns
pk_conditions = _get_conditions_list(table, conds)
and_clause = _get_conditions(
pk_conditions,
[at.updated_at <= t] +
[] if include_deleted else [table.ArchiveTable.deleted.is_(False)],
)
t2 = at.__table__.alias('t2')
return utils.result_to_dict(session.execute(
sa.select([at])
.select_from(at.__table__.join(
t2,
sa.and_(
t2.c.updated_at <= t,
at.version_id < t2.c.version_id,
*[getattr(at, c) == getattr(t2.c, c) for c in vc]
),
isouter=True,
))
.where(t2.c.version_id.is_(None) & and_clause)
.order_by(*_get_order_clause(at))
.limit(limit)
.offset(offset)
))
def _get_latest_time_slice(table, session, conds, include_deleted, limit, offset):
and_clause = _get_conditions(
_get_conditions_list(table, conds, archive=False),
[] if include_deleted else [table.ArchiveTable.deleted.is_(False)],
)
result = session.execute(
sa.select([table.ArchiveTable]).select_from(
table.ArchiveTable.__table__.join(
table,
sa.and_(
table.ArchiveTable.version_id == table.version_id,
*[
getattr(table.ArchiveTable, col_name) == getattr(table, col_name)
for col_name in table.version_columns
]
)
)
)
.where(and_clause)
.order_by(*_get_order_clause(table.ArchiveTable))
.limit(limit)
.offset(offset)
)
return utils.result_to_dict(result)
def _get_limit_and_offset(page, page_size):
"""Returns a 0-indexed offset and limit based on page and page_size for a MySQL query.
"""
if page < 1:
raise ValueError('page must be >= 1')
limit = page_size
offset = (page - 1) * page_size
return limit, offset
def _get_order_clause(archive_table):
"""Returns an ascending order clause on the versioned unique constraint as well as the
version column.
"""
order_clause = [
sa.asc(getattr(archive_table, col_name)) for col_name in archive_table._version_col_names
]
order_clause.append(sa.asc(archive_table.version_id))
return order_clause
|
NerdWalletOSS/savage | src/savage/api/data.py | get | python | def get(
table,
session,
version_id=None,
t1=None,
t2=None,
fields=None,
conds=None,
include_deleted=True,
page=1,
page_size=100,
):
limit, offset = _get_limit_and_offset(page, page_size)
version_col_names = table.version_columns
if fields is None:
fields = [name for name in utils.get_column_names(table) if name != 'version_id']
if version_id is not None:
return _format_response(utils.result_to_dict(session.execute(
sa.select([table.ArchiveTable])
.where(table.ArchiveTable.version_id > version_id)
.order_by(*_get_order_clause(table.ArchiveTable))
.limit(page_size)
.offset(offset)
)), fields, version_col_names)
if t1 is None and t2 is None:
rows = _get_latest_time_slice(table, session, conds, include_deleted, limit, offset)
return _format_response(rows, fields, version_col_names)
if t2 is None: # return a historical time slice
rows = _get_historical_time_slice(
table, session, t1, conds, include_deleted, limit, offset
)
return _format_response(rows, fields, version_col_names)
if t1 is None:
t1 = datetime.utcfromtimestamp(0)
rows = _get_historical_changes(
table, session, conds, t1, t2, include_deleted, limit, offset
)
return _format_response(rows, fields, version_col_names) | :param table: the model class which inherits from
:class:`~savage.models.user_table.SavageModelMixin` and specifies the model of
the user table from which we are querying
:param session: a sqlalchemy session with connections to the database
:param version_id: if specified, the value of t1 and t2 will be ignored. If specified, this will
return all records after the specified version_id.
:param t1: lower bound time for this query; if None or unspecified,
defaults to the unix epoch. If this is specified and t2 is not, this query
will simply return the time slice of data at t1. This must either be a valid
sql time string or a datetime.datetime object.
:param t2: upper bound time for this query; if both t1 and t2 are none or unspecified,
this will return the latest data (i.e. time slice of data now). This must either be a
valid sql time string or a datetime.datetime object.
:param fields: a list of strings which corresponds to columns in the table; If
None or unspecified, returns all fields in the table.
:param conds: a list of dictionary of key value pairs where keys are columns in the table
and values are values the column should take on. If specified, this query will
only return rows where the columns meet all the conditions. The columns specified
in this dictionary must be exactly the unique columns that versioning pivots around.
:param include_deleted: if ``True``, the response will include deleted changes. Else it will
only include changes where ``deleted = 0`` i.e. the data was in the user table.
:param page: the offset of the result set (1-indexed); i.e. if page_size is 100 and page is 2,
the result set will contain results 100 - 199
:param page_size: upper bound on number of results to display. Note the actual returned result
set may be smaller than this due to the roll up. | train | https://github.com/NerdWalletOSS/savage/blob/54f64ac1c912528710365107952967d31d56e60d/src/savage/api/data.py#L32-L101 | [
"def _get_limit_and_offset(page, page_size):\n \"\"\"Returns a 0-indexed offset and limit based on page and page_size for a MySQL query.\n \"\"\"\n if page < 1:\n raise ValueError('page must be >= 1')\n limit = page_size\n offset = (page - 1) * page_size\n return limit, offset\n",
"def get_column_names(table):\n \"\"\"Return a generator of names of the name of the column in the sql table.\"\"\"\n return (name for _, name in get_column_keys_and_names(table))\n",
"def _format_response(rows, fields, unique_col_names):\n \"\"\"This function will look at the data column of rows and extract the specified fields. It\n will also dedup changes where the specified fields have not changed. The list of rows should\n be ordered by the compound primary key which versioning pivots around and be in ascending\n version order.\n\n This function will return a list of dictionaries where each dictionary has the following\n schema:\n {\n 'updated_at': timestamp of the change,\n 'version': version number for the change,\n 'data': a nested dictionary containing all keys specified in fields and values\n corresponding to values in the user table.\n }\n\n Note that some versions may be omitted in the output for the same key if the specified fields\n were not changed between versions.\n\n :param rows: a list of dictionaries representing rows from the ArchiveTable.\n :param fields: a list of strings of fields to be extracted from the archived row.\n \"\"\"\n output = []\n old_id = None\n for row in rows:\n id_ = {k: row[k] for k in unique_col_names}\n formatted = {k: row[k] for k in row if k != 'data'}\n if id_ != old_id: # new unique versioned row\n data = row['data']\n formatted['data'] = {k: data.get(k) for k in fields}\n output.append(formatted)\n else:\n data = row['data']\n pruned_data = {k: data.get(k) for k in fields}\n if (\n pruned_data != output[-1]['data'] or\n row['deleted'] != output[-1]['deleted']\n ):\n formatted['data'] = pruned_data\n output.append(formatted)\n old_id = id_\n return output\n",
"def result_to_dict(res):\n \"\"\"\n :param res: :any:`sqlalchemy.engine.ResultProxy`\n\n :return: a list of dicts where each dict represents a row in the query where the key \\\n is the column name and the value is the value of that column.\n \"\"\"\n keys = res.keys()\n return [dict(itertools.izip(keys, row)) for row in res]\n",
"def _get_order_clause(archive_table):\n \"\"\"Returns an ascending order clause on the versioned unique constraint as well as the\n version column.\n \"\"\"\n order_clause = [\n sa.asc(getattr(archive_table, col_name)) for col_name in archive_table._version_col_names\n ]\n order_clause.append(sa.asc(archive_table.version_id))\n return order_clause\n",
"def _get_latest_time_slice(table, session, conds, include_deleted, limit, offset):\n and_clause = _get_conditions(\n _get_conditions_list(table, conds, archive=False),\n [] if include_deleted else [table.ArchiveTable.deleted.is_(False)],\n )\n result = session.execute(\n sa.select([table.ArchiveTable]).select_from(\n table.ArchiveTable.__table__.join(\n table,\n sa.and_(\n table.ArchiveTable.version_id == table.version_id,\n *[\n getattr(table.ArchiveTable, col_name) == getattr(table, col_name)\n for col_name in table.version_columns\n ]\n )\n )\n )\n .where(and_clause)\n .order_by(*_get_order_clause(table.ArchiveTable))\n .limit(limit)\n .offset(offset)\n )\n return utils.result_to_dict(result)\n",
"def _get_historical_time_slice(table, session, t, conds, include_deleted, limit, offset):\n at = table.ArchiveTable\n vc = table.version_columns\n pk_conditions = _get_conditions_list(table, conds)\n and_clause = _get_conditions(\n pk_conditions,\n [at.updated_at <= t] +\n [] if include_deleted else [table.ArchiveTable.deleted.is_(False)],\n )\n t2 = at.__table__.alias('t2')\n return utils.result_to_dict(session.execute(\n sa.select([at])\n .select_from(at.__table__.join(\n t2,\n sa.and_(\n t2.c.updated_at <= t,\n at.version_id < t2.c.version_id,\n *[getattr(at, c) == getattr(t2.c, c) for c in vc]\n ),\n isouter=True,\n ))\n .where(t2.c.version_id.is_(None) & and_clause)\n .order_by(*_get_order_clause(at))\n .limit(limit)\n .offset(offset)\n ))\n",
"def _get_historical_changes(table, session, conds, t1, t2, include_deleted, limit, offset):\n pk_conditions = _get_conditions_list(table, conds)\n and_clause = _get_conditions(\n pk_conditions,\n [table.ArchiveTable.updated_at >= t1, table.ArchiveTable.updated_at < t2] +\n [] if include_deleted else [table.ArchiveTable.deleted.is_(False)],\n )\n\n return utils.result_to_dict(session.execute(\n sa.select([table.ArchiveTable])\n .where(and_clause)\n .order_by(*_get_order_clause(table.ArchiveTable))\n .limit(limit)\n .offset(offset)\n ))\n"
] | from datetime import datetime
import sqlalchemy as sa
from savage import utils
def delete(table, session, conds):
"""Performs a hard delete on a row, which means the row is deleted from the Savage
table as well as the archive table.
:param table: the model class which inherits from
:class:`~savage.models.user_table.SavageModelMixin` and specifies the model
of the user table from which we are querying
:param session: a sqlalchemy session with connections to the database
:param conds: a list of dictionary of key value pairs where keys are columns in the table
and values are values the column should take on. If specified, this query will
only return rows where the columns meet all the conditions. The columns specified
in this dictionary must be exactly the unique columns that versioning pivots around.
"""
with session.begin_nested():
archive_conds_list = _get_conditions_list(table, conds)
session.execute(
sa.delete(table.ArchiveTable, whereclause=_get_conditions(archive_conds_list))
)
conds_list = _get_conditions_list(table, conds, archive=False)
session.execute(
sa.delete(table, whereclause=_get_conditions(conds_list))
)
def _format_response(rows, fields, unique_col_names):
"""This function will look at the data column of rows and extract the specified fields. It
will also dedup changes where the specified fields have not changed. The list of rows should
be ordered by the compound primary key which versioning pivots around and be in ascending
version order.
This function will return a list of dictionaries where each dictionary has the following
schema:
{
'updated_at': timestamp of the change,
'version': version number for the change,
'data': a nested dictionary containing all keys specified in fields and values
corresponding to values in the user table.
}
Note that some versions may be omitted in the output for the same key if the specified fields
were not changed between versions.
:param rows: a list of dictionaries representing rows from the ArchiveTable.
:param fields: a list of strings of fields to be extracted from the archived row.
"""
output = []
old_id = None
for row in rows:
id_ = {k: row[k] for k in unique_col_names}
formatted = {k: row[k] for k in row if k != 'data'}
if id_ != old_id: # new unique versioned row
data = row['data']
formatted['data'] = {k: data.get(k) for k in fields}
output.append(formatted)
else:
data = row['data']
pruned_data = {k: data.get(k) for k in fields}
if (
pruned_data != output[-1]['data'] or
row['deleted'] != output[-1]['deleted']
):
formatted['data'] = pruned_data
output.append(formatted)
old_id = id_
return output
def _get_conditions(pk_conds, and_conds=None):
"""If and_conds = [a1, a2, ..., an] and pk_conds = [[b11, b12, ..., b1m], ... [bk1, ..., bkm]],
this function will return the mysql condition clause:
a1 & a2 & ... an & ((b11 and ... b1m) or ... (b11 and ... b1m))
:param pk_conds: a list of list of primary key constraints returned by _get_conditions_list
:param and_conds: additional and conditions to be placed on the query
"""
if and_conds is None:
and_conds = []
if len(and_conds) == 0 and len(pk_conds) == 0:
return sa.and_()
condition1 = sa.and_(*and_conds)
condition2 = sa.or_(*[sa.and_(*cond) for cond in pk_conds])
return sa.and_(condition1, condition2)
def _get_conditions_list(table, conds, archive=True):
"""This function returns a list of list of == conditions on sqlalchemy columns given conds.
This should be treated as an or of ands.
:param table: the user table model class which inherits from
savage.models.SavageModelMixin
:param conds: a list of dictionaries of key value pairs where keys are column names and
values are conditions to be placed on the column.
:param archive: If true, the condition is with columns from the archive table. Else its from
the user table.
"""
if conds is None:
conds = []
all_conditions = []
for cond in conds:
if len(cond) != len(table.version_columns):
raise ValueError('Conditions must specify all unique constraints.')
conditions = []
t = table.ArchiveTable if archive else table
for col_name, value in cond.iteritems():
if col_name not in table.version_columns:
raise ValueError('{} is not one of the unique columns <{}>'.format(
col_name, ','.join(table.version_columns)
))
conditions.append(getattr(t, col_name) == value)
all_conditions.append(conditions)
return all_conditions
def _get_historical_changes(table, session, conds, t1, t2, include_deleted, limit, offset):
pk_conditions = _get_conditions_list(table, conds)
and_clause = _get_conditions(
pk_conditions,
[table.ArchiveTable.updated_at >= t1, table.ArchiveTable.updated_at < t2] +
[] if include_deleted else [table.ArchiveTable.deleted.is_(False)],
)
return utils.result_to_dict(session.execute(
sa.select([table.ArchiveTable])
.where(and_clause)
.order_by(*_get_order_clause(table.ArchiveTable))
.limit(limit)
.offset(offset)
))
def _get_historical_time_slice(table, session, t, conds, include_deleted, limit, offset):
at = table.ArchiveTable
vc = table.version_columns
pk_conditions = _get_conditions_list(table, conds)
and_clause = _get_conditions(
pk_conditions,
[at.updated_at <= t] +
[] if include_deleted else [table.ArchiveTable.deleted.is_(False)],
)
t2 = at.__table__.alias('t2')
return utils.result_to_dict(session.execute(
sa.select([at])
.select_from(at.__table__.join(
t2,
sa.and_(
t2.c.updated_at <= t,
at.version_id < t2.c.version_id,
*[getattr(at, c) == getattr(t2.c, c) for c in vc]
),
isouter=True,
))
.where(t2.c.version_id.is_(None) & and_clause)
.order_by(*_get_order_clause(at))
.limit(limit)
.offset(offset)
))
def _get_latest_time_slice(table, session, conds, include_deleted, limit, offset):
and_clause = _get_conditions(
_get_conditions_list(table, conds, archive=False),
[] if include_deleted else [table.ArchiveTable.deleted.is_(False)],
)
result = session.execute(
sa.select([table.ArchiveTable]).select_from(
table.ArchiveTable.__table__.join(
table,
sa.and_(
table.ArchiveTable.version_id == table.version_id,
*[
getattr(table.ArchiveTable, col_name) == getattr(table, col_name)
for col_name in table.version_columns
]
)
)
)
.where(and_clause)
.order_by(*_get_order_clause(table.ArchiveTable))
.limit(limit)
.offset(offset)
)
return utils.result_to_dict(result)
def _get_limit_and_offset(page, page_size):
"""Returns a 0-indexed offset and limit based on page and page_size for a MySQL query.
"""
if page < 1:
raise ValueError('page must be >= 1')
limit = page_size
offset = (page - 1) * page_size
return limit, offset
def _get_order_clause(archive_table):
"""Returns an ascending order clause on the versioned unique constraint as well as the
version column.
"""
order_clause = [
sa.asc(getattr(archive_table, col_name)) for col_name in archive_table._version_col_names
]
order_clause.append(sa.asc(archive_table.version_id))
return order_clause
|
NerdWalletOSS/savage | src/savage/api/data.py | _format_response | python | def _format_response(rows, fields, unique_col_names):
output = []
old_id = None
for row in rows:
id_ = {k: row[k] for k in unique_col_names}
formatted = {k: row[k] for k in row if k != 'data'}
if id_ != old_id: # new unique versioned row
data = row['data']
formatted['data'] = {k: data.get(k) for k in fields}
output.append(formatted)
else:
data = row['data']
pruned_data = {k: data.get(k) for k in fields}
if (
pruned_data != output[-1]['data'] or
row['deleted'] != output[-1]['deleted']
):
formatted['data'] = pruned_data
output.append(formatted)
old_id = id_
return output | This function will look at the data column of rows and extract the specified fields. It
will also dedup changes where the specified fields have not changed. The list of rows should
be ordered by the compound primary key which versioning pivots around and be in ascending
version order.
This function will return a list of dictionaries where each dictionary has the following
schema:
{
'updated_at': timestamp of the change,
'version': version number for the change,
'data': a nested dictionary containing all keys specified in fields and values
corresponding to values in the user table.
}
Note that some versions may be omitted in the output for the same key if the specified fields
were not changed between versions.
:param rows: a list of dictionaries representing rows from the ArchiveTable.
:param fields: a list of strings of fields to be extracted from the archived row. | train | https://github.com/NerdWalletOSS/savage/blob/54f64ac1c912528710365107952967d31d56e60d/src/savage/api/data.py#L104-L144 | null | from datetime import datetime
import sqlalchemy as sa
from savage import utils
def delete(table, session, conds):
"""Performs a hard delete on a row, which means the row is deleted from the Savage
table as well as the archive table.
:param table: the model class which inherits from
:class:`~savage.models.user_table.SavageModelMixin` and specifies the model
of the user table from which we are querying
:param session: a sqlalchemy session with connections to the database
:param conds: a list of dictionary of key value pairs where keys are columns in the table
and values are values the column should take on. If specified, this query will
only return rows where the columns meet all the conditions. The columns specified
in this dictionary must be exactly the unique columns that versioning pivots around.
"""
with session.begin_nested():
archive_conds_list = _get_conditions_list(table, conds)
session.execute(
sa.delete(table.ArchiveTable, whereclause=_get_conditions(archive_conds_list))
)
conds_list = _get_conditions_list(table, conds, archive=False)
session.execute(
sa.delete(table, whereclause=_get_conditions(conds_list))
)
def get(
table,
session,
version_id=None,
t1=None,
t2=None,
fields=None,
conds=None,
include_deleted=True,
page=1,
page_size=100,
):
"""
:param table: the model class which inherits from
:class:`~savage.models.user_table.SavageModelMixin` and specifies the model of
the user table from which we are querying
:param session: a sqlalchemy session with connections to the database
:param version_id: if specified, the value of t1 and t2 will be ignored. If specified, this will
return all records after the specified version_id.
:param t1: lower bound time for this query; if None or unspecified,
defaults to the unix epoch. If this is specified and t2 is not, this query
will simply return the time slice of data at t1. This must either be a valid
sql time string or a datetime.datetime object.
:param t2: upper bound time for this query; if both t1 and t2 are none or unspecified,
this will return the latest data (i.e. time slice of data now). This must either be a
valid sql time string or a datetime.datetime object.
:param fields: a list of strings which corresponds to columns in the table; If
None or unspecified, returns all fields in the table.
:param conds: a list of dictionary of key value pairs where keys are columns in the table
and values are values the column should take on. If specified, this query will
only return rows where the columns meet all the conditions. The columns specified
in this dictionary must be exactly the unique columns that versioning pivots around.
:param include_deleted: if ``True``, the response will include deleted changes. Else it will
only include changes where ``deleted = 0`` i.e. the data was in the user table.
:param page: the offset of the result set (1-indexed); i.e. if page_size is 100 and page is 2,
the result set will contain results 100 - 199
:param page_size: upper bound on number of results to display. Note the actual returned result
set may be smaller than this due to the roll up.
"""
limit, offset = _get_limit_and_offset(page, page_size)
version_col_names = table.version_columns
if fields is None:
fields = [name for name in utils.get_column_names(table) if name != 'version_id']
if version_id is not None:
return _format_response(utils.result_to_dict(session.execute(
sa.select([table.ArchiveTable])
.where(table.ArchiveTable.version_id > version_id)
.order_by(*_get_order_clause(table.ArchiveTable))
.limit(page_size)
.offset(offset)
)), fields, version_col_names)
if t1 is None and t2 is None:
rows = _get_latest_time_slice(table, session, conds, include_deleted, limit, offset)
return _format_response(rows, fields, version_col_names)
if t2 is None: # return a historical time slice
rows = _get_historical_time_slice(
table, session, t1, conds, include_deleted, limit, offset
)
return _format_response(rows, fields, version_col_names)
if t1 is None:
t1 = datetime.utcfromtimestamp(0)
rows = _get_historical_changes(
table, session, conds, t1, t2, include_deleted, limit, offset
)
return _format_response(rows, fields, version_col_names)
def _get_conditions(pk_conds, and_conds=None):
"""If and_conds = [a1, a2, ..., an] and pk_conds = [[b11, b12, ..., b1m], ... [bk1, ..., bkm]],
this function will return the mysql condition clause:
a1 & a2 & ... an & ((b11 and ... b1m) or ... (b11 and ... b1m))
:param pk_conds: a list of list of primary key constraints returned by _get_conditions_list
:param and_conds: additional and conditions to be placed on the query
"""
if and_conds is None:
and_conds = []
if len(and_conds) == 0 and len(pk_conds) == 0:
return sa.and_()
condition1 = sa.and_(*and_conds)
condition2 = sa.or_(*[sa.and_(*cond) for cond in pk_conds])
return sa.and_(condition1, condition2)
def _get_conditions_list(table, conds, archive=True):
"""This function returns a list of list of == conditions on sqlalchemy columns given conds.
This should be treated as an or of ands.
:param table: the user table model class which inherits from
savage.models.SavageModelMixin
:param conds: a list of dictionaries of key value pairs where keys are column names and
values are conditions to be placed on the column.
:param archive: If true, the condition is with columns from the archive table. Else its from
the user table.
"""
if conds is None:
conds = []
all_conditions = []
for cond in conds:
if len(cond) != len(table.version_columns):
raise ValueError('Conditions must specify all unique constraints.')
conditions = []
t = table.ArchiveTable if archive else table
for col_name, value in cond.iteritems():
if col_name not in table.version_columns:
raise ValueError('{} is not one of the unique columns <{}>'.format(
col_name, ','.join(table.version_columns)
))
conditions.append(getattr(t, col_name) == value)
all_conditions.append(conditions)
return all_conditions
def _get_historical_changes(table, session, conds, t1, t2, include_deleted, limit, offset):
pk_conditions = _get_conditions_list(table, conds)
and_clause = _get_conditions(
pk_conditions,
[table.ArchiveTable.updated_at >= t1, table.ArchiveTable.updated_at < t2] +
[] if include_deleted else [table.ArchiveTable.deleted.is_(False)],
)
return utils.result_to_dict(session.execute(
sa.select([table.ArchiveTable])
.where(and_clause)
.order_by(*_get_order_clause(table.ArchiveTable))
.limit(limit)
.offset(offset)
))
def _get_historical_time_slice(table, session, t, conds, include_deleted, limit, offset):
at = table.ArchiveTable
vc = table.version_columns
pk_conditions = _get_conditions_list(table, conds)
and_clause = _get_conditions(
pk_conditions,
[at.updated_at <= t] +
[] if include_deleted else [table.ArchiveTable.deleted.is_(False)],
)
t2 = at.__table__.alias('t2')
return utils.result_to_dict(session.execute(
sa.select([at])
.select_from(at.__table__.join(
t2,
sa.and_(
t2.c.updated_at <= t,
at.version_id < t2.c.version_id,
*[getattr(at, c) == getattr(t2.c, c) for c in vc]
),
isouter=True,
))
.where(t2.c.version_id.is_(None) & and_clause)
.order_by(*_get_order_clause(at))
.limit(limit)
.offset(offset)
))
def _get_latest_time_slice(table, session, conds, include_deleted, limit, offset):
and_clause = _get_conditions(
_get_conditions_list(table, conds, archive=False),
[] if include_deleted else [table.ArchiveTable.deleted.is_(False)],
)
result = session.execute(
sa.select([table.ArchiveTable]).select_from(
table.ArchiveTable.__table__.join(
table,
sa.and_(
table.ArchiveTable.version_id == table.version_id,
*[
getattr(table.ArchiveTable, col_name) == getattr(table, col_name)
for col_name in table.version_columns
]
)
)
)
.where(and_clause)
.order_by(*_get_order_clause(table.ArchiveTable))
.limit(limit)
.offset(offset)
)
return utils.result_to_dict(result)
def _get_limit_and_offset(page, page_size):
"""Returns a 0-indexed offset and limit based on page and page_size for a MySQL query.
"""
if page < 1:
raise ValueError('page must be >= 1')
limit = page_size
offset = (page - 1) * page_size
return limit, offset
def _get_order_clause(archive_table):
"""Returns an ascending order clause on the versioned unique constraint as well as the
version column.
"""
order_clause = [
sa.asc(getattr(archive_table, col_name)) for col_name in archive_table._version_col_names
]
order_clause.append(sa.asc(archive_table.version_id))
return order_clause
|
NerdWalletOSS/savage | src/savage/api/data.py | _get_conditions | python | def _get_conditions(pk_conds, and_conds=None):
if and_conds is None:
and_conds = []
if len(and_conds) == 0 and len(pk_conds) == 0:
return sa.and_()
condition1 = sa.and_(*and_conds)
condition2 = sa.or_(*[sa.and_(*cond) for cond in pk_conds])
return sa.and_(condition1, condition2) | If and_conds = [a1, a2, ..., an] and pk_conds = [[b11, b12, ..., b1m], ... [bk1, ..., bkm]],
this function will return the mysql condition clause:
a1 & a2 & ... an & ((b11 and ... b1m) or ... (b11 and ... b1m))
:param pk_conds: a list of list of primary key constraints returned by _get_conditions_list
:param and_conds: additional and conditions to be placed on the query | train | https://github.com/NerdWalletOSS/savage/blob/54f64ac1c912528710365107952967d31d56e60d/src/savage/api/data.py#L147-L163 | null | from datetime import datetime
import sqlalchemy as sa
from savage import utils
def delete(table, session, conds):
"""Performs a hard delete on a row, which means the row is deleted from the Savage
table as well as the archive table.
:param table: the model class which inherits from
:class:`~savage.models.user_table.SavageModelMixin` and specifies the model
of the user table from which we are querying
:param session: a sqlalchemy session with connections to the database
:param conds: a list of dictionary of key value pairs where keys are columns in the table
and values are values the column should take on. If specified, this query will
only return rows where the columns meet all the conditions. The columns specified
in this dictionary must be exactly the unique columns that versioning pivots around.
"""
with session.begin_nested():
archive_conds_list = _get_conditions_list(table, conds)
session.execute(
sa.delete(table.ArchiveTable, whereclause=_get_conditions(archive_conds_list))
)
conds_list = _get_conditions_list(table, conds, archive=False)
session.execute(
sa.delete(table, whereclause=_get_conditions(conds_list))
)
def get(
table,
session,
version_id=None,
t1=None,
t2=None,
fields=None,
conds=None,
include_deleted=True,
page=1,
page_size=100,
):
"""
:param table: the model class which inherits from
:class:`~savage.models.user_table.SavageModelMixin` and specifies the model of
the user table from which we are querying
:param session: a sqlalchemy session with connections to the database
:param version_id: if specified, the value of t1 and t2 will be ignored. If specified, this will
return all records after the specified version_id.
:param t1: lower bound time for this query; if None or unspecified,
defaults to the unix epoch. If this is specified and t2 is not, this query
will simply return the time slice of data at t1. This must either be a valid
sql time string or a datetime.datetime object.
:param t2: upper bound time for this query; if both t1 and t2 are none or unspecified,
this will return the latest data (i.e. time slice of data now). This must either be a
valid sql time string or a datetime.datetime object.
:param fields: a list of strings which corresponds to columns in the table; If
None or unspecified, returns all fields in the table.
:param conds: a list of dictionary of key value pairs where keys are columns in the table
and values are values the column should take on. If specified, this query will
only return rows where the columns meet all the conditions. The columns specified
in this dictionary must be exactly the unique columns that versioning pivots around.
:param include_deleted: if ``True``, the response will include deleted changes. Else it will
only include changes where ``deleted = 0`` i.e. the data was in the user table.
:param page: the offset of the result set (1-indexed); i.e. if page_size is 100 and page is 2,
the result set will contain results 100 - 199
:param page_size: upper bound on number of results to display. Note the actual returned result
set may be smaller than this due to the roll up.
"""
limit, offset = _get_limit_and_offset(page, page_size)
version_col_names = table.version_columns
if fields is None:
fields = [name for name in utils.get_column_names(table) if name != 'version_id']
if version_id is not None:
return _format_response(utils.result_to_dict(session.execute(
sa.select([table.ArchiveTable])
.where(table.ArchiveTable.version_id > version_id)
.order_by(*_get_order_clause(table.ArchiveTable))
.limit(page_size)
.offset(offset)
)), fields, version_col_names)
if t1 is None and t2 is None:
rows = _get_latest_time_slice(table, session, conds, include_deleted, limit, offset)
return _format_response(rows, fields, version_col_names)
if t2 is None: # return a historical time slice
rows = _get_historical_time_slice(
table, session, t1, conds, include_deleted, limit, offset
)
return _format_response(rows, fields, version_col_names)
if t1 is None:
t1 = datetime.utcfromtimestamp(0)
rows = _get_historical_changes(
table, session, conds, t1, t2, include_deleted, limit, offset
)
return _format_response(rows, fields, version_col_names)
def _format_response(rows, fields, unique_col_names):
"""This function will look at the data column of rows and extract the specified fields. It
will also dedup changes where the specified fields have not changed. The list of rows should
be ordered by the compound primary key which versioning pivots around and be in ascending
version order.
This function will return a list of dictionaries where each dictionary has the following
schema:
{
'updated_at': timestamp of the change,
'version': version number for the change,
'data': a nested dictionary containing all keys specified in fields and values
corresponding to values in the user table.
}
Note that some versions may be omitted in the output for the same key if the specified fields
were not changed between versions.
:param rows: a list of dictionaries representing rows from the ArchiveTable.
:param fields: a list of strings of fields to be extracted from the archived row.
"""
output = []
old_id = None
for row in rows:
id_ = {k: row[k] for k in unique_col_names}
formatted = {k: row[k] for k in row if k != 'data'}
if id_ != old_id: # new unique versioned row
data = row['data']
formatted['data'] = {k: data.get(k) for k in fields}
output.append(formatted)
else:
data = row['data']
pruned_data = {k: data.get(k) for k in fields}
if (
pruned_data != output[-1]['data'] or
row['deleted'] != output[-1]['deleted']
):
formatted['data'] = pruned_data
output.append(formatted)
old_id = id_
return output
def _get_conditions_list(table, conds, archive=True):
"""This function returns a list of list of == conditions on sqlalchemy columns given conds.
This should be treated as an or of ands.
:param table: the user table model class which inherits from
savage.models.SavageModelMixin
:param conds: a list of dictionaries of key value pairs where keys are column names and
values are conditions to be placed on the column.
:param archive: If true, the condition is with columns from the archive table. Else its from
the user table.
"""
if conds is None:
conds = []
all_conditions = []
for cond in conds:
if len(cond) != len(table.version_columns):
raise ValueError('Conditions must specify all unique constraints.')
conditions = []
t = table.ArchiveTable if archive else table
for col_name, value in cond.iteritems():
if col_name not in table.version_columns:
raise ValueError('{} is not one of the unique columns <{}>'.format(
col_name, ','.join(table.version_columns)
))
conditions.append(getattr(t, col_name) == value)
all_conditions.append(conditions)
return all_conditions
def _get_historical_changes(table, session, conds, t1, t2, include_deleted, limit, offset):
pk_conditions = _get_conditions_list(table, conds)
and_clause = _get_conditions(
pk_conditions,
[table.ArchiveTable.updated_at >= t1, table.ArchiveTable.updated_at < t2] +
[] if include_deleted else [table.ArchiveTable.deleted.is_(False)],
)
return utils.result_to_dict(session.execute(
sa.select([table.ArchiveTable])
.where(and_clause)
.order_by(*_get_order_clause(table.ArchiveTable))
.limit(limit)
.offset(offset)
))
def _get_historical_time_slice(table, session, t, conds, include_deleted, limit, offset):
at = table.ArchiveTable
vc = table.version_columns
pk_conditions = _get_conditions_list(table, conds)
and_clause = _get_conditions(
pk_conditions,
[at.updated_at <= t] +
[] if include_deleted else [table.ArchiveTable.deleted.is_(False)],
)
t2 = at.__table__.alias('t2')
return utils.result_to_dict(session.execute(
sa.select([at])
.select_from(at.__table__.join(
t2,
sa.and_(
t2.c.updated_at <= t,
at.version_id < t2.c.version_id,
*[getattr(at, c) == getattr(t2.c, c) for c in vc]
),
isouter=True,
))
.where(t2.c.version_id.is_(None) & and_clause)
.order_by(*_get_order_clause(at))
.limit(limit)
.offset(offset)
))
def _get_latest_time_slice(table, session, conds, include_deleted, limit, offset):
and_clause = _get_conditions(
_get_conditions_list(table, conds, archive=False),
[] if include_deleted else [table.ArchiveTable.deleted.is_(False)],
)
result = session.execute(
sa.select([table.ArchiveTable]).select_from(
table.ArchiveTable.__table__.join(
table,
sa.and_(
table.ArchiveTable.version_id == table.version_id,
*[
getattr(table.ArchiveTable, col_name) == getattr(table, col_name)
for col_name in table.version_columns
]
)
)
)
.where(and_clause)
.order_by(*_get_order_clause(table.ArchiveTable))
.limit(limit)
.offset(offset)
)
return utils.result_to_dict(result)
def _get_limit_and_offset(page, page_size):
"""Returns a 0-indexed offset and limit based on page and page_size for a MySQL query.
"""
if page < 1:
raise ValueError('page must be >= 1')
limit = page_size
offset = (page - 1) * page_size
return limit, offset
def _get_order_clause(archive_table):
"""Returns an ascending order clause on the versioned unique constraint as well as the
version column.
"""
order_clause = [
sa.asc(getattr(archive_table, col_name)) for col_name in archive_table._version_col_names
]
order_clause.append(sa.asc(archive_table.version_id))
return order_clause
|
NerdWalletOSS/savage | src/savage/api/data.py | _get_conditions_list | python | def _get_conditions_list(table, conds, archive=True):
if conds is None:
conds = []
all_conditions = []
for cond in conds:
if len(cond) != len(table.version_columns):
raise ValueError('Conditions must specify all unique constraints.')
conditions = []
t = table.ArchiveTable if archive else table
for col_name, value in cond.iteritems():
if col_name not in table.version_columns:
raise ValueError('{} is not one of the unique columns <{}>'.format(
col_name, ','.join(table.version_columns)
))
conditions.append(getattr(t, col_name) == value)
all_conditions.append(conditions)
return all_conditions | This function returns a list of list of == conditions on sqlalchemy columns given conds.
This should be treated as an or of ands.
:param table: the user table model class which inherits from
savage.models.SavageModelMixin
:param conds: a list of dictionaries of key value pairs where keys are column names and
values are conditions to be placed on the column.
:param archive: If true, the condition is with columns from the archive table. Else its from
the user table. | train | https://github.com/NerdWalletOSS/savage/blob/54f64ac1c912528710365107952967d31d56e60d/src/savage/api/data.py#L166-L195 | null | from datetime import datetime
import sqlalchemy as sa
from savage import utils
def delete(table, session, conds):
"""Performs a hard delete on a row, which means the row is deleted from the Savage
table as well as the archive table.
:param table: the model class which inherits from
:class:`~savage.models.user_table.SavageModelMixin` and specifies the model
of the user table from which we are querying
:param session: a sqlalchemy session with connections to the database
:param conds: a list of dictionary of key value pairs where keys are columns in the table
and values are values the column should take on. If specified, this query will
only return rows where the columns meet all the conditions. The columns specified
in this dictionary must be exactly the unique columns that versioning pivots around.
"""
with session.begin_nested():
archive_conds_list = _get_conditions_list(table, conds)
session.execute(
sa.delete(table.ArchiveTable, whereclause=_get_conditions(archive_conds_list))
)
conds_list = _get_conditions_list(table, conds, archive=False)
session.execute(
sa.delete(table, whereclause=_get_conditions(conds_list))
)
def get(
table,
session,
version_id=None,
t1=None,
t2=None,
fields=None,
conds=None,
include_deleted=True,
page=1,
page_size=100,
):
"""
:param table: the model class which inherits from
:class:`~savage.models.user_table.SavageModelMixin` and specifies the model of
the user table from which we are querying
:param session: a sqlalchemy session with connections to the database
:param version_id: if specified, the value of t1 and t2 will be ignored. If specified, this will
return all records after the specified version_id.
:param t1: lower bound time for this query; if None or unspecified,
defaults to the unix epoch. If this is specified and t2 is not, this query
will simply return the time slice of data at t1. This must either be a valid
sql time string or a datetime.datetime object.
:param t2: upper bound time for this query; if both t1 and t2 are none or unspecified,
this will return the latest data (i.e. time slice of data now). This must either be a
valid sql time string or a datetime.datetime object.
:param fields: a list of strings which corresponds to columns in the table; If
None or unspecified, returns all fields in the table.
:param conds: a list of dictionary of key value pairs where keys are columns in the table
and values are values the column should take on. If specified, this query will
only return rows where the columns meet all the conditions. The columns specified
in this dictionary must be exactly the unique columns that versioning pivots around.
:param include_deleted: if ``True``, the response will include deleted changes. Else it will
only include changes where ``deleted = 0`` i.e. the data was in the user table.
:param page: the offset of the result set (1-indexed); i.e. if page_size is 100 and page is 2,
the result set will contain results 100 - 199
:param page_size: upper bound on number of results to display. Note the actual returned result
set may be smaller than this due to the roll up.
"""
limit, offset = _get_limit_and_offset(page, page_size)
version_col_names = table.version_columns
if fields is None:
fields = [name for name in utils.get_column_names(table) if name != 'version_id']
if version_id is not None:
return _format_response(utils.result_to_dict(session.execute(
sa.select([table.ArchiveTable])
.where(table.ArchiveTable.version_id > version_id)
.order_by(*_get_order_clause(table.ArchiveTable))
.limit(page_size)
.offset(offset)
)), fields, version_col_names)
if t1 is None and t2 is None:
rows = _get_latest_time_slice(table, session, conds, include_deleted, limit, offset)
return _format_response(rows, fields, version_col_names)
if t2 is None: # return a historical time slice
rows = _get_historical_time_slice(
table, session, t1, conds, include_deleted, limit, offset
)
return _format_response(rows, fields, version_col_names)
if t1 is None:
t1 = datetime.utcfromtimestamp(0)
rows = _get_historical_changes(
table, session, conds, t1, t2, include_deleted, limit, offset
)
return _format_response(rows, fields, version_col_names)
def _format_response(rows, fields, unique_col_names):
"""This function will look at the data column of rows and extract the specified fields. It
will also dedup changes where the specified fields have not changed. The list of rows should
be ordered by the compound primary key which versioning pivots around and be in ascending
version order.
This function will return a list of dictionaries where each dictionary has the following
schema:
{
'updated_at': timestamp of the change,
'version': version number for the change,
'data': a nested dictionary containing all keys specified in fields and values
corresponding to values in the user table.
}
Note that some versions may be omitted in the output for the same key if the specified fields
were not changed between versions.
:param rows: a list of dictionaries representing rows from the ArchiveTable.
:param fields: a list of strings of fields to be extracted from the archived row.
"""
output = []
old_id = None
for row in rows:
id_ = {k: row[k] for k in unique_col_names}
formatted = {k: row[k] for k in row if k != 'data'}
if id_ != old_id: # new unique versioned row
data = row['data']
formatted['data'] = {k: data.get(k) for k in fields}
output.append(formatted)
else:
data = row['data']
pruned_data = {k: data.get(k) for k in fields}
if (
pruned_data != output[-1]['data'] or
row['deleted'] != output[-1]['deleted']
):
formatted['data'] = pruned_data
output.append(formatted)
old_id = id_
return output
def _get_conditions(pk_conds, and_conds=None):
"""If and_conds = [a1, a2, ..., an] and pk_conds = [[b11, b12, ..., b1m], ... [bk1, ..., bkm]],
this function will return the mysql condition clause:
a1 & a2 & ... an & ((b11 and ... b1m) or ... (b11 and ... b1m))
:param pk_conds: a list of list of primary key constraints returned by _get_conditions_list
:param and_conds: additional and conditions to be placed on the query
"""
if and_conds is None:
and_conds = []
if len(and_conds) == 0 and len(pk_conds) == 0:
return sa.and_()
condition1 = sa.and_(*and_conds)
condition2 = sa.or_(*[sa.and_(*cond) for cond in pk_conds])
return sa.and_(condition1, condition2)
def _get_historical_changes(table, session, conds, t1, t2, include_deleted, limit, offset):
pk_conditions = _get_conditions_list(table, conds)
and_clause = _get_conditions(
pk_conditions,
[table.ArchiveTable.updated_at >= t1, table.ArchiveTable.updated_at < t2] +
[] if include_deleted else [table.ArchiveTable.deleted.is_(False)],
)
return utils.result_to_dict(session.execute(
sa.select([table.ArchiveTable])
.where(and_clause)
.order_by(*_get_order_clause(table.ArchiveTable))
.limit(limit)
.offset(offset)
))
def _get_historical_time_slice(table, session, t, conds, include_deleted, limit, offset):
at = table.ArchiveTable
vc = table.version_columns
pk_conditions = _get_conditions_list(table, conds)
and_clause = _get_conditions(
pk_conditions,
[at.updated_at <= t] +
[] if include_deleted else [table.ArchiveTable.deleted.is_(False)],
)
t2 = at.__table__.alias('t2')
return utils.result_to_dict(session.execute(
sa.select([at])
.select_from(at.__table__.join(
t2,
sa.and_(
t2.c.updated_at <= t,
at.version_id < t2.c.version_id,
*[getattr(at, c) == getattr(t2.c, c) for c in vc]
),
isouter=True,
))
.where(t2.c.version_id.is_(None) & and_clause)
.order_by(*_get_order_clause(at))
.limit(limit)
.offset(offset)
))
def _get_latest_time_slice(table, session, conds, include_deleted, limit, offset):
and_clause = _get_conditions(
_get_conditions_list(table, conds, archive=False),
[] if include_deleted else [table.ArchiveTable.deleted.is_(False)],
)
result = session.execute(
sa.select([table.ArchiveTable]).select_from(
table.ArchiveTable.__table__.join(
table,
sa.and_(
table.ArchiveTable.version_id == table.version_id,
*[
getattr(table.ArchiveTable, col_name) == getattr(table, col_name)
for col_name in table.version_columns
]
)
)
)
.where(and_clause)
.order_by(*_get_order_clause(table.ArchiveTable))
.limit(limit)
.offset(offset)
)
return utils.result_to_dict(result)
def _get_limit_and_offset(page, page_size):
"""Returns a 0-indexed offset and limit based on page and page_size for a MySQL query.
"""
if page < 1:
raise ValueError('page must be >= 1')
limit = page_size
offset = (page - 1) * page_size
return limit, offset
def _get_order_clause(archive_table):
"""Returns an ascending order clause on the versioned unique constraint as well as the
version column.
"""
order_clause = [
sa.asc(getattr(archive_table, col_name)) for col_name in archive_table._version_col_names
]
order_clause.append(sa.asc(archive_table.version_id))
return order_clause
|
NerdWalletOSS/savage | src/savage/api/data.py | _get_limit_and_offset | python | def _get_limit_and_offset(page, page_size):
if page < 1:
raise ValueError('page must be >= 1')
limit = page_size
offset = (page - 1) * page_size
return limit, offset | Returns a 0-indexed offset and limit based on page and page_size for a MySQL query. | train | https://github.com/NerdWalletOSS/savage/blob/54f64ac1c912528710365107952967d31d56e60d/src/savage/api/data.py#L269-L276 | null | from datetime import datetime
import sqlalchemy as sa
from savage import utils
def delete(table, session, conds):
"""Performs a hard delete on a row, which means the row is deleted from the Savage
table as well as the archive table.
:param table: the model class which inherits from
:class:`~savage.models.user_table.SavageModelMixin` and specifies the model
of the user table from which we are querying
:param session: a sqlalchemy session with connections to the database
:param conds: a list of dictionary of key value pairs where keys are columns in the table
and values are values the column should take on. If specified, this query will
only return rows where the columns meet all the conditions. The columns specified
in this dictionary must be exactly the unique columns that versioning pivots around.
"""
with session.begin_nested():
archive_conds_list = _get_conditions_list(table, conds)
session.execute(
sa.delete(table.ArchiveTable, whereclause=_get_conditions(archive_conds_list))
)
conds_list = _get_conditions_list(table, conds, archive=False)
session.execute(
sa.delete(table, whereclause=_get_conditions(conds_list))
)
def get(
table,
session,
version_id=None,
t1=None,
t2=None,
fields=None,
conds=None,
include_deleted=True,
page=1,
page_size=100,
):
"""
:param table: the model class which inherits from
:class:`~savage.models.user_table.SavageModelMixin` and specifies the model of
the user table from which we are querying
:param session: a sqlalchemy session with connections to the database
:param version_id: if specified, the value of t1 and t2 will be ignored. If specified, this will
return all records after the specified version_id.
:param t1: lower bound time for this query; if None or unspecified,
defaults to the unix epoch. If this is specified and t2 is not, this query
will simply return the time slice of data at t1. This must either be a valid
sql time string or a datetime.datetime object.
:param t2: upper bound time for this query; if both t1 and t2 are none or unspecified,
this will return the latest data (i.e. time slice of data now). This must either be a
valid sql time string or a datetime.datetime object.
:param fields: a list of strings which corresponds to columns in the table; If
None or unspecified, returns all fields in the table.
:param conds: a list of dictionary of key value pairs where keys are columns in the table
and values are values the column should take on. If specified, this query will
only return rows where the columns meet all the conditions. The columns specified
in this dictionary must be exactly the unique columns that versioning pivots around.
:param include_deleted: if ``True``, the response will include deleted changes. Else it will
only include changes where ``deleted = 0`` i.e. the data was in the user table.
:param page: the offset of the result set (1-indexed); i.e. if page_size is 100 and page is 2,
the result set will contain results 100 - 199
:param page_size: upper bound on number of results to display. Note the actual returned result
set may be smaller than this due to the roll up.
"""
limit, offset = _get_limit_and_offset(page, page_size)
version_col_names = table.version_columns
if fields is None:
fields = [name for name in utils.get_column_names(table) if name != 'version_id']
if version_id is not None:
return _format_response(utils.result_to_dict(session.execute(
sa.select([table.ArchiveTable])
.where(table.ArchiveTable.version_id > version_id)
.order_by(*_get_order_clause(table.ArchiveTable))
.limit(page_size)
.offset(offset)
)), fields, version_col_names)
if t1 is None and t2 is None:
rows = _get_latest_time_slice(table, session, conds, include_deleted, limit, offset)
return _format_response(rows, fields, version_col_names)
if t2 is None: # return a historical time slice
rows = _get_historical_time_slice(
table, session, t1, conds, include_deleted, limit, offset
)
return _format_response(rows, fields, version_col_names)
if t1 is None:
t1 = datetime.utcfromtimestamp(0)
rows = _get_historical_changes(
table, session, conds, t1, t2, include_deleted, limit, offset
)
return _format_response(rows, fields, version_col_names)
def _format_response(rows, fields, unique_col_names):
"""This function will look at the data column of rows and extract the specified fields. It
will also dedup changes where the specified fields have not changed. The list of rows should
be ordered by the compound primary key which versioning pivots around and be in ascending
version order.
This function will return a list of dictionaries where each dictionary has the following
schema:
{
'updated_at': timestamp of the change,
'version': version number for the change,
'data': a nested dictionary containing all keys specified in fields and values
corresponding to values in the user table.
}
Note that some versions may be omitted in the output for the same key if the specified fields
were not changed between versions.
:param rows: a list of dictionaries representing rows from the ArchiveTable.
:param fields: a list of strings of fields to be extracted from the archived row.
"""
output = []
old_id = None
for row in rows:
id_ = {k: row[k] for k in unique_col_names}
formatted = {k: row[k] for k in row if k != 'data'}
if id_ != old_id: # new unique versioned row
data = row['data']
formatted['data'] = {k: data.get(k) for k in fields}
output.append(formatted)
else:
data = row['data']
pruned_data = {k: data.get(k) for k in fields}
if (
pruned_data != output[-1]['data'] or
row['deleted'] != output[-1]['deleted']
):
formatted['data'] = pruned_data
output.append(formatted)
old_id = id_
return output
def _get_conditions(pk_conds, and_conds=None):
"""If and_conds = [a1, a2, ..., an] and pk_conds = [[b11, b12, ..., b1m], ... [bk1, ..., bkm]],
this function will return the mysql condition clause:
a1 & a2 & ... an & ((b11 and ... b1m) or ... (b11 and ... b1m))
:param pk_conds: a list of list of primary key constraints returned by _get_conditions_list
:param and_conds: additional and conditions to be placed on the query
"""
if and_conds is None:
and_conds = []
if len(and_conds) == 0 and len(pk_conds) == 0:
return sa.and_()
condition1 = sa.and_(*and_conds)
condition2 = sa.or_(*[sa.and_(*cond) for cond in pk_conds])
return sa.and_(condition1, condition2)
def _get_conditions_list(table, conds, archive=True):
"""This function returns a list of list of == conditions on sqlalchemy columns given conds.
This should be treated as an or of ands.
:param table: the user table model class which inherits from
savage.models.SavageModelMixin
:param conds: a list of dictionaries of key value pairs where keys are column names and
values are conditions to be placed on the column.
:param archive: If true, the condition is with columns from the archive table. Else its from
the user table.
"""
if conds is None:
conds = []
all_conditions = []
for cond in conds:
if len(cond) != len(table.version_columns):
raise ValueError('Conditions must specify all unique constraints.')
conditions = []
t = table.ArchiveTable if archive else table
for col_name, value in cond.iteritems():
if col_name not in table.version_columns:
raise ValueError('{} is not one of the unique columns <{}>'.format(
col_name, ','.join(table.version_columns)
))
conditions.append(getattr(t, col_name) == value)
all_conditions.append(conditions)
return all_conditions
def _get_historical_changes(table, session, conds, t1, t2, include_deleted, limit, offset):
pk_conditions = _get_conditions_list(table, conds)
and_clause = _get_conditions(
pk_conditions,
[table.ArchiveTable.updated_at >= t1, table.ArchiveTable.updated_at < t2] +
[] if include_deleted else [table.ArchiveTable.deleted.is_(False)],
)
return utils.result_to_dict(session.execute(
sa.select([table.ArchiveTable])
.where(and_clause)
.order_by(*_get_order_clause(table.ArchiveTable))
.limit(limit)
.offset(offset)
))
def _get_historical_time_slice(table, session, t, conds, include_deleted, limit, offset):
at = table.ArchiveTable
vc = table.version_columns
pk_conditions = _get_conditions_list(table, conds)
and_clause = _get_conditions(
pk_conditions,
[at.updated_at <= t] +
[] if include_deleted else [table.ArchiveTable.deleted.is_(False)],
)
t2 = at.__table__.alias('t2')
return utils.result_to_dict(session.execute(
sa.select([at])
.select_from(at.__table__.join(
t2,
sa.and_(
t2.c.updated_at <= t,
at.version_id < t2.c.version_id,
*[getattr(at, c) == getattr(t2.c, c) for c in vc]
),
isouter=True,
))
.where(t2.c.version_id.is_(None) & and_clause)
.order_by(*_get_order_clause(at))
.limit(limit)
.offset(offset)
))
def _get_latest_time_slice(table, session, conds, include_deleted, limit, offset):
and_clause = _get_conditions(
_get_conditions_list(table, conds, archive=False),
[] if include_deleted else [table.ArchiveTable.deleted.is_(False)],
)
result = session.execute(
sa.select([table.ArchiveTable]).select_from(
table.ArchiveTable.__table__.join(
table,
sa.and_(
table.ArchiveTable.version_id == table.version_id,
*[
getattr(table.ArchiveTable, col_name) == getattr(table, col_name)
for col_name in table.version_columns
]
)
)
)
.where(and_clause)
.order_by(*_get_order_clause(table.ArchiveTable))
.limit(limit)
.offset(offset)
)
return utils.result_to_dict(result)
def _get_order_clause(archive_table):
"""Returns an ascending order clause on the versioned unique constraint as well as the
version column.
"""
order_clause = [
sa.asc(getattr(archive_table, col_name)) for col_name in archive_table._version_col_names
]
order_clause.append(sa.asc(archive_table.version_id))
return order_clause
|
NerdWalletOSS/savage | src/savage/api/data.py | _get_order_clause | python | def _get_order_clause(archive_table):
order_clause = [
sa.asc(getattr(archive_table, col_name)) for col_name in archive_table._version_col_names
]
order_clause.append(sa.asc(archive_table.version_id))
return order_clause | Returns an ascending order clause on the versioned unique constraint as well as the
version column. | train | https://github.com/NerdWalletOSS/savage/blob/54f64ac1c912528710365107952967d31d56e60d/src/savage/api/data.py#L279-L287 | null | from datetime import datetime
import sqlalchemy as sa
from savage import utils
def delete(table, session, conds):
"""Performs a hard delete on a row, which means the row is deleted from the Savage
table as well as the archive table.
:param table: the model class which inherits from
:class:`~savage.models.user_table.SavageModelMixin` and specifies the model
of the user table from which we are querying
:param session: a sqlalchemy session with connections to the database
:param conds: a list of dictionary of key value pairs where keys are columns in the table
and values are values the column should take on. If specified, this query will
only return rows where the columns meet all the conditions. The columns specified
in this dictionary must be exactly the unique columns that versioning pivots around.
"""
with session.begin_nested():
archive_conds_list = _get_conditions_list(table, conds)
session.execute(
sa.delete(table.ArchiveTable, whereclause=_get_conditions(archive_conds_list))
)
conds_list = _get_conditions_list(table, conds, archive=False)
session.execute(
sa.delete(table, whereclause=_get_conditions(conds_list))
)
def get(
table,
session,
version_id=None,
t1=None,
t2=None,
fields=None,
conds=None,
include_deleted=True,
page=1,
page_size=100,
):
"""
:param table: the model class which inherits from
:class:`~savage.models.user_table.SavageModelMixin` and specifies the model of
the user table from which we are querying
:param session: a sqlalchemy session with connections to the database
:param version_id: if specified, the value of t1 and t2 will be ignored. If specified, this will
return all records after the specified version_id.
:param t1: lower bound time for this query; if None or unspecified,
defaults to the unix epoch. If this is specified and t2 is not, this query
will simply return the time slice of data at t1. This must either be a valid
sql time string or a datetime.datetime object.
:param t2: upper bound time for this query; if both t1 and t2 are none or unspecified,
this will return the latest data (i.e. time slice of data now). This must either be a
valid sql time string or a datetime.datetime object.
:param fields: a list of strings which corresponds to columns in the table; If
None or unspecified, returns all fields in the table.
:param conds: a list of dictionary of key value pairs where keys are columns in the table
and values are values the column should take on. If specified, this query will
only return rows where the columns meet all the conditions. The columns specified
in this dictionary must be exactly the unique columns that versioning pivots around.
:param include_deleted: if ``True``, the response will include deleted changes. Else it will
only include changes where ``deleted = 0`` i.e. the data was in the user table.
:param page: the offset of the result set (1-indexed); i.e. if page_size is 100 and page is 2,
the result set will contain results 100 - 199
:param page_size: upper bound on number of results to display. Note the actual returned result
set may be smaller than this due to the roll up.
"""
limit, offset = _get_limit_and_offset(page, page_size)
version_col_names = table.version_columns
if fields is None:
fields = [name for name in utils.get_column_names(table) if name != 'version_id']
if version_id is not None:
return _format_response(utils.result_to_dict(session.execute(
sa.select([table.ArchiveTable])
.where(table.ArchiveTable.version_id > version_id)
.order_by(*_get_order_clause(table.ArchiveTable))
.limit(page_size)
.offset(offset)
)), fields, version_col_names)
if t1 is None and t2 is None:
rows = _get_latest_time_slice(table, session, conds, include_deleted, limit, offset)
return _format_response(rows, fields, version_col_names)
if t2 is None: # return a historical time slice
rows = _get_historical_time_slice(
table, session, t1, conds, include_deleted, limit, offset
)
return _format_response(rows, fields, version_col_names)
if t1 is None:
t1 = datetime.utcfromtimestamp(0)
rows = _get_historical_changes(
table, session, conds, t1, t2, include_deleted, limit, offset
)
return _format_response(rows, fields, version_col_names)
def _format_response(rows, fields, unique_col_names):
"""This function will look at the data column of rows and extract the specified fields. It
will also dedup changes where the specified fields have not changed. The list of rows should
be ordered by the compound primary key which versioning pivots around and be in ascending
version order.
This function will return a list of dictionaries where each dictionary has the following
schema:
{
'updated_at': timestamp of the change,
'version': version number for the change,
'data': a nested dictionary containing all keys specified in fields and values
corresponding to values in the user table.
}
Note that some versions may be omitted in the output for the same key if the specified fields
were not changed between versions.
:param rows: a list of dictionaries representing rows from the ArchiveTable.
:param fields: a list of strings of fields to be extracted from the archived row.
"""
output = []
old_id = None
for row in rows:
id_ = {k: row[k] for k in unique_col_names}
formatted = {k: row[k] for k in row if k != 'data'}
if id_ != old_id: # new unique versioned row
data = row['data']
formatted['data'] = {k: data.get(k) for k in fields}
output.append(formatted)
else:
data = row['data']
pruned_data = {k: data.get(k) for k in fields}
if (
pruned_data != output[-1]['data'] or
row['deleted'] != output[-1]['deleted']
):
formatted['data'] = pruned_data
output.append(formatted)
old_id = id_
return output
def _get_conditions(pk_conds, and_conds=None):
"""If and_conds = [a1, a2, ..., an] and pk_conds = [[b11, b12, ..., b1m], ... [bk1, ..., bkm]],
this function will return the mysql condition clause:
a1 & a2 & ... an & ((b11 and ... b1m) or ... (b11 and ... b1m))
:param pk_conds: a list of list of primary key constraints returned by _get_conditions_list
:param and_conds: additional and conditions to be placed on the query
"""
if and_conds is None:
and_conds = []
if len(and_conds) == 0 and len(pk_conds) == 0:
return sa.and_()
condition1 = sa.and_(*and_conds)
condition2 = sa.or_(*[sa.and_(*cond) for cond in pk_conds])
return sa.and_(condition1, condition2)
def _get_conditions_list(table, conds, archive=True):
"""This function returns a list of list of == conditions on sqlalchemy columns given conds.
This should be treated as an or of ands.
:param table: the user table model class which inherits from
savage.models.SavageModelMixin
:param conds: a list of dictionaries of key value pairs where keys are column names and
values are conditions to be placed on the column.
:param archive: If true, the condition is with columns from the archive table. Else its from
the user table.
"""
if conds is None:
conds = []
all_conditions = []
for cond in conds:
if len(cond) != len(table.version_columns):
raise ValueError('Conditions must specify all unique constraints.')
conditions = []
t = table.ArchiveTable if archive else table
for col_name, value in cond.iteritems():
if col_name not in table.version_columns:
raise ValueError('{} is not one of the unique columns <{}>'.format(
col_name, ','.join(table.version_columns)
))
conditions.append(getattr(t, col_name) == value)
all_conditions.append(conditions)
return all_conditions
def _get_historical_changes(table, session, conds, t1, t2, include_deleted, limit, offset):
pk_conditions = _get_conditions_list(table, conds)
and_clause = _get_conditions(
pk_conditions,
[table.ArchiveTable.updated_at >= t1, table.ArchiveTable.updated_at < t2] +
[] if include_deleted else [table.ArchiveTable.deleted.is_(False)],
)
return utils.result_to_dict(session.execute(
sa.select([table.ArchiveTable])
.where(and_clause)
.order_by(*_get_order_clause(table.ArchiveTable))
.limit(limit)
.offset(offset)
))
def _get_historical_time_slice(table, session, t, conds, include_deleted, limit, offset):
at = table.ArchiveTable
vc = table.version_columns
pk_conditions = _get_conditions_list(table, conds)
and_clause = _get_conditions(
pk_conditions,
[at.updated_at <= t] +
[] if include_deleted else [table.ArchiveTable.deleted.is_(False)],
)
t2 = at.__table__.alias('t2')
return utils.result_to_dict(session.execute(
sa.select([at])
.select_from(at.__table__.join(
t2,
sa.and_(
t2.c.updated_at <= t,
at.version_id < t2.c.version_id,
*[getattr(at, c) == getattr(t2.c, c) for c in vc]
),
isouter=True,
))
.where(t2.c.version_id.is_(None) & and_clause)
.order_by(*_get_order_clause(at))
.limit(limit)
.offset(offset)
))
def _get_latest_time_slice(table, session, conds, include_deleted, limit, offset):
and_clause = _get_conditions(
_get_conditions_list(table, conds, archive=False),
[] if include_deleted else [table.ArchiveTable.deleted.is_(False)],
)
result = session.execute(
sa.select([table.ArchiveTable]).select_from(
table.ArchiveTable.__table__.join(
table,
sa.and_(
table.ArchiveTable.version_id == table.version_id,
*[
getattr(table.ArchiveTable, col_name) == getattr(table, col_name)
for col_name in table.version_columns
]
)
)
)
.where(and_clause)
.order_by(*_get_order_clause(table.ArchiveTable))
.limit(limit)
.offset(offset)
)
return utils.result_to_dict(result)
def _get_limit_and_offset(page, page_size):
"""Returns a 0-indexed offset and limit based on page and page_size for a MySQL query.
"""
if page < 1:
raise ValueError('page must be >= 1')
limit = page_size
offset = (page - 1) * page_size
return limit, offset
|
NerdWalletOSS/savage | src/savage/utils.py | result_to_dict | python | def result_to_dict(res):
keys = res.keys()
return [dict(itertools.izip(keys, row)) for row in res] | :param res: :any:`sqlalchemy.engine.ResultProxy`
:return: a list of dicts where each dict represents a row in the query where the key \
is the column name and the value is the value of that column. | train | https://github.com/NerdWalletOSS/savage/blob/54f64ac1c912528710365107952967d31d56e60d/src/savage/utils.py#L11-L19 | null | import datetime
import itertools
from functools import partial
import simplejson as json
from sqlalchemy import inspect, TypeDecorator, UniqueConstraint
from sqlalchemy.dialects.postgresql import JSON, JSONB
from sqlalchemy.engine.reflection import Inspector
def get_bind_processor(column_type, dialect):
"""
Returns a bind processor for a column type and dialect, with special handling
for JSON/JSONB column types to return dictionaries instead of serialized JSON strings.
NOTE: This is a workaround for https://github.com/NerdWalletOSS/savage/issues/8
:param column_type: :py:class:`~sqlalchemy.sql.type_api.TypeEngine`
:param dialect: :py:class:`~sqlalchemy.engine.interfaces.Dialect`
:return: bind processor for given column type and dialect
"""
if column_type.compile(dialect) not in {'JSON', 'JSONB'}:
# For non-JSON/JSONB column types, return the column type's bind processor
return column_type.bind_processor(dialect)
if type(column_type) in {JSON, JSONB}:
# For bare JSON/JSONB types, we simply skip bind processing altogether
return None
elif isinstance(column_type, TypeDecorator) and column_type._has_bind_processor:
# For decorated JSON/JSONB types, we return the custom bind processor (if any)
return partial(column_type.process_bind_param, dialect=dialect)
else:
# For all other cases, we fall back to deserializing the result of the bind processor
def wrapped_bind_processor(value):
json_deserializer = dialect._json_deserializer or json.loads
return json_deserializer(column_type.bind_processor(dialect)(value))
return wrapped_bind_processor
def get_column_attribute(row, col_name, use_dirty=True, dialect=None):
"""
:param row: the row object
:param col_name: the column name
:param use_dirty: whether to return the dirty value of the column
:param dialect: if not None, should be a :py:class:`~sqlalchemy.engine.interfaces.Dialect`. If \
specified, this function will process the column attribute into the dialect type before \
returning it; useful if one is using user defined column types in their mappers.
:return: if :any:`use_dirty`, this will return the value of col_name on the row before it was \
changed; else this will return getattr(row, col_name)
"""
def identity(x):
return x
bind_processor = None
if dialect:
column_type = getattr(type(row), col_name).type
bind_processor = get_bind_processor(column_type, dialect)
bind_processor = bind_processor or identity
current_value = bind_processor(getattr(row, col_name))
if use_dirty:
return current_value
hist = getattr(inspect(row).attrs, col_name).history
if not hist.has_changes():
return current_value
elif hist.deleted:
return bind_processor(hist.deleted[0])
return None
def get_column_keys(table):
"""Return a generator of names of the python attribute for the table columns."""
return (key for key, _ in get_column_keys_and_names(table))
def get_column_names(table):
"""Return a generator of names of the name of the column in the sql table."""
return (name for _, name in get_column_keys_and_names(table))
def get_column_keys_and_names(table):
"""
Return a generator of tuples k, c such that k is the name of the python attribute for
the column and c is the name of the column in the sql table.
"""
ins = inspect(table)
return ((k, c.name) for k, c in ins.mapper.c.items())
def get_dialect(session):
return session.bind.dialect
def has_constraint(model, engine, *col_names): # pragma: no cover
"""
:param model: model class to check
:param engine: SQLAlchemy engine
:param col_names: the name of columns which the unique constraint should contain
:rtype: bool
:return: True if the given columns are part of a unique constraint on model
"""
table_name = model.__tablename__
if engine.dialect.has_table(engine, table_name):
# Use SQLAlchemy reflection to determine unique constraints
insp = Inspector.from_engine(engine)
constraints = itertools.chain(
(sorted(x['column_names']) for x in insp.get_unique_constraints(table_name)),
sorted(insp.get_pk_constraint(table_name)['constrained_columns']),
)
return sorted(col_names) in constraints
else:
# Needed to validate test models pre-creation
constrained_cols = set()
for arg in getattr(model, '__table_args__', []):
if isinstance(arg, UniqueConstraint):
constrained_cols.update([c.name for c in arg.columns])
for c in model.__table__.columns:
if c.primary_key or c.unique:
constrained_cols.add(c.name)
return constrained_cols.issuperset(col_names)
def is_modified(row, dialect):
"""
Has the row data been modified?
This method inspects the row, and iterates over all columns looking for changes
to the (processed) data, skipping over unmodified columns.
:param row: SQLAlchemy model instance
:param dialect: :py:class:`~sqlalchemy.engine.interfaces.Dialect`
:return: True if any columns were modified, else False
"""
ins = inspect(row)
modified_cols = set(get_column_keys(ins.mapper)) - ins.unmodified
for col_name in modified_cols:
current_value = get_column_attribute(row, col_name, dialect=dialect)
previous_value = get_column_attribute(row, col_name, use_dirty=False, dialect=dialect)
if previous_value != current_value:
return True
return False
class SavageJSONEncoder(json.JSONEncoder):
"""Extends the default encoder to add support for serializing datetime objects.
Currently, this uses the `datetime.isoformat()` method; the resulting string
can be reloaded into a MySQL/Postgres TIMESTAMP column directly.
(This was verified on MySQL 5.6 and Postgres 9.6)
"""
def default(self, obj):
if isinstance(obj, datetime.datetime):
return obj.isoformat()
return super(SavageJSONEncoder, self).default(obj)
savage_json_serializer = partial(json.dumps, cls=SavageJSONEncoder)
|
NerdWalletOSS/savage | src/savage/utils.py | get_bind_processor | python | def get_bind_processor(column_type, dialect):
if column_type.compile(dialect) not in {'JSON', 'JSONB'}:
# For non-JSON/JSONB column types, return the column type's bind processor
return column_type.bind_processor(dialect)
if type(column_type) in {JSON, JSONB}:
# For bare JSON/JSONB types, we simply skip bind processing altogether
return None
elif isinstance(column_type, TypeDecorator) and column_type._has_bind_processor:
# For decorated JSON/JSONB types, we return the custom bind processor (if any)
return partial(column_type.process_bind_param, dialect=dialect)
else:
# For all other cases, we fall back to deserializing the result of the bind processor
def wrapped_bind_processor(value):
json_deserializer = dialect._json_deserializer or json.loads
return json_deserializer(column_type.bind_processor(dialect)(value))
return wrapped_bind_processor | Returns a bind processor for a column type and dialect, with special handling
for JSON/JSONB column types to return dictionaries instead of serialized JSON strings.
NOTE: This is a workaround for https://github.com/NerdWalletOSS/savage/issues/8
:param column_type: :py:class:`~sqlalchemy.sql.type_api.TypeEngine`
:param dialect: :py:class:`~sqlalchemy.engine.interfaces.Dialect`
:return: bind processor for given column type and dialect | train | https://github.com/NerdWalletOSS/savage/blob/54f64ac1c912528710365107952967d31d56e60d/src/savage/utils.py#L22-L48 | null | import datetime
import itertools
from functools import partial
import simplejson as json
from sqlalchemy import inspect, TypeDecorator, UniqueConstraint
from sqlalchemy.dialects.postgresql import JSON, JSONB
from sqlalchemy.engine.reflection import Inspector
def result_to_dict(res):
"""
:param res: :any:`sqlalchemy.engine.ResultProxy`
:return: a list of dicts where each dict represents a row in the query where the key \
is the column name and the value is the value of that column.
"""
keys = res.keys()
return [dict(itertools.izip(keys, row)) for row in res]
def get_column_attribute(row, col_name, use_dirty=True, dialect=None):
"""
:param row: the row object
:param col_name: the column name
:param use_dirty: whether to return the dirty value of the column
:param dialect: if not None, should be a :py:class:`~sqlalchemy.engine.interfaces.Dialect`. If \
specified, this function will process the column attribute into the dialect type before \
returning it; useful if one is using user defined column types in their mappers.
:return: if :any:`use_dirty`, this will return the value of col_name on the row before it was \
changed; else this will return getattr(row, col_name)
"""
def identity(x):
return x
bind_processor = None
if dialect:
column_type = getattr(type(row), col_name).type
bind_processor = get_bind_processor(column_type, dialect)
bind_processor = bind_processor or identity
current_value = bind_processor(getattr(row, col_name))
if use_dirty:
return current_value
hist = getattr(inspect(row).attrs, col_name).history
if not hist.has_changes():
return current_value
elif hist.deleted:
return bind_processor(hist.deleted[0])
return None
def get_column_keys(table):
"""Return a generator of names of the python attribute for the table columns."""
return (key for key, _ in get_column_keys_and_names(table))
def get_column_names(table):
"""Return a generator of names of the name of the column in the sql table."""
return (name for _, name in get_column_keys_and_names(table))
def get_column_keys_and_names(table):
"""
Return a generator of tuples k, c such that k is the name of the python attribute for
the column and c is the name of the column in the sql table.
"""
ins = inspect(table)
return ((k, c.name) for k, c in ins.mapper.c.items())
def get_dialect(session):
return session.bind.dialect
def has_constraint(model, engine, *col_names): # pragma: no cover
"""
:param model: model class to check
:param engine: SQLAlchemy engine
:param col_names: the name of columns which the unique constraint should contain
:rtype: bool
:return: True if the given columns are part of a unique constraint on model
"""
table_name = model.__tablename__
if engine.dialect.has_table(engine, table_name):
# Use SQLAlchemy reflection to determine unique constraints
insp = Inspector.from_engine(engine)
constraints = itertools.chain(
(sorted(x['column_names']) for x in insp.get_unique_constraints(table_name)),
sorted(insp.get_pk_constraint(table_name)['constrained_columns']),
)
return sorted(col_names) in constraints
else:
# Needed to validate test models pre-creation
constrained_cols = set()
for arg in getattr(model, '__table_args__', []):
if isinstance(arg, UniqueConstraint):
constrained_cols.update([c.name for c in arg.columns])
for c in model.__table__.columns:
if c.primary_key or c.unique:
constrained_cols.add(c.name)
return constrained_cols.issuperset(col_names)
def is_modified(row, dialect):
"""
Has the row data been modified?
This method inspects the row, and iterates over all columns looking for changes
to the (processed) data, skipping over unmodified columns.
:param row: SQLAlchemy model instance
:param dialect: :py:class:`~sqlalchemy.engine.interfaces.Dialect`
:return: True if any columns were modified, else False
"""
ins = inspect(row)
modified_cols = set(get_column_keys(ins.mapper)) - ins.unmodified
for col_name in modified_cols:
current_value = get_column_attribute(row, col_name, dialect=dialect)
previous_value = get_column_attribute(row, col_name, use_dirty=False, dialect=dialect)
if previous_value != current_value:
return True
return False
class SavageJSONEncoder(json.JSONEncoder):
"""Extends the default encoder to add support for serializing datetime objects.
Currently, this uses the `datetime.isoformat()` method; the resulting string
can be reloaded into a MySQL/Postgres TIMESTAMP column directly.
(This was verified on MySQL 5.6 and Postgres 9.6)
"""
def default(self, obj):
if isinstance(obj, datetime.datetime):
return obj.isoformat()
return super(SavageJSONEncoder, self).default(obj)
savage_json_serializer = partial(json.dumps, cls=SavageJSONEncoder)
|
NerdWalletOSS/savage | src/savage/utils.py | get_column_attribute | python | def get_column_attribute(row, col_name, use_dirty=True, dialect=None):
def identity(x):
return x
bind_processor = None
if dialect:
column_type = getattr(type(row), col_name).type
bind_processor = get_bind_processor(column_type, dialect)
bind_processor = bind_processor or identity
current_value = bind_processor(getattr(row, col_name))
if use_dirty:
return current_value
hist = getattr(inspect(row).attrs, col_name).history
if not hist.has_changes():
return current_value
elif hist.deleted:
return bind_processor(hist.deleted[0])
return None | :param row: the row object
:param col_name: the column name
:param use_dirty: whether to return the dirty value of the column
:param dialect: if not None, should be a :py:class:`~sqlalchemy.engine.interfaces.Dialect`. If \
specified, this function will process the column attribute into the dialect type before \
returning it; useful if one is using user defined column types in their mappers.
:return: if :any:`use_dirty`, this will return the value of col_name on the row before it was \
changed; else this will return getattr(row, col_name) | train | https://github.com/NerdWalletOSS/savage/blob/54f64ac1c912528710365107952967d31d56e60d/src/savage/utils.py#L51-L80 | [
"def get_bind_processor(column_type, dialect):\n \"\"\"\n Returns a bind processor for a column type and dialect, with special handling\n for JSON/JSONB column types to return dictionaries instead of serialized JSON strings.\n\n NOTE: This is a workaround for https://github.com/NerdWalletOSS/savage/issues/8\n\n :param column_type: :py:class:`~sqlalchemy.sql.type_api.TypeEngine`\n :param dialect: :py:class:`~sqlalchemy.engine.interfaces.Dialect`\n :return: bind processor for given column type and dialect\n \"\"\"\n if column_type.compile(dialect) not in {'JSON', 'JSONB'}:\n # For non-JSON/JSONB column types, return the column type's bind processor\n return column_type.bind_processor(dialect)\n\n if type(column_type) in {JSON, JSONB}:\n # For bare JSON/JSONB types, we simply skip bind processing altogether\n return None\n elif isinstance(column_type, TypeDecorator) and column_type._has_bind_processor:\n # For decorated JSON/JSONB types, we return the custom bind processor (if any)\n return partial(column_type.process_bind_param, dialect=dialect)\n else:\n # For all other cases, we fall back to deserializing the result of the bind processor\n def wrapped_bind_processor(value):\n json_deserializer = dialect._json_deserializer or json.loads\n return json_deserializer(column_type.bind_processor(dialect)(value))\n return wrapped_bind_processor\n"
] | import datetime
import itertools
from functools import partial
import simplejson as json
from sqlalchemy import inspect, TypeDecorator, UniqueConstraint
from sqlalchemy.dialects.postgresql import JSON, JSONB
from sqlalchemy.engine.reflection import Inspector
def result_to_dict(res):
"""
:param res: :any:`sqlalchemy.engine.ResultProxy`
:return: a list of dicts where each dict represents a row in the query where the key \
is the column name and the value is the value of that column.
"""
keys = res.keys()
return [dict(itertools.izip(keys, row)) for row in res]
def get_bind_processor(column_type, dialect):
"""
Returns a bind processor for a column type and dialect, with special handling
for JSON/JSONB column types to return dictionaries instead of serialized JSON strings.
NOTE: This is a workaround for https://github.com/NerdWalletOSS/savage/issues/8
:param column_type: :py:class:`~sqlalchemy.sql.type_api.TypeEngine`
:param dialect: :py:class:`~sqlalchemy.engine.interfaces.Dialect`
:return: bind processor for given column type and dialect
"""
if column_type.compile(dialect) not in {'JSON', 'JSONB'}:
# For non-JSON/JSONB column types, return the column type's bind processor
return column_type.bind_processor(dialect)
if type(column_type) in {JSON, JSONB}:
# For bare JSON/JSONB types, we simply skip bind processing altogether
return None
elif isinstance(column_type, TypeDecorator) and column_type._has_bind_processor:
# For decorated JSON/JSONB types, we return the custom bind processor (if any)
return partial(column_type.process_bind_param, dialect=dialect)
else:
# For all other cases, we fall back to deserializing the result of the bind processor
def wrapped_bind_processor(value):
json_deserializer = dialect._json_deserializer or json.loads
return json_deserializer(column_type.bind_processor(dialect)(value))
return wrapped_bind_processor
def get_column_keys(table):
"""Return a generator of names of the python attribute for the table columns."""
return (key for key, _ in get_column_keys_and_names(table))
def get_column_names(table):
"""Return a generator of names of the name of the column in the sql table."""
return (name for _, name in get_column_keys_and_names(table))
def get_column_keys_and_names(table):
"""
Return a generator of tuples k, c such that k is the name of the python attribute for
the column and c is the name of the column in the sql table.
"""
ins = inspect(table)
return ((k, c.name) for k, c in ins.mapper.c.items())
def get_dialect(session):
return session.bind.dialect
def has_constraint(model, engine, *col_names): # pragma: no cover
"""
:param model: model class to check
:param engine: SQLAlchemy engine
:param col_names: the name of columns which the unique constraint should contain
:rtype: bool
:return: True if the given columns are part of a unique constraint on model
"""
table_name = model.__tablename__
if engine.dialect.has_table(engine, table_name):
# Use SQLAlchemy reflection to determine unique constraints
insp = Inspector.from_engine(engine)
constraints = itertools.chain(
(sorted(x['column_names']) for x in insp.get_unique_constraints(table_name)),
sorted(insp.get_pk_constraint(table_name)['constrained_columns']),
)
return sorted(col_names) in constraints
else:
# Needed to validate test models pre-creation
constrained_cols = set()
for arg in getattr(model, '__table_args__', []):
if isinstance(arg, UniqueConstraint):
constrained_cols.update([c.name for c in arg.columns])
for c in model.__table__.columns:
if c.primary_key or c.unique:
constrained_cols.add(c.name)
return constrained_cols.issuperset(col_names)
def is_modified(row, dialect):
"""
Has the row data been modified?
This method inspects the row, and iterates over all columns looking for changes
to the (processed) data, skipping over unmodified columns.
:param row: SQLAlchemy model instance
:param dialect: :py:class:`~sqlalchemy.engine.interfaces.Dialect`
:return: True if any columns were modified, else False
"""
ins = inspect(row)
modified_cols = set(get_column_keys(ins.mapper)) - ins.unmodified
for col_name in modified_cols:
current_value = get_column_attribute(row, col_name, dialect=dialect)
previous_value = get_column_attribute(row, col_name, use_dirty=False, dialect=dialect)
if previous_value != current_value:
return True
return False
class SavageJSONEncoder(json.JSONEncoder):
"""Extends the default encoder to add support for serializing datetime objects.
Currently, this uses the `datetime.isoformat()` method; the resulting string
can be reloaded into a MySQL/Postgres TIMESTAMP column directly.
(This was verified on MySQL 5.6 and Postgres 9.6)
"""
def default(self, obj):
if isinstance(obj, datetime.datetime):
return obj.isoformat()
return super(SavageJSONEncoder, self).default(obj)
savage_json_serializer = partial(json.dumps, cls=SavageJSONEncoder)
|
NerdWalletOSS/savage | src/savage/utils.py | get_column_keys_and_names | python | def get_column_keys_and_names(table):
ins = inspect(table)
return ((k, c.name) for k, c in ins.mapper.c.items()) | Return a generator of tuples k, c such that k is the name of the python attribute for
the column and c is the name of the column in the sql table. | train | https://github.com/NerdWalletOSS/savage/blob/54f64ac1c912528710365107952967d31d56e60d/src/savage/utils.py#L93-L99 | null | import datetime
import itertools
from functools import partial
import simplejson as json
from sqlalchemy import inspect, TypeDecorator, UniqueConstraint
from sqlalchemy.dialects.postgresql import JSON, JSONB
from sqlalchemy.engine.reflection import Inspector
def result_to_dict(res):
"""
:param res: :any:`sqlalchemy.engine.ResultProxy`
:return: a list of dicts where each dict represents a row in the query where the key \
is the column name and the value is the value of that column.
"""
keys = res.keys()
return [dict(itertools.izip(keys, row)) for row in res]
def get_bind_processor(column_type, dialect):
"""
Returns a bind processor for a column type and dialect, with special handling
for JSON/JSONB column types to return dictionaries instead of serialized JSON strings.
NOTE: This is a workaround for https://github.com/NerdWalletOSS/savage/issues/8
:param column_type: :py:class:`~sqlalchemy.sql.type_api.TypeEngine`
:param dialect: :py:class:`~sqlalchemy.engine.interfaces.Dialect`
:return: bind processor for given column type and dialect
"""
if column_type.compile(dialect) not in {'JSON', 'JSONB'}:
# For non-JSON/JSONB column types, return the column type's bind processor
return column_type.bind_processor(dialect)
if type(column_type) in {JSON, JSONB}:
# For bare JSON/JSONB types, we simply skip bind processing altogether
return None
elif isinstance(column_type, TypeDecorator) and column_type._has_bind_processor:
# For decorated JSON/JSONB types, we return the custom bind processor (if any)
return partial(column_type.process_bind_param, dialect=dialect)
else:
# For all other cases, we fall back to deserializing the result of the bind processor
def wrapped_bind_processor(value):
json_deserializer = dialect._json_deserializer or json.loads
return json_deserializer(column_type.bind_processor(dialect)(value))
return wrapped_bind_processor
def get_column_attribute(row, col_name, use_dirty=True, dialect=None):
"""
:param row: the row object
:param col_name: the column name
:param use_dirty: whether to return the dirty value of the column
:param dialect: if not None, should be a :py:class:`~sqlalchemy.engine.interfaces.Dialect`. If \
specified, this function will process the column attribute into the dialect type before \
returning it; useful if one is using user defined column types in their mappers.
:return: if :any:`use_dirty`, this will return the value of col_name on the row before it was \
changed; else this will return getattr(row, col_name)
"""
def identity(x):
return x
bind_processor = None
if dialect:
column_type = getattr(type(row), col_name).type
bind_processor = get_bind_processor(column_type, dialect)
bind_processor = bind_processor or identity
current_value = bind_processor(getattr(row, col_name))
if use_dirty:
return current_value
hist = getattr(inspect(row).attrs, col_name).history
if not hist.has_changes():
return current_value
elif hist.deleted:
return bind_processor(hist.deleted[0])
return None
def get_column_keys(table):
"""Return a generator of names of the python attribute for the table columns."""
return (key for key, _ in get_column_keys_and_names(table))
def get_column_names(table):
"""Return a generator of names of the name of the column in the sql table."""
return (name for _, name in get_column_keys_and_names(table))
def get_dialect(session):
return session.bind.dialect
def has_constraint(model, engine, *col_names): # pragma: no cover
"""
:param model: model class to check
:param engine: SQLAlchemy engine
:param col_names: the name of columns which the unique constraint should contain
:rtype: bool
:return: True if the given columns are part of a unique constraint on model
"""
table_name = model.__tablename__
if engine.dialect.has_table(engine, table_name):
# Use SQLAlchemy reflection to determine unique constraints
insp = Inspector.from_engine(engine)
constraints = itertools.chain(
(sorted(x['column_names']) for x in insp.get_unique_constraints(table_name)),
sorted(insp.get_pk_constraint(table_name)['constrained_columns']),
)
return sorted(col_names) in constraints
else:
# Needed to validate test models pre-creation
constrained_cols = set()
for arg in getattr(model, '__table_args__', []):
if isinstance(arg, UniqueConstraint):
constrained_cols.update([c.name for c in arg.columns])
for c in model.__table__.columns:
if c.primary_key or c.unique:
constrained_cols.add(c.name)
return constrained_cols.issuperset(col_names)
def is_modified(row, dialect):
"""
Has the row data been modified?
This method inspects the row, and iterates over all columns looking for changes
to the (processed) data, skipping over unmodified columns.
:param row: SQLAlchemy model instance
:param dialect: :py:class:`~sqlalchemy.engine.interfaces.Dialect`
:return: True if any columns were modified, else False
"""
ins = inspect(row)
modified_cols = set(get_column_keys(ins.mapper)) - ins.unmodified
for col_name in modified_cols:
current_value = get_column_attribute(row, col_name, dialect=dialect)
previous_value = get_column_attribute(row, col_name, use_dirty=False, dialect=dialect)
if previous_value != current_value:
return True
return False
class SavageJSONEncoder(json.JSONEncoder):
"""Extends the default encoder to add support for serializing datetime objects.
Currently, this uses the `datetime.isoformat()` method; the resulting string
can be reloaded into a MySQL/Postgres TIMESTAMP column directly.
(This was verified on MySQL 5.6 and Postgres 9.6)
"""
def default(self, obj):
if isinstance(obj, datetime.datetime):
return obj.isoformat()
return super(SavageJSONEncoder, self).default(obj)
savage_json_serializer = partial(json.dumps, cls=SavageJSONEncoder)
|
NerdWalletOSS/savage | src/savage/utils.py | has_constraint | python | def has_constraint(model, engine, *col_names): # pragma: no cover
table_name = model.__tablename__
if engine.dialect.has_table(engine, table_name):
# Use SQLAlchemy reflection to determine unique constraints
insp = Inspector.from_engine(engine)
constraints = itertools.chain(
(sorted(x['column_names']) for x in insp.get_unique_constraints(table_name)),
sorted(insp.get_pk_constraint(table_name)['constrained_columns']),
)
return sorted(col_names) in constraints
else:
# Needed to validate test models pre-creation
constrained_cols = set()
for arg in getattr(model, '__table_args__', []):
if isinstance(arg, UniqueConstraint):
constrained_cols.update([c.name for c in arg.columns])
for c in model.__table__.columns:
if c.primary_key or c.unique:
constrained_cols.add(c.name)
return constrained_cols.issuperset(col_names) | :param model: model class to check
:param engine: SQLAlchemy engine
:param col_names: the name of columns which the unique constraint should contain
:rtype: bool
:return: True if the given columns are part of a unique constraint on model | train | https://github.com/NerdWalletOSS/savage/blob/54f64ac1c912528710365107952967d31d56e60d/src/savage/utils.py#L106-L133 | null | import datetime
import itertools
from functools import partial
import simplejson as json
from sqlalchemy import inspect, TypeDecorator, UniqueConstraint
from sqlalchemy.dialects.postgresql import JSON, JSONB
from sqlalchemy.engine.reflection import Inspector
def result_to_dict(res):
"""
:param res: :any:`sqlalchemy.engine.ResultProxy`
:return: a list of dicts where each dict represents a row in the query where the key \
is the column name and the value is the value of that column.
"""
keys = res.keys()
return [dict(itertools.izip(keys, row)) for row in res]
def get_bind_processor(column_type, dialect):
"""
Returns a bind processor for a column type and dialect, with special handling
for JSON/JSONB column types to return dictionaries instead of serialized JSON strings.
NOTE: This is a workaround for https://github.com/NerdWalletOSS/savage/issues/8
:param column_type: :py:class:`~sqlalchemy.sql.type_api.TypeEngine`
:param dialect: :py:class:`~sqlalchemy.engine.interfaces.Dialect`
:return: bind processor for given column type and dialect
"""
if column_type.compile(dialect) not in {'JSON', 'JSONB'}:
# For non-JSON/JSONB column types, return the column type's bind processor
return column_type.bind_processor(dialect)
if type(column_type) in {JSON, JSONB}:
# For bare JSON/JSONB types, we simply skip bind processing altogether
return None
elif isinstance(column_type, TypeDecorator) and column_type._has_bind_processor:
# For decorated JSON/JSONB types, we return the custom bind processor (if any)
return partial(column_type.process_bind_param, dialect=dialect)
else:
# For all other cases, we fall back to deserializing the result of the bind processor
def wrapped_bind_processor(value):
json_deserializer = dialect._json_deserializer or json.loads
return json_deserializer(column_type.bind_processor(dialect)(value))
return wrapped_bind_processor
def get_column_attribute(row, col_name, use_dirty=True, dialect=None):
"""
:param row: the row object
:param col_name: the column name
:param use_dirty: whether to return the dirty value of the column
:param dialect: if not None, should be a :py:class:`~sqlalchemy.engine.interfaces.Dialect`. If \
specified, this function will process the column attribute into the dialect type before \
returning it; useful if one is using user defined column types in their mappers.
:return: if :any:`use_dirty`, this will return the value of col_name on the row before it was \
changed; else this will return getattr(row, col_name)
"""
def identity(x):
return x
bind_processor = None
if dialect:
column_type = getattr(type(row), col_name).type
bind_processor = get_bind_processor(column_type, dialect)
bind_processor = bind_processor or identity
current_value = bind_processor(getattr(row, col_name))
if use_dirty:
return current_value
hist = getattr(inspect(row).attrs, col_name).history
if not hist.has_changes():
return current_value
elif hist.deleted:
return bind_processor(hist.deleted[0])
return None
def get_column_keys(table):
"""Return a generator of names of the python attribute for the table columns."""
return (key for key, _ in get_column_keys_and_names(table))
def get_column_names(table):
"""Return a generator of names of the name of the column in the sql table."""
return (name for _, name in get_column_keys_and_names(table))
def get_column_keys_and_names(table):
"""
Return a generator of tuples k, c such that k is the name of the python attribute for
the column and c is the name of the column in the sql table.
"""
ins = inspect(table)
return ((k, c.name) for k, c in ins.mapper.c.items())
def get_dialect(session):
return session.bind.dialect
def is_modified(row, dialect):
"""
Has the row data been modified?
This method inspects the row, and iterates over all columns looking for changes
to the (processed) data, skipping over unmodified columns.
:param row: SQLAlchemy model instance
:param dialect: :py:class:`~sqlalchemy.engine.interfaces.Dialect`
:return: True if any columns were modified, else False
"""
ins = inspect(row)
modified_cols = set(get_column_keys(ins.mapper)) - ins.unmodified
for col_name in modified_cols:
current_value = get_column_attribute(row, col_name, dialect=dialect)
previous_value = get_column_attribute(row, col_name, use_dirty=False, dialect=dialect)
if previous_value != current_value:
return True
return False
class SavageJSONEncoder(json.JSONEncoder):
"""Extends the default encoder to add support for serializing datetime objects.
Currently, this uses the `datetime.isoformat()` method; the resulting string
can be reloaded into a MySQL/Postgres TIMESTAMP column directly.
(This was verified on MySQL 5.6 and Postgres 9.6)
"""
def default(self, obj):
if isinstance(obj, datetime.datetime):
return obj.isoformat()
return super(SavageJSONEncoder, self).default(obj)
savage_json_serializer = partial(json.dumps, cls=SavageJSONEncoder)
|
NerdWalletOSS/savage | src/savage/utils.py | is_modified | python | def is_modified(row, dialect):
ins = inspect(row)
modified_cols = set(get_column_keys(ins.mapper)) - ins.unmodified
for col_name in modified_cols:
current_value = get_column_attribute(row, col_name, dialect=dialect)
previous_value = get_column_attribute(row, col_name, use_dirty=False, dialect=dialect)
if previous_value != current_value:
return True
return False | Has the row data been modified?
This method inspects the row, and iterates over all columns looking for changes
to the (processed) data, skipping over unmodified columns.
:param row: SQLAlchemy model instance
:param dialect: :py:class:`~sqlalchemy.engine.interfaces.Dialect`
:return: True if any columns were modified, else False | train | https://github.com/NerdWalletOSS/savage/blob/54f64ac1c912528710365107952967d31d56e60d/src/savage/utils.py#L136-L154 | [
"def get_column_attribute(row, col_name, use_dirty=True, dialect=None):\n \"\"\"\n :param row: the row object\n :param col_name: the column name\n :param use_dirty: whether to return the dirty value of the column\n :param dialect: if not None, should be a :py:class:`~sqlalchemy.engine.interfaces.Dialect`. If \\\n specified, this function will process the column attribute into the dialect type before \\\n returning it; useful if one is using user defined column types in their mappers.\n\n :return: if :any:`use_dirty`, this will return the value of col_name on the row before it was \\\n changed; else this will return getattr(row, col_name)\n \"\"\"\n def identity(x):\n return x\n\n bind_processor = None\n if dialect:\n column_type = getattr(type(row), col_name).type\n bind_processor = get_bind_processor(column_type, dialect)\n bind_processor = bind_processor or identity\n current_value = bind_processor(getattr(row, col_name))\n if use_dirty:\n return current_value\n\n hist = getattr(inspect(row).attrs, col_name).history\n if not hist.has_changes():\n return current_value\n elif hist.deleted:\n return bind_processor(hist.deleted[0])\n return None\n",
"def get_column_keys(table):\n \"\"\"Return a generator of names of the python attribute for the table columns.\"\"\"\n return (key for key, _ in get_column_keys_and_names(table))\n"
] | import datetime
import itertools
from functools import partial
import simplejson as json
from sqlalchemy import inspect, TypeDecorator, UniqueConstraint
from sqlalchemy.dialects.postgresql import JSON, JSONB
from sqlalchemy.engine.reflection import Inspector
def result_to_dict(res):
"""
:param res: :any:`sqlalchemy.engine.ResultProxy`
:return: a list of dicts where each dict represents a row in the query where the key \
is the column name and the value is the value of that column.
"""
keys = res.keys()
return [dict(itertools.izip(keys, row)) for row in res]
def get_bind_processor(column_type, dialect):
"""
Returns a bind processor for a column type and dialect, with special handling
for JSON/JSONB column types to return dictionaries instead of serialized JSON strings.
NOTE: This is a workaround for https://github.com/NerdWalletOSS/savage/issues/8
:param column_type: :py:class:`~sqlalchemy.sql.type_api.TypeEngine`
:param dialect: :py:class:`~sqlalchemy.engine.interfaces.Dialect`
:return: bind processor for given column type and dialect
"""
if column_type.compile(dialect) not in {'JSON', 'JSONB'}:
# For non-JSON/JSONB column types, return the column type's bind processor
return column_type.bind_processor(dialect)
if type(column_type) in {JSON, JSONB}:
# For bare JSON/JSONB types, we simply skip bind processing altogether
return None
elif isinstance(column_type, TypeDecorator) and column_type._has_bind_processor:
# For decorated JSON/JSONB types, we return the custom bind processor (if any)
return partial(column_type.process_bind_param, dialect=dialect)
else:
# For all other cases, we fall back to deserializing the result of the bind processor
def wrapped_bind_processor(value):
json_deserializer = dialect._json_deserializer or json.loads
return json_deserializer(column_type.bind_processor(dialect)(value))
return wrapped_bind_processor
def get_column_attribute(row, col_name, use_dirty=True, dialect=None):
"""
:param row: the row object
:param col_name: the column name
:param use_dirty: whether to return the dirty value of the column
:param dialect: if not None, should be a :py:class:`~sqlalchemy.engine.interfaces.Dialect`. If \
specified, this function will process the column attribute into the dialect type before \
returning it; useful if one is using user defined column types in their mappers.
:return: if :any:`use_dirty`, this will return the value of col_name on the row before it was \
changed; else this will return getattr(row, col_name)
"""
def identity(x):
return x
bind_processor = None
if dialect:
column_type = getattr(type(row), col_name).type
bind_processor = get_bind_processor(column_type, dialect)
bind_processor = bind_processor or identity
current_value = bind_processor(getattr(row, col_name))
if use_dirty:
return current_value
hist = getattr(inspect(row).attrs, col_name).history
if not hist.has_changes():
return current_value
elif hist.deleted:
return bind_processor(hist.deleted[0])
return None
def get_column_keys(table):
"""Return a generator of names of the python attribute for the table columns."""
return (key for key, _ in get_column_keys_and_names(table))
def get_column_names(table):
"""Return a generator of names of the name of the column in the sql table."""
return (name for _, name in get_column_keys_and_names(table))
def get_column_keys_and_names(table):
"""
Return a generator of tuples k, c such that k is the name of the python attribute for
the column and c is the name of the column in the sql table.
"""
ins = inspect(table)
return ((k, c.name) for k, c in ins.mapper.c.items())
def get_dialect(session):
return session.bind.dialect
def has_constraint(model, engine, *col_names): # pragma: no cover
"""
:param model: model class to check
:param engine: SQLAlchemy engine
:param col_names: the name of columns which the unique constraint should contain
:rtype: bool
:return: True if the given columns are part of a unique constraint on model
"""
table_name = model.__tablename__
if engine.dialect.has_table(engine, table_name):
# Use SQLAlchemy reflection to determine unique constraints
insp = Inspector.from_engine(engine)
constraints = itertools.chain(
(sorted(x['column_names']) for x in insp.get_unique_constraints(table_name)),
sorted(insp.get_pk_constraint(table_name)['constrained_columns']),
)
return sorted(col_names) in constraints
else:
# Needed to validate test models pre-creation
constrained_cols = set()
for arg in getattr(model, '__table_args__', []):
if isinstance(arg, UniqueConstraint):
constrained_cols.update([c.name for c in arg.columns])
for c in model.__table__.columns:
if c.primary_key or c.unique:
constrained_cols.add(c.name)
return constrained_cols.issuperset(col_names)
class SavageJSONEncoder(json.JSONEncoder):
"""Extends the default encoder to add support for serializing datetime objects.
Currently, this uses the `datetime.isoformat()` method; the resulting string
can be reloaded into a MySQL/Postgres TIMESTAMP column directly.
(This was verified on MySQL 5.6 and Postgres 9.6)
"""
def default(self, obj):
if isinstance(obj, datetime.datetime):
return obj.isoformat()
return super(SavageJSONEncoder, self).default(obj)
savage_json_serializer = partial(json.dumps, cls=SavageJSONEncoder)
|
NerdWalletOSS/savage | src/savage/__init__.py | _before_flush_handler | python | def _before_flush_handler(session, _flush_context, _instances):
dialect = get_dialect(session)
for row in session.dirty:
if isinstance(row, SavageModelMixin) and is_modified(row, dialect):
# Update row version_id
row.update_version_id() | Update version ID for all dirty, modified rows | train | https://github.com/NerdWalletOSS/savage/blob/54f64ac1c912528710365107952967d31d56e60d/src/savage/__init__.py#L46-L52 | null | """
SQLAlchemy events integration to automatically archive data on archived model changes.
# Basic strategy
For all archived models, store a version ID which is set by default on insert
and updated when the model is updated. We use the PG function `txid_current()`,
which is guaranteed to be monotonically increasing. We rely on archiving data after flush
because server side generated values (like `id`/`version_id`) are only populated at that point.
*Before flush*: Check all dirty records and update row version ID if the record was modified.
*After flush*: Archive all new/deleted records, and any dirty records where version ID changed.
## Special note on deleted records
Because there is no new row version ID generated during a deletion, the archive row can't set its
version ID to the row version ID without leading to a DB integrity error. Instead, archive rows
for deleted data use `txid_current()` for version ID (see `SavageLogMixin.build_row_dict`).
"""
from sqlalchemy import event, insert, inspect
from sqlalchemy.orm import Session
from savage.exceptions import LogTableCreationError
from savage.models import SavageModelMixin
from savage.utils import get_column_attribute, get_dialect, is_modified
_initialized = False
def init():
global _initialized
if _initialized:
return
_initialized = True
event.listen(Session, 'before_flush', _before_flush_handler)
event.listen(Session, 'after_flush', _after_flush_handler)
def is_initialized():
global _initialized
return _initialized
def _after_flush_handler(session, _flush_context):
"""Archive all new/updated/deleted data"""
dialect = get_dialect(session)
handlers = [
(_versioned_delete, session.deleted),
(_versioned_insert, session.new),
(_versioned_update, session.dirty),
]
for handler, rows in handlers:
# TODO: Bulk archive insert statements
for row in rows:
if not isinstance(row, SavageModelMixin):
continue
if not hasattr(row, 'ArchiveTable'):
raise LogTableCreationError('Need to register Savage tables!!')
user_id = getattr(row, '_updated_by', None)
handler(row, session, user_id, dialect)
def _versioned_delete(row, *args):
_archive_row(row, *args, deleted=True)
def _versioned_insert(row, *args):
_archive_row(row, *args)
def _versioned_update(row, *args):
# Do nothing if version_id is unchanged
previous_version_id = get_column_attribute(row, 'version_id', use_dirty=False)
if previous_version_id == row.version_id:
return
# Check if composite key has been changed
row_attrs = inspect(row).attrs
composite_key_changed = any(
getattr(row_attrs, col).history.has_changes() for col in row.version_columns
)
if composite_key_changed:
# Add deleted archive entry for pre-changed state, but with current version_id
_archive_row(row, *args, deleted=True, use_dirty=False)
_archive_row(row, *args)
def _archive_row(row, session, user_id, dialect, **kwargs):
archive_table = row.ArchiveTable
archive_row_dict = archive_table.build_row_dict(row, dialect, user_id=user_id, **kwargs)
session.execute(insert(archive_table), archive_row_dict)
|
NerdWalletOSS/savage | src/savage/__init__.py | _after_flush_handler | python | def _after_flush_handler(session, _flush_context):
dialect = get_dialect(session)
handlers = [
(_versioned_delete, session.deleted),
(_versioned_insert, session.new),
(_versioned_update, session.dirty),
]
for handler, rows in handlers:
# TODO: Bulk archive insert statements
for row in rows:
if not isinstance(row, SavageModelMixin):
continue
if not hasattr(row, 'ArchiveTable'):
raise LogTableCreationError('Need to register Savage tables!!')
user_id = getattr(row, '_updated_by', None)
handler(row, session, user_id, dialect) | Archive all new/updated/deleted data | train | https://github.com/NerdWalletOSS/savage/blob/54f64ac1c912528710365107952967d31d56e60d/src/savage/__init__.py#L55-L71 | null | """
SQLAlchemy events integration to automatically archive data on archived model changes.
# Basic strategy
For all archived models, store a version ID which is set by default on insert
and updated when the model is updated. We use the PG function `txid_current()`,
which is guaranteed to be monotonically increasing. We rely on archiving data after flush
because server side generated values (like `id`/`version_id`) are only populated at that point.
*Before flush*: Check all dirty records and update row version ID if the record was modified.
*After flush*: Archive all new/deleted records, and any dirty records where version ID changed.
## Special note on deleted records
Because there is no new row version ID generated during a deletion, the archive row can't set its
version ID to the row version ID without leading to a DB integrity error. Instead, archive rows
for deleted data use `txid_current()` for version ID (see `SavageLogMixin.build_row_dict`).
"""
from sqlalchemy import event, insert, inspect
from sqlalchemy.orm import Session
from savage.exceptions import LogTableCreationError
from savage.models import SavageModelMixin
from savage.utils import get_column_attribute, get_dialect, is_modified
_initialized = False
def init():
global _initialized
if _initialized:
return
_initialized = True
event.listen(Session, 'before_flush', _before_flush_handler)
event.listen(Session, 'after_flush', _after_flush_handler)
def is_initialized():
global _initialized
return _initialized
def _before_flush_handler(session, _flush_context, _instances):
"""Update version ID for all dirty, modified rows"""
dialect = get_dialect(session)
for row in session.dirty:
if isinstance(row, SavageModelMixin) and is_modified(row, dialect):
# Update row version_id
row.update_version_id()
def _versioned_delete(row, *args):
_archive_row(row, *args, deleted=True)
def _versioned_insert(row, *args):
_archive_row(row, *args)
def _versioned_update(row, *args):
# Do nothing if version_id is unchanged
previous_version_id = get_column_attribute(row, 'version_id', use_dirty=False)
if previous_version_id == row.version_id:
return
# Check if composite key has been changed
row_attrs = inspect(row).attrs
composite_key_changed = any(
getattr(row_attrs, col).history.has_changes() for col in row.version_columns
)
if composite_key_changed:
# Add deleted archive entry for pre-changed state, but with current version_id
_archive_row(row, *args, deleted=True, use_dirty=False)
_archive_row(row, *args)
def _archive_row(row, session, user_id, dialect, **kwargs):
    """Insert one archive record for *row* into its associated ArchiveTable."""
    table = row.ArchiveTable
    session.execute(
        insert(table),
        table.build_row_dict(row, dialect, user_id=user_id, **kwargs),
    )
|
NerdWalletOSS/savage | src/savage/models/__init__.py | SavageLogMixin.build_row_dict | python | def build_row_dict(cls, row, dialect, deleted=False, user_id=None, use_dirty=True):
data = {
'data': row.to_archivable_dict(dialect, use_dirty=use_dirty),
'deleted': deleted,
'updated_at': datetime.now(),
'version_id': current_version_sql(as_is=True) if deleted else row.version_id
}
for col_name in row.version_columns:
data[col_name] = utils.get_column_attribute(row, col_name, use_dirty=use_dirty)
if user_id is not None:
data['user_id'] = user_id
return data | Builds a dictionary of archive data from row which is suitable for insert.
NOTE: If `deleted` is False, version ID will be set to an AsIs SQL construct.
:param row: instance of :class:`~SavageModelMixin`
:param dialect: :py:class:`~sqlalchemy.engine.interfaces.Dialect`
:param deleted: whether or not the row is deleted (defaults to False)
:param user_id: ID of user that is performing the update on this row (defaults to None)
:param use_dirty: whether to use the dirty fields from row or not (defaults to True)
:return: a dictionary of archive table column names to values, suitable for insert
:rtype: dict | train | https://github.com/NerdWalletOSS/savage/blob/54f64ac1c912528710365107952967d31d56e60d/src/savage/models/__init__.py#L59-L83 | [
"def current_version_sql(as_is=False):\n sql_fn = 'txid_current()'\n if as_is:\n # NOTE: The AsIs construct allows raw SQL to be passed through in `SQLAlchemy.insert`\n return AsIs(sql_fn)\n return text(sql_fn)\n",
"def get_column_attribute(row, col_name, use_dirty=True, dialect=None):\n \"\"\"\n :param row: the row object\n :param col_name: the column name\n :param use_dirty: whether to return the dirty value of the column\n :param dialect: if not None, should be a :py:class:`~sqlalchemy.engine.interfaces.Dialect`. If \\\n specified, this function will process the column attribute into the dialect type before \\\n returning it; useful if one is using user defined column types in their mappers.\n\n :return: if :any:`use_dirty`, this will return the value of col_name on the row before it was \\\n changed; else this will return getattr(row, col_name)\n \"\"\"\n def identity(x):\n return x\n\n bind_processor = None\n if dialect:\n column_type = getattr(type(row), col_name).type\n bind_processor = get_bind_processor(column_type, dialect)\n bind_processor = bind_processor or identity\n current_value = bind_processor(getattr(row, col_name))\n if use_dirty:\n return current_value\n\n hist = getattr(inspect(row).attrs, col_name).history\n if not hist.has_changes():\n return current_value\n elif hist.deleted:\n return bind_processor(hist.deleted[0])\n return None\n"
] | class SavageLogMixin(object):
"""
A mixin providing the schema for the log table, an append only table which saves old versions
of rows. An inheriting model must specify the following columns:
- user_id - a column corresponding to the user that made the specified change
- 1 or more columns which are a subset of columns in the user table. These columns
must have a unique constraint on the user table and also be named the same in both tables
"""
archive_id = Column(Integer, primary_key=True, autoincrement=True)
version_id = Column(BigInteger, nullable=False, index=True)
deleted = Column(Boolean, nullable=False)
updated_at = Column(DateTime, nullable=False, server_default=func.now(), onupdate=func.now())
data = Column(postgresql.JSONB, nullable=False)
__mapper_args__ = {
'eager_defaults': True, # Avoid unnecessary select to fetch updated_at
'version_id_col': version_id,
'version_id_generator': False
}
@declared_attr
def __table_args__(cls):
return (
Index('index_{}_on_data_gin'.format(cls.__tablename__), 'data', postgresql_using='gin'),
)
@classmethod
@classmethod
def bulk_archive_rows(cls, rows, session, user_id=None, chunk_size=1000, commit=True):
"""
Bulk archives data previously written to DB.
:param rows: iterable of previously saved model instances to archive
:param session: DB session to use for inserts
:param user_id: ID of user responsible for row modifications
:return:
"""
dialect = utils.get_dialect(session)
to_insert_dicts = []
for row in rows:
row_dict = cls.build_row_dict(row, user_id=user_id, dialect=dialect)
to_insert_dicts.append(row_dict)
if len(to_insert_dicts) < chunk_size:
continue
# Insert a batch of rows
session.execute(insert(cls).values(to_insert_dicts))
to_insert_dicts = []
# Insert final batch of rows (if any)
if to_insert_dicts:
session.execute(insert(cls).values(to_insert_dicts))
if commit:
session.commit()
@classmethod
def _validate(cls, engine, *version_cols):
"""
Validates the archive table.
Validates the following criteria:
- all version columns exist in the archive table
- the python types of the user table and archive table columns are the same
- a user_id column exists
- there is a unique constraint on version and the other versioned columns from the
user table
:param engine: instance of :class:`~sqlalchemy.engine.Engine`
:param *version_cols: instances of :class:`~InstrumentedAttribute` from
the user table corresponding to the columns that versioning pivots around
:raises: :class:`~LogTableCreationError`
"""
cls._version_col_names = set()
for version_column_ut in version_cols:
# Make sure all version columns exist on this table
version_col_name = version_column_ut.key
version_column_at = getattr(cls, version_col_name, None)
if not isinstance(version_column_at, InstrumentedAttribute):
raise LogTableCreationError("Log table needs {} column".format(version_col_name))
# Make sure the type of the user table and log table columns are the same
version_col_at_t = version_column_at.property.columns[0].type.__class__
version_col_ut_t = version_column_ut.property.columns[0].type.__class__
if version_col_at_t != version_col_ut_t:
raise LogTableCreationError(
"Type of column {} must match in log and user table".format(version_col_name)
)
cls._version_col_names.add(version_col_name)
# Ensure user added a user_id column
# TODO: should user_id column be optional?
user_id = getattr(cls, 'user_id', None)
if not isinstance(user_id, InstrumentedAttribute):
raise LogTableCreationError("Log table needs user_id column")
# Check the unique constraint on the versioned columns
version_col_names = list(cls._version_col_names) + ['version_id']
if not utils.has_constraint(cls, engine, *version_col_names):
raise LogTableCreationError("There is no unique constraint on the version columns")
|
NerdWalletOSS/savage | src/savage/models/__init__.py | SavageLogMixin.bulk_archive_rows | python | def bulk_archive_rows(cls, rows, session, user_id=None, chunk_size=1000, commit=True):
dialect = utils.get_dialect(session)
to_insert_dicts = []
for row in rows:
row_dict = cls.build_row_dict(row, user_id=user_id, dialect=dialect)
to_insert_dicts.append(row_dict)
if len(to_insert_dicts) < chunk_size:
continue
# Insert a batch of rows
session.execute(insert(cls).values(to_insert_dicts))
to_insert_dicts = []
# Insert final batch of rows (if any)
if to_insert_dicts:
session.execute(insert(cls).values(to_insert_dicts))
if commit:
session.commit() | Bulk archives data previously written to DB.
:param rows: iterable of previously saved model instances to archive
:param session: DB session to use for inserts
:param user_id: ID of user responsible for row modifications
:return: | train | https://github.com/NerdWalletOSS/savage/blob/54f64ac1c912528710365107952967d31d56e60d/src/savage/models/__init__.py#L86-L111 | [
"def get_dialect(session):\n return session.bind.dialect\n"
] | class SavageLogMixin(object):
"""
A mixin providing the schema for the log table, an append only table which saves old versions
of rows. An inheriting model must specify the following columns:
- user_id - a column corresponding to the user that made the specified change
- 1 or more columns which are a subset of columns in the user table. These columns
must have a unique constraint on the user table and also be named the same in both tables
"""
archive_id = Column(Integer, primary_key=True, autoincrement=True)
version_id = Column(BigInteger, nullable=False, index=True)
deleted = Column(Boolean, nullable=False)
updated_at = Column(DateTime, nullable=False, server_default=func.now(), onupdate=func.now())
data = Column(postgresql.JSONB, nullable=False)
__mapper_args__ = {
'eager_defaults': True, # Avoid unnecessary select to fetch updated_at
'version_id_col': version_id,
'version_id_generator': False
}
@declared_attr
def __table_args__(cls):
return (
Index('index_{}_on_data_gin'.format(cls.__tablename__), 'data', postgresql_using='gin'),
)
@classmethod
def build_row_dict(cls, row, dialect, deleted=False, user_id=None, use_dirty=True):
"""
Builds a dictionary of archive data from row which is suitable for insert.
NOTE: If `deleted` is False, version ID will be set to an AsIs SQL construct.
:param row: instance of :class:`~SavageModelMixin`
:param dialect: :py:class:`~sqlalchemy.engine.interfaces.Dialect`
:param deleted: whether or not the row is deleted (defaults to False)
:param user_id: ID of user that is performing the update on this row (defaults to None)
:param use_dirty: whether to use the dirty fields from row or not (defaults to True)
:return: a dictionary of archive table column names to values, suitable for insert
:rtype: dict
"""
data = {
'data': row.to_archivable_dict(dialect, use_dirty=use_dirty),
'deleted': deleted,
'updated_at': datetime.now(),
'version_id': current_version_sql(as_is=True) if deleted else row.version_id
}
for col_name in row.version_columns:
data[col_name] = utils.get_column_attribute(row, col_name, use_dirty=use_dirty)
if user_id is not None:
data['user_id'] = user_id
return data
@classmethod
@classmethod
def _validate(cls, engine, *version_cols):
"""
Validates the archive table.
Validates the following criteria:
- all version columns exist in the archive table
- the python types of the user table and archive table columns are the same
- a user_id column exists
- there is a unique constraint on version and the other versioned columns from the
user table
:param engine: instance of :class:`~sqlalchemy.engine.Engine`
:param *version_cols: instances of :class:`~InstrumentedAttribute` from
the user table corresponding to the columns that versioning pivots around
:raises: :class:`~LogTableCreationError`
"""
cls._version_col_names = set()
for version_column_ut in version_cols:
# Make sure all version columns exist on this table
version_col_name = version_column_ut.key
version_column_at = getattr(cls, version_col_name, None)
if not isinstance(version_column_at, InstrumentedAttribute):
raise LogTableCreationError("Log table needs {} column".format(version_col_name))
# Make sure the type of the user table and log table columns are the same
version_col_at_t = version_column_at.property.columns[0].type.__class__
version_col_ut_t = version_column_ut.property.columns[0].type.__class__
if version_col_at_t != version_col_ut_t:
raise LogTableCreationError(
"Type of column {} must match in log and user table".format(version_col_name)
)
cls._version_col_names.add(version_col_name)
# Ensure user added a user_id column
# TODO: should user_id column be optional?
user_id = getattr(cls, 'user_id', None)
if not isinstance(user_id, InstrumentedAttribute):
raise LogTableCreationError("Log table needs user_id column")
# Check the unique constraint on the versioned columns
version_col_names = list(cls._version_col_names) + ['version_id']
if not utils.has_constraint(cls, engine, *version_col_names):
raise LogTableCreationError("There is no unique constraint on the version columns")
|
NerdWalletOSS/savage | src/savage/models/__init__.py | SavageLogMixin._validate | python | def _validate(cls, engine, *version_cols):
cls._version_col_names = set()
for version_column_ut in version_cols:
# Make sure all version columns exist on this table
version_col_name = version_column_ut.key
version_column_at = getattr(cls, version_col_name, None)
if not isinstance(version_column_at, InstrumentedAttribute):
raise LogTableCreationError("Log table needs {} column".format(version_col_name))
# Make sure the type of the user table and log table columns are the same
version_col_at_t = version_column_at.property.columns[0].type.__class__
version_col_ut_t = version_column_ut.property.columns[0].type.__class__
if version_col_at_t != version_col_ut_t:
raise LogTableCreationError(
"Type of column {} must match in log and user table".format(version_col_name)
)
cls._version_col_names.add(version_col_name)
# Ensure user added a user_id column
# TODO: should user_id column be optional?
user_id = getattr(cls, 'user_id', None)
if not isinstance(user_id, InstrumentedAttribute):
raise LogTableCreationError("Log table needs user_id column")
# Check the unique constraint on the versioned columns
version_col_names = list(cls._version_col_names) + ['version_id']
if not utils.has_constraint(cls, engine, *version_col_names):
raise LogTableCreationError("There is no unique constraint on the version columns") | Validates the archive table.
Validates the following criteria:
- all version columns exist in the archive table
- the python types of the user table and archive table columns are the same
- a user_id column exists
- there is a unique constraint on version and the other versioned columns from the
user table
:param engine: instance of :class:`~sqlalchemy.engine.Engine`
:param *version_cols: instances of :class:`~InstrumentedAttribute` from
the user table corresponding to the columns that versioning pivots around
:raises: :class:`~LogTableCreationError` | train | https://github.com/NerdWalletOSS/savage/blob/54f64ac1c912528710365107952967d31d56e60d/src/savage/models/__init__.py#L114-L156 | null | class SavageLogMixin(object):
"""
A mixin providing the schema for the log table, an append only table which saves old versions
of rows. An inheriting model must specify the following columns:
- user_id - a column corresponding to the user that made the specified change
- 1 or more columns which are a subset of columns in the user table. These columns
must have a unique constraint on the user table and also be named the same in both tables
"""
archive_id = Column(Integer, primary_key=True, autoincrement=True)
version_id = Column(BigInteger, nullable=False, index=True)
deleted = Column(Boolean, nullable=False)
updated_at = Column(DateTime, nullable=False, server_default=func.now(), onupdate=func.now())
data = Column(postgresql.JSONB, nullable=False)
__mapper_args__ = {
'eager_defaults': True, # Avoid unnecessary select to fetch updated_at
'version_id_col': version_id,
'version_id_generator': False
}
@declared_attr
def __table_args__(cls):
return (
Index('index_{}_on_data_gin'.format(cls.__tablename__), 'data', postgresql_using='gin'),
)
@classmethod
def build_row_dict(cls, row, dialect, deleted=False, user_id=None, use_dirty=True):
"""
Builds a dictionary of archive data from row which is suitable for insert.
NOTE: If `deleted` is False, version ID will be set to an AsIs SQL construct.
:param row: instance of :class:`~SavageModelMixin`
:param dialect: :py:class:`~sqlalchemy.engine.interfaces.Dialect`
:param deleted: whether or not the row is deleted (defaults to False)
:param user_id: ID of user that is performing the update on this row (defaults to None)
:param use_dirty: whether to use the dirty fields from row or not (defaults to True)
:return: a dictionary of archive table column names to values, suitable for insert
:rtype: dict
"""
data = {
'data': row.to_archivable_dict(dialect, use_dirty=use_dirty),
'deleted': deleted,
'updated_at': datetime.now(),
'version_id': current_version_sql(as_is=True) if deleted else row.version_id
}
for col_name in row.version_columns:
data[col_name] = utils.get_column_attribute(row, col_name, use_dirty=use_dirty)
if user_id is not None:
data['user_id'] = user_id
return data
@classmethod
def bulk_archive_rows(cls, rows, session, user_id=None, chunk_size=1000, commit=True):
"""
Bulk archives data previously written to DB.
:param rows: iterable of previously saved model instances to archive
:param session: DB session to use for inserts
:param user_id: ID of user responsible for row modifications
:return:
"""
dialect = utils.get_dialect(session)
to_insert_dicts = []
for row in rows:
row_dict = cls.build_row_dict(row, user_id=user_id, dialect=dialect)
to_insert_dicts.append(row_dict)
if len(to_insert_dicts) < chunk_size:
continue
# Insert a batch of rows
session.execute(insert(cls).values(to_insert_dicts))
to_insert_dicts = []
# Insert final batch of rows (if any)
if to_insert_dicts:
session.execute(insert(cls).values(to_insert_dicts))
if commit:
session.commit()
@classmethod
|
NerdWalletOSS/savage | src/savage/models/__init__.py | SavageModelMixin.register | python | def register(cls, archive_table, engine):
version_col_names = cls.version_columns
if not version_col_names:
raise LogTableCreationError('Need to specify version cols in cls.version_columns')
if cls.ignore_columns is None:
cls.ignore_columns = set()
cls.ignore_columns.add('version_id')
version_cols = [getattr(cls, col_name, None) for col_name in version_col_names]
cls._validate(engine, *version_cols)
archive_table._validate(engine, *version_cols)
cls.ArchiveTable = archive_table | :param archive_table: the model for the users archive table
:param engine: the database engine
:param version_col_names: strings which correspond to columns that versioning will pivot \
around. These columns must have a unique constraint set on them. | train | https://github.com/NerdWalletOSS/savage/blob/54f64ac1c912528710365107952967d31d56e60d/src/savage/models/__init__.py#L176-L194 | null | class SavageModelMixin(object):
version_id = Column(
BigInteger,
nullable=False,
server_default=current_version_sql(),
onupdate=current_version_sql()
)
__mapper_args__ = {
'version_id_col': version_id,
'version_id_generator': False
}
ignore_columns = None
version_columns = None
@classmethod
def updated_by(self, user):
self._updated_by = user
def update_version_id(self):
self.version_id = current_version_sql()
def to_archivable_dict(self, dialect, use_dirty=True):
"""
:param dialect: a :py:class:`~sqlalchemy.engine.interfaces.Dialect` corresponding to the \
SQL dialect being used.
:param use_dirty: whether to make a dict of the fields as they stand, or the fields \
before the row was updated
:return: a dictionary of key value pairs representing this row.
:rtype: dict
"""
return {
cn: utils.get_column_attribute(self, c, use_dirty=use_dirty, dialect=dialect)
for c, cn in utils.get_column_keys_and_names(self)
if c not in self.ignore_columns
}
@classmethod
def _validate(cls, engine, *version_cols):
version_col_names = set()
for version_column_ut in version_cols:
if not isinstance(version_column_ut, InstrumentedAttribute):
raise LogTableCreationError("All version columns must be <InstrumentedAttribute>")
version_col_names.add(version_column_ut.key)
# Check the unique constraint on the versioned columns
insp = inspect(cls)
uc = sorted([col.name for col in insp.primary_key]) == sorted(version_col_names)
if not (uc or utils.has_constraint(cls, engine, *version_col_names)):
raise LogTableCreationError("There is no unique constraint on the version columns")
|
NerdWalletOSS/savage | src/savage/models/__init__.py | SavageModelMixin.to_archivable_dict | python | def to_archivable_dict(self, dialect, use_dirty=True):
return {
cn: utils.get_column_attribute(self, c, use_dirty=use_dirty, dialect=dialect)
for c, cn in utils.get_column_keys_and_names(self)
if c not in self.ignore_columns
} | :param dialect: a :py:class:`~sqlalchemy.engine.interfaces.Dialect` corresponding to the \
SQL dialect being used.
:param use_dirty: whether to make a dict of the fields as they stand, or the fields \
before the row was updated
:return: a dictionary of key value pairs representing this row.
:rtype: dict | train | https://github.com/NerdWalletOSS/savage/blob/54f64ac1c912528710365107952967d31d56e60d/src/savage/models/__init__.py#L202-L216 | [
"def get_column_keys_and_names(table):\n \"\"\"\n Return a generator of tuples k, c such that k is the name of the python attribute for\n the column and c is the name of the column in the sql table.\n \"\"\"\n ins = inspect(table)\n return ((k, c.name) for k, c in ins.mapper.c.items())\n"
] | class SavageModelMixin(object):
version_id = Column(
BigInteger,
nullable=False,
server_default=current_version_sql(),
onupdate=current_version_sql()
)
__mapper_args__ = {
'version_id_col': version_id,
'version_id_generator': False
}
ignore_columns = None
version_columns = None
@classmethod
def register(cls, archive_table, engine):
"""
:param archive_table: the model for the users archive table
:param engine: the database engine
:param version_col_names: strings which correspond to columns that versioning will pivot \
around. These columns must have a unique constraint set on them.
"""
version_col_names = cls.version_columns
if not version_col_names:
raise LogTableCreationError('Need to specify version cols in cls.version_columns')
if cls.ignore_columns is None:
cls.ignore_columns = set()
cls.ignore_columns.add('version_id')
version_cols = [getattr(cls, col_name, None) for col_name in version_col_names]
cls._validate(engine, *version_cols)
archive_table._validate(engine, *version_cols)
cls.ArchiveTable = archive_table
def updated_by(self, user):
self._updated_by = user
def update_version_id(self):
self.version_id = current_version_sql()
@classmethod
def _validate(cls, engine, *version_cols):
version_col_names = set()
for version_column_ut in version_cols:
if not isinstance(version_column_ut, InstrumentedAttribute):
raise LogTableCreationError("All version columns must be <InstrumentedAttribute>")
version_col_names.add(version_column_ut.key)
# Check the unique constraint on the versioned columns
insp = inspect(cls)
uc = sorted([col.name for col in insp.primary_key]) == sorted(version_col_names)
if not (uc or utils.has_constraint(cls, engine, *version_col_names)):
raise LogTableCreationError("There is no unique constraint on the version columns")
|
OLC-LOC-Bioinformatics/GenomeQAML | genomeqaml/extract_features.py | main | python | def main(sequencepath, report, refseq_database, num_threads=12, start=time.time()):
files = find_files(sequencepath)
file_dict = filer(files)
printtime('Using MASH to determine genera of samples', start)
genus_dict = find_genus(file_dict, refseq_database, threads=num_threads)
file_records = fasta_records(file_dict)
printtime('Collecting basic quality metrics', start)
contig_len_dict, gc_dict = fasta_stats(file_dict, file_records)
contig_dist_dict = find_contig_distribution(contig_len_dict)
longest_contig_dict = find_largest_contig(contig_len_dict)
genome_length_dict = find_genome_length(contig_len_dict)
num_contigs_dict = find_num_contigs(contig_len_dict)
n50_dict = find_n50(contig_len_dict, genome_length_dict)
n75_dict = find_n75(contig_len_dict, genome_length_dict)
n90_dict = find_n90(contig_len_dict, genome_length_dict)
l50_dict = find_l50(contig_len_dict, genome_length_dict)
l75_dict = find_l75(contig_len_dict, genome_length_dict)
l90_dict = find_l90(contig_len_dict, genome_length_dict)
printtime('Using prodigal to calculate number of ORFs in each sample', start)
orf_file_dict = predict_orfs(file_dict, num_threads=num_threads)
orf_dist_dict = find_orf_distribution(orf_file_dict)
if report:
reporter(gc_dict, contig_dist_dict, longest_contig_dict, genome_length_dict, num_contigs_dict, n50_dict,
n75_dict, n90_dict, l50_dict, l75_dict, l90_dict, orf_dist_dict, genus_dict, sequencepath)
printtime('Features extracted!', start)
return gc_dict, contig_dist_dict, longest_contig_dict, genome_length_dict, num_contigs_dict, n50_dict, n75_dict, \
n90_dict, l50_dict, l75_dict, l90_dict, orf_dist_dict | Run the appropriate functions in order
:param sequencepath: path of folder containing FASTA genomes
:param report: boolean to determine whether a report is to be created
:param refseq_database: Path to reduced refseq database sketch
:param num_threads: Number of threads to run mash/other stuff on
:return: gc_dict, contig_dist_dict, longest_contig_dict, genome_length_dict, num_contigs_dict, n50_dict, n75_dict, \
n90_dict, l50_dict, l75_dict, l90_dict, orf_dist_dict | train | https://github.com/OLC-LOC-Bioinformatics/GenomeQAML/blob/2953e574c185afab23075641da4ce5392bc003e9/genomeqaml/extract_features.py#L16-L51 | [
"def find_files(sequencepath):\n \"\"\"\n Use glob to find all FASTA files in the provided sequence path. NOTE: FASTA files must have an extension such as\n .fasta, .fa, or .fas. Extensions of .fsa, .tfa, ect. are not currently supported\n :param sequencepath: path of folder containing FASTA genomes\n :return: list of FASTA files\n \"\"\"\n # Create a sorted list of all the FASTA files in the sequence path\n files = sorted(glob(os.path.join(sequencepath, '*.fa*')))\n return files\n",
"def filer(filelist):\n \"\"\"\n Helper script that creates a dictionary of the stain name: /sequencepath/strain_name.extension)\n :param filelist: list of files to parse\n :return filedict: dictionary of stain name: /sequencepath/strain_name.extension\n \"\"\"\n # Initialise the dictionary\n filedict = dict()\n for seqfile in filelist:\n # Split off the file extension and remove the path from the name\n strainname = os.path.splitext(os.path.basename(seqfile))[0]\n # Populate the dictionary\n filedict[strainname] = seqfile\n return filedict\n",
"def find_genus(files, database, threads=12):\n \"\"\"\n Uses MASH to find the genus of fasta files.\n :param files: File dictionary returned by filer method.\n :param database: Path to reduced refseq database sketch.\n :param threads: Number of threads to run mash with.\n :return: genus_dict: Dictionary of genus for each sample. Will return NA if genus could not be found.\n \"\"\"\n genus_dict = dict()\n tmpdir = str(time.time()).split('.')[-1]\n if not os.path.isdir(tmpdir):\n os.makedirs(tmpdir)\n for file_name, fasta in files.items():\n mash.screen(database, fasta,\n threads=threads,\n w='',\n i=0.95,\n output_file=os.path.join(tmpdir, 'screen.tab'))\n screen_output = mash.read_mash_screen(os.path.join(tmpdir, 'screen.tab'))\n try:\n os.remove(os.path.join(tmpdir, 'screen.tab'))\n except IOError:\n pass\n try:\n genus = screen_output[0].query_id.split('/')[-3]\n if genus == 'Shigella':\n genus = 'Escherichia'\n genus_dict[file_name] = genus\n except IndexError:\n genus_dict[file_name] = 'NA'\n\n shutil.rmtree(tmpdir)\n return genus_dict\n",
"def fasta_records(files):\n \"\"\"\n Use SeqIO to create dictionaries of all records for each FASTA file\n :param files: dictionary of stain name: /sequencepath/strain_name.extension\n :return: file_records: dictionary of all contig records for all strains\n \"\"\"\n # Initialise the dictionary\n file_records = dict()\n for file_name, fasta in files.items():\n # Create a dictionary of records for each file\n record_dict = SeqIO.to_dict(SeqIO.parse(fasta, \"fasta\"))\n # Set the records dictionary as the value for file_records\n file_records[file_name] = record_dict\n return file_records\n",
"def fasta_stats(files, records):\n \"\"\"\n Parse the lengths of all contigs for each sample, as well as the total GC%\n :param files: dictionary of stain name: /sequencepath/strain_name.extension\n :param records: Dictionary of strain name: SeqIO records\n :return: contig_len_dict, gc_dict: dictionaries of list of all contig length, and total GC% for all strains\n \"\"\"\n # Initialise dictionaries\n contig_len_dict = dict()\n gc_dict = dict()\n for file_name in files:\n # Initialise variables to store appropriate values parsed from contig records\n contig_lengths = list()\n fasta_sequence = str()\n for contig, record in records[file_name].items():\n # Append the length of the contig to the list\n contig_lengths.append(len(record.seq))\n # Add the contig sequence to the string\n fasta_sequence += record.seq\n # Set the reverse sorted (e.g. largest to smallest) list of contig sizes as the value\n contig_len_dict[file_name] = sorted(contig_lengths, reverse=True)\n # Calculate the GC% of the total genome sequence using GC - format to have two decimal places\n gc_dict[file_name] = float('{:0.2f}'.format(GC(fasta_sequence)))\n return contig_len_dict, gc_dict\n",
"def find_contig_distribution(contig_lengths_dict):\n \"\"\"\n Determine the frequency of different contig size ranges for each strain\n :param contig_lengths_dict:\n :return: contig_len_dist_dict: dictionary of strain name: tuple of contig size range frequencies\n \"\"\"\n # Initialise the dictionary\n contig_len_dist_dict = dict()\n for file_name, contig_lengths in contig_lengths_dict.items():\n # Initialise integers to store the number of contigs that fall into the different bin sizes\n over_1000000 = 0\n over_500000 = 0\n over_100000 = 0\n over_50000 = 0\n over_10000 = 0\n over_5000 = 0\n other = 0\n for contig_length in contig_lengths:\n # Depending on the size of the contig, increment the appropriate integer\n if contig_length > 1000000:\n over_1000000 += 1\n elif contig_length > 500000:\n over_500000 += 1\n elif contig_length > 100000:\n over_100000 += 1\n elif contig_length > 50000:\n over_50000 += 1\n elif contig_length > 10000:\n over_10000 += 1\n elif contig_length > 5000:\n over_5000 += 1\n else:\n other += 1\n # Populate the dictionary with a tuple of each of the size range frequencies\n contig_len_dist_dict[file_name] = (over_1000000,\n over_500000,\n over_100000,\n over_50000,\n over_10000,\n over_5000,\n other)\n return contig_len_dist_dict\n",
"def find_largest_contig(contig_lengths_dict):\n \"\"\"\n Determine the largest contig for each strain\n :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths\n :return: longest_contig_dict: dictionary of strain name: longest contig\n \"\"\"\n # Initialise the dictionary\n longest_contig_dict = dict()\n for file_name, contig_lengths in contig_lengths_dict.items():\n # As the list is sorted in descending order, the largest contig is the first entry in the list\n longest_contig_dict[file_name] = contig_lengths[0]\n return longest_contig_dict\n",
"def find_genome_length(contig_lengths_dict):\n \"\"\"\n Determine the total length of all the contigs for each strain\n :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths\n :return: genome_length_dict: dictionary of strain name: total genome length\n \"\"\"\n # Initialise the dictionary\n genome_length_dict = dict()\n for file_name, contig_lengths in contig_lengths_dict.items():\n # Use the sum() method to add all the contig lengths in the list\n genome_length_dict[file_name] = sum(contig_lengths)\n return genome_length_dict\n",
"def find_num_contigs(contig_lengths_dict):\n \"\"\"\n Count the total number of contigs for each strain\n :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths\n :return: num_contigs_dict: dictionary of strain name: total number of contigs\n \"\"\"\n # Initialise the dictionary\n num_contigs_dict = dict()\n for file_name, contig_lengths in contig_lengths_dict.items():\n # Use the len() method to count the number of entries in the list\n num_contigs_dict[file_name] = len(contig_lengths)\n return num_contigs_dict\n",
"def find_n50(contig_lengths_dict, genome_length_dict):\n \"\"\"\n Calculate the N50 for each strain. N50 is defined as the largest contig such that at least half of the total\n genome size is contained in contigs equal to or larger than this contig\n :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths\n :param genome_length_dict: dictionary of strain name: total genome length\n :return: n50_dict: dictionary of strain name: N50\n \"\"\"\n # Initialise the dictionary\n n50_dict = dict()\n for file_name, contig_lengths in contig_lengths_dict.items():\n # Initialise a variable to store a running total of contig lengths\n currentlength = 0\n for contig_length in contig_lengths:\n # Increment the current length with the length of the current contig\n currentlength += contig_length\n # If the current length is now greater than the total genome / 2, the current contig length is the N50\n if currentlength >= genome_length_dict[file_name] * 0.5:\n # Populate the dictionary, and break the loop\n n50_dict[file_name] = contig_length\n break\n return n50_dict\n",
"def find_n75(contig_lengths_dict, genome_length_dict):\n \"\"\"\n Calculate the N75 for each strain. N75 is defined as the largest contig such that at least 3/4 of the total\n genome size is contained in contigs equal to or larger than this contig\n :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths\n :param genome_length_dict: dictionary of strain name: total genome length\n :return: n75_dict: dictionary of strain name: N75\n \"\"\"\n # Initialise the dictionary\n n75_dict = dict()\n for file_name, contig_lengths in contig_lengths_dict.items():\n currentlength = 0\n for contig_length in contig_lengths:\n currentlength += contig_length\n # If the current length is now greater than the 3/4 of the total genome length, the current contig length\n # is the N75\n if currentlength >= genome_length_dict[file_name] * 0.75:\n n75_dict[file_name] = contig_length\n break\n return n75_dict\n",
"def find_n90(contig_lengths_dict, genome_length_dict):\n \"\"\"\n Calculate the N90 for each strain. N90 is defined as the largest contig such that at least 9/10 of the total\n genome size is contained in contigs equal to or larger than this contig\n :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths\n :param genome_length_dict: dictionary of strain name: total genome length\n :return: n75_dict: dictionary of strain name: N90\n \"\"\"\n # Initialise the dictionary\n n90_dict = dict()\n for file_name, contig_lengths in contig_lengths_dict.items():\n currentlength = 0\n for contig_length in contig_lengths:\n currentlength += contig_length\n # If the current length is now greater than the 3/4 of the total genome length, the current contig length\n # is the N75\n if currentlength >= genome_length_dict[file_name] * 0.95:\n n90_dict[file_name] = contig_length\n break\n return n90_dict\n",
"def find_l50(contig_lengths_dict, genome_length_dict):\n \"\"\"\n Calculate the L50 for each strain. L50 is defined as the number of contigs required to achieve the N50\n :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths\n :param genome_length_dict: dictionary of strain name: total genome length\n :return: l50_dict: dictionary of strain name: L50\n \"\"\"\n # Initialise the dictionary\n l50_dict = dict()\n for file_name, contig_lengths in contig_lengths_dict.items():\n currentlength = 0\n # Initialise a variable to count how many contigs have been added to the currentlength variable\n currentcontig = 0\n for contig_length in contig_lengths:\n currentlength += contig_length\n # Increment :currentcontig each time a contig is added to the current length\n currentcontig += 1\n # Same logic as with the N50, but the contig number is added instead of the length of the contig\n if currentlength >= genome_length_dict[file_name] * 0.5:\n l50_dict[file_name] = currentcontig\n break\n return l50_dict\n",
"def find_l75(contig_lengths_dict, genome_length_dict):\n \"\"\"\n Calculate the L50 for each strain. L75 is defined as the number of contigs required to achieve the N75\n :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths\n :param genome_length_dict: dictionary of strain name: total genome length\n :return: l50_dict: dictionary of strain name: L75\n \"\"\"\n # Initialise the dictionary\n l75_dict = dict()\n for file_name, contig_lengths in contig_lengths_dict.items():\n currentlength = 0\n currentcontig = 0\n for contig_length in contig_lengths:\n currentlength += contig_length\n currentcontig += 1\n # Same logic as with the L75, but the contig number is added instead of the length of the contig\n if currentlength >= genome_length_dict[file_name] * 0.75:\n l75_dict[file_name] = currentcontig\n break\n return l75_dict\n",
"def find_l90(contig_lengths_dict, genome_length_dict):\n \"\"\"\n Calculate the L90 for each strain. L90 is defined as the number of contigs required to achieve the N90\n :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths\n :param genome_length_dict: dictionary of strain name: total genome length\n :return: l90_dict: dictionary of strain name: L90\n \"\"\"\n # Initialise the dictionary\n l90_dict = dict()\n for file_name, contig_lengths in contig_lengths_dict.items():\n currentlength = 0\n # Initialise a variable to count how many contigs have been added to the currentlength variable\n currentcontig = 0\n for contig_length in contig_lengths:\n currentlength += contig_length\n # Increment :currentcontig each time a contig is added to the current length\n currentcontig += 1\n # Same logic as with the N50, but the contig number is added instead of the length of the contig\n if currentlength >= genome_length_dict[file_name] * 0.9:\n l90_dict[file_name] = currentcontig\n break\n return l90_dict\n",
"def predict_orfs(file_dict, num_threads=1):\n \"\"\"\n Use prodigal to predict the number of open reading frames (ORFs) in each strain\n :param file_dict: dictionary of strain name: /sequencepath/strain_name.extension\n :param num_threads: number of threads to use in the pool of prodigal processes\n :return: orf_file_dict: dictionary of strain name: /sequencepath/prodigal results.sco\n \"\"\"\n # Initialise the dictionary\n orf_file_dict = dict()\n prodigallist = list()\n for file_name, file_path in file_dict.items():\n # Set the name of the output .sco results file\n results = os.path.splitext(file_path)[0] + '.sco'\n # Create the command for prodigal to execute - use sco output format\n prodigal = ['prodigal', '-i', file_path, '-o', results, '-f', 'sco']\n # Only run prodigal if the output file doesn't already exist\n if not os.path.isfile(results):\n prodigallist.append(prodigal)\n # Populate the dictionary with the name of the results file\n orf_file_dict[file_name] = results\n # Setup the multiprocessing pool.\n pool = multiprocessing.Pool(processes=num_threads)\n pool.map(run_prodigal, prodigallist)\n pool.close()\n pool.join()\n return orf_file_dict\n",
"def find_orf_distribution(orf_file_dict):\n \"\"\"\n Parse the prodigal outputs to determine the frequency of ORF size ranges for each strain\n :param orf_file_dict: dictionary of strain name: /sequencepath/prodigal results.sco\n :return: orf_dist_dict: dictionary of strain name: tuple of ORF size range distribution frequencies\n \"\"\"\n # Initialise the dictionary\n orf_dist_dict = dict()\n for file_name, orf_report in orf_file_dict.items():\n # Initialise variable to store the frequency of the different ORF size ranges\n total_orfs = 0\n over_3000 = 0\n over_1000 = 0\n over_500 = 0\n other = 0\n # Open the strain-specific report\n with open(orf_report, 'r') as orfreport:\n for line in orfreport:\n # The report has a header section that can be ignored - only parse lines beginning with '>'\n if line.startswith('>'):\n # Split the line on '_' characters e.g. >1_345_920_- yields contig: >1, start: 345, stop: 920,\n # direction: -\n contig, start, stop, direction = line.split('_')\n # The size of the ORF is the end position minus the start position e.g. 920 - 345 = 575\n size = int(stop) - int(start)\n # Increment the total number of ORFs before binning based on ORF size\n total_orfs += 1\n # Increment the appropriate integer based on ORF size\n if size > 3000:\n over_3000 += 1\n elif size > 1000:\n over_1000 += 1\n elif size > 500:\n over_500 += 1\n else:\n other += 1\n # Populate the dictionary with a tuple of the ORF size range frequencies\n orf_dist_dict[file_name] = (total_orfs,\n over_3000,\n over_1000,\n over_500,\n other)\n # Clean-up the prodigal reports\n try:\n os.remove(orf_report)\n except IOError:\n pass\n return orf_dist_dict\n",
"def reporter(gc_dict, contig_dist_dict, longest_contig_dict, genome_length_dict, num_contigs_dict, n50_dict, n75_dict,\n n90_dict, l50_dict, l75_dict, l90_dict, orf_dist_dict, genus_dict, sequencepath):\n \"\"\"\n Create a report of all the extracted features\n :param gc_dict: dictionary of strain name: GC%\n :param contig_dist_dict: dictionary of strain: tuple of contig distribution frequencies\n :param longest_contig_dict: dictionary of strain name: longest contig\n :param genome_length_dict: dictionary of strain name: total genome length\n :param num_contigs_dict: dictionary of strain name: total number of contigs\n :param n50_dict: dictionary of strain name: N50\n :param n75_dict: dictionary of strain name: N75\n :param n90_dict: dictionary of strain name: N90\n :param l50_dict: dictionary of strain name: L50\n :param l75_dict: dictionary of strain name: L75\n :param l90_dict: dictionary of strain name: L90\n :param orf_dist_dict: dictionary of strain name: tuple of ORF length frequencies\n :param genus_dict: dictionary of strain name: genus\n :param sequencepath: path of folder containing FASTA genomes\n \"\"\"\n # Initialise string with header information\n data = 'SampleName,TotalLength,NumContigs,LongestContig,Contigs>1000000,Contigs>500000,Contigs>100000,' \\\n 'Contigs>50000,Contigs>10000,Contigs>5000,Contigs<5000,TotalORFs,ORFs>3000,ORFs>1000,ORFs>500,' \\\n 'ORFs<500,N50,N75,N90,L50,L75,L90,GC%,Genus\\n'\n # Create and open the report for writign\n with open(os.path.join(sequencepath, 'extracted_features.csv'), 'w') as feature_report:\n for file_name in sorted(longest_contig_dict):\n # Populate the data string with the appropriate values\n data += '{name},{totlen},{numcontigs},{longestcontig},{over_106},{over_56},{over_105},{over_55},' \\\n '{over_104},{over_54},{under_54},{tORFS},{ORF33},{ORF13},{ORF52}, {ORF11},{n50},{n75},{n90},' \\\n '{l50},{l75},{l90},{gc},{genus}\\n'\\\n .format(name=file_name,\n totlen=genome_length_dict[file_name],\n 
numcontigs=num_contigs_dict[file_name],\n longestcontig=longest_contig_dict[file_name],\n over_106=contig_dist_dict[file_name][0],\n over_56=contig_dist_dict[file_name][1],\n over_105=contig_dist_dict[file_name][2],\n over_55=contig_dist_dict[file_name][3],\n over_104=contig_dist_dict[file_name][4],\n over_54=contig_dist_dict[file_name][5],\n under_54=contig_dist_dict[file_name][6],\n tORFS=orf_dist_dict[file_name][0],\n ORF33=orf_dist_dict[file_name][1],\n ORF13=orf_dist_dict[file_name][2],\n ORF52=orf_dist_dict[file_name][3],\n ORF11=orf_dist_dict[file_name][4],\n n50=n50_dict[file_name],\n n75=n75_dict[file_name],\n n90=n90_dict[file_name],\n l50=l50_dict[file_name],\n l75=l75_dict[file_name],\n l90=l90_dict[file_name],\n gc=gc_dict[file_name],\n genus=genus_dict[file_name])\n # Write the string to file\n feature_report.write(data)\n"
] | #!/usr/bin/env python 3
from accessoryFunctions.accessoryFunctions import printtime
from Bio.SeqUtils import GC
from biotools import mash
import multiprocessing
from Bio import SeqIO
from glob import glob
import subprocess
import shutil
import click
import time
import os
__author__ = 'adamkoziol', 'andrewlow'
def find_files(sequencepath):
    """
    Locate all FASTA files in the supplied folder with glob. NOTE: FASTA files must have an extension such as
    .fasta, .fa, or .fas. Extensions of .fsa, .tfa, etc. are not currently supported
    :param sequencepath: path of folder containing FASTA genomes
    :return: sorted list of FASTA files
    """
    # Match anything whose extension starts with '.fa', and return the hits in a deterministic order
    pattern = os.path.join(sequencepath, '*.fa*')
    return sorted(glob(pattern))
def filer(filelist):
    """
    Helper script that creates a dictionary of the strain name: /sequencepath/strain_name.extension
    :param filelist: list of files to parse
    :return filedict: dictionary of strain name: /sequencepath/strain_name.extension
    """
    # The strain name is the file name stripped of its directory and extension; later duplicates overwrite
    # earlier ones, exactly as repeated dict assignment would
    return {os.path.splitext(os.path.basename(seqfile))[0]: seqfile
            for seqfile in filelist}
def fasta_records(files):
    """
    Use SeqIO to create dictionaries of all records for each FASTA file
    :param files: dictionary of strain name: /sequencepath/strain_name.extension
    :return: file_records: dictionary of all contig records for all strains
    """
    # Map every strain to a dictionary of contig id: SeqRecord parsed from its FASTA file
    return {file_name: SeqIO.to_dict(SeqIO.parse(fasta, "fasta"))
            for file_name, fasta in files.items()}
def find_genus(files, database, threads=12):
    """
    Uses MASH to find the genus of fasta files.
    :param files: File dictionary returned by filer method.
    :param database: Path to reduced refseq database sketch.
    :param threads: Number of threads to run mash with.
    :return: genus_dict: Dictionary of genus for each sample. Will return NA if genus could not be found.
    """
    genus_dict = dict()
    # Use the fractional part of the current time as a pseudo-unique scratch directory name. NOTE(review): this
    # is a relative path created in the current working directory and is not guaranteed collision-free -
    # tempfile.mkdtemp() would be safer; confirm before changing
    tmpdir = str(time.time()).split('.')[-1]
    if not os.path.isdir(tmpdir):
        os.makedirs(tmpdir)
    for file_name, fasta in files.items():
        # Screen the assembly against the reduced RefSeq sketch - presumably w='' enables mash's
        # winner-take-all mode and i=0.95 is the minimum identity; verify against the mash wrapper
        mash.screen(database, fasta,
                    threads=threads,
                    w='',
                    i=0.95,
                    output_file=os.path.join(tmpdir, 'screen.tab'))
        screen_output = mash.read_mash_screen(os.path.join(tmpdir, 'screen.tab'))
        # Remove the screen output between samples so a stale file cannot be re-read for the next strain
        try:
            os.remove(os.path.join(tmpdir, 'screen.tab'))
        except IOError:
            pass
        try:
            # Extract the genus from the RefSeq path of the first hit - presumably the third-from-last path
            # component is the genus; verify against the sketch layout
            genus = screen_output[0].query_id.split('/')[-3]
            # Report Shigella hits as Escherichia
            if genus == 'Shigella':
                genus = 'Escherichia'
            genus_dict[file_name] = genus
        except IndexError:
            # No hits in the screen output - the genus could not be determined
            genus_dict[file_name] = 'NA'
    shutil.rmtree(tmpdir)
    return genus_dict
def fasta_stats(files, records):
    """
    Parse the lengths of all contigs for each sample, as well as the total GC%
    :param files: dictionary of strain name: /sequencepath/strain_name.extension
    :param records: Dictionary of strain name: SeqIO records
    :return: contig_len_dict, gc_dict: dictionaries of list of all contig length, and total GC% for all strains
    """
    # Initialise dictionaries
    contig_len_dict = dict()
    gc_dict = dict()
    for file_name in files:
        # Initialise variables to store appropriate values parsed from contig records
        contig_lengths = list()
        # Collect the contig sequences as strings and join once at the end - repeated `str += Seq`
        # concatenation is quadratic in the genome size
        sequences = list()
        for contig, record in records[file_name].items():
            # Append the length of the contig to the list
            contig_lengths.append(len(record.seq))
            sequences.append(str(record.seq))
        # Set the reverse sorted (e.g. largest to smallest) list of contig sizes as the value
        contig_len_dict[file_name] = sorted(contig_lengths, reverse=True)
        # Calculate the GC% of the total genome sequence using GC - format to have two decimal places
        gc_dict[file_name] = float('{:0.2f}'.format(GC(''.join(sequences))))
    return contig_len_dict, gc_dict
def find_contig_distribution(contig_lengths_dict):
    """
    Determine the frequency of different contig size ranges for each strain
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :return: contig_len_dist_dict: dictionary of strain name: tuple of contig size range frequencies
    """
    # Lower bounds of the size ranges, largest first; contigs at or below the smallest bound fall into a
    # final catch-all bin
    thresholds = (1000000, 500000, 100000, 50000, 10000, 5000)
    contig_len_dist_dict = dict()
    for file_name, contig_lengths in contig_lengths_dict.items():
        # One counter per threshold plus the trailing catch-all bin
        bins = [0] * (len(thresholds) + 1)
        for contig_length in contig_lengths:
            for index, threshold in enumerate(thresholds):
                if contig_length > threshold:
                    bins[index] += 1
                    break
            else:
                # No threshold was exceeded - count the contig in the smallest size range
                bins[-1] += 1
        # Populate the dictionary with a tuple of each of the size range frequencies
        contig_len_dist_dict[file_name] = tuple(bins)
    return contig_len_dist_dict
def find_largest_contig(contig_lengths_dict):
    """
    Determine the largest contig for each strain
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :return: longest_contig_dict: dictionary of strain name: longest contig
    """
    # The input lists are sorted in descending order, so the first element is the longest contig
    return {file_name: contig_lengths[0]
            for file_name, contig_lengths in contig_lengths_dict.items()}
def find_genome_length(contig_lengths_dict):
    """
    Determine the total length of all the contigs for each strain
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :return: genome_length_dict: dictionary of strain name: total genome length
    """
    # The genome length is simply the sum of all contig lengths for the strain
    return {file_name: sum(contig_lengths)
            for file_name, contig_lengths in contig_lengths_dict.items()}
def find_num_contigs(contig_lengths_dict):
    """
    Count the total number of contigs for each strain
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :return: num_contigs_dict: dictionary of strain name: total number of contigs
    """
    # The contig count is the number of entries in each strain's length list
    return {file_name: len(contig_lengths)
            for file_name, contig_lengths in contig_lengths_dict.items()}
def find_n50(contig_lengths_dict, genome_length_dict):
    """
    Calculate the N50 for each strain. N50 is defined as the largest contig such that at least half of the total
    genome size is contained in contigs equal to or larger than this contig
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :param genome_length_dict: dictionary of strain name: total genome length
    :return: n50_dict: dictionary of strain name: N50
    """
    n50_dict = dict()
    for file_name, contig_lengths in contig_lengths_dict.items():
        # Half of the total assembly length must be contained in contigs at least as large as the N50
        target = genome_length_dict[file_name] * 0.5
        running_total = 0
        # Walk the contigs from largest to smallest, accumulating their lengths
        for contig_length in contig_lengths:
            running_total += contig_length
            if running_total >= target:
                # The first contig to push the running total past the target is the N50
                n50_dict[file_name] = contig_length
                break
    return n50_dict
def find_n75(contig_lengths_dict, genome_length_dict):
    """
    Calculate the N75 for each strain. N75 is defined as the largest contig such that at least 3/4 of the total
    genome size is contained in contigs equal to or larger than this contig
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :param genome_length_dict: dictionary of strain name: total genome length
    :return: n75_dict: dictionary of strain name: N75
    """
    n75_dict = dict()
    for file_name, contig_lengths in contig_lengths_dict.items():
        # Three quarters of the total assembly length must be reached by the cumulative contig lengths
        target = genome_length_dict[file_name] * 0.75
        running_total = 0
        for contig_length in contig_lengths:
            running_total += contig_length
            if running_total >= target:
                # The contig that carries the cumulative length past the target is the N75
                n75_dict[file_name] = contig_length
                break
    return n75_dict
def find_n90(contig_lengths_dict, genome_length_dict):
    """
    Calculate the N90 for each strain. N90 is defined as the largest contig such that at least 9/10 of the total
    genome size is contained in contigs equal to or larger than this contig
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :param genome_length_dict: dictionary of strain name: total genome length
    :return: n90_dict: dictionary of strain name: N90
    """
    # Initialise the dictionary
    n90_dict = dict()
    for file_name, contig_lengths in contig_lengths_dict.items():
        currentlength = 0
        for contig_length in contig_lengths:
            currentlength += contig_length
            # If the current length is now at least 9/10 of the total genome length, the current contig length
            # is the N90. The threshold was previously 0.95, which contradicts the N90 definition (and the 0.9
            # threshold used by find_l90)
            if currentlength >= genome_length_dict[file_name] * 0.9:
                n90_dict[file_name] = contig_length
                break
    return n90_dict
def find_l50(contig_lengths_dict, genome_length_dict):
    """
    Calculate the L50 for each strain. L50 is defined as the number of contigs required to achieve the N50
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :param genome_length_dict: dictionary of strain name: total genome length
    :return: l50_dict: dictionary of strain name: L50
    """
    l50_dict = dict()
    for file_name, contig_lengths in contig_lengths_dict.items():
        # Half of the total assembly length must be reached by the cumulative contig lengths
        target = genome_length_dict[file_name] * 0.5
        running_total = 0
        # enumerate from 1 so `position` is the count of contigs consumed so far
        for position, contig_length in enumerate(contig_lengths, start=1):
            running_total += contig_length
            if running_total >= target:
                # The number of contigs needed to reach half the genome length is the L50
                l50_dict[file_name] = position
                break
    return l50_dict
def find_l75(contig_lengths_dict, genome_length_dict):
    """
    Calculate the L75 for each strain. L75 is defined as the number of contigs required to achieve the N75
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :param genome_length_dict: dictionary of strain name: total genome length
    :return: l75_dict: dictionary of strain name: L75
    """
    l75_dict = dict()
    for file_name, contig_lengths in contig_lengths_dict.items():
        # Three quarters of the total assembly length must be reached by the cumulative contig lengths
        target = genome_length_dict[file_name] * 0.75
        running_total = 0
        # enumerate from 1 so `position` is the count of contigs consumed so far
        for position, contig_length in enumerate(contig_lengths, start=1):
            running_total += contig_length
            if running_total >= target:
                # The number of contigs needed to reach 3/4 of the genome length is the L75
                l75_dict[file_name] = position
                break
    return l75_dict
def find_l90(contig_lengths_dict, genome_length_dict):
    """
    Calculate the L90 for each strain. L90 is defined as the number of contigs required to achieve the N90
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :param genome_length_dict: dictionary of strain name: total genome length
    :return: l90_dict: dictionary of strain name: L90
    """
    l90_dict = dict()
    for file_name, contig_lengths in contig_lengths_dict.items():
        # Nine tenths of the total assembly length must be reached by the cumulative contig lengths
        target = genome_length_dict[file_name] * 0.9
        running_total = 0
        # enumerate from 1 so `position` is the count of contigs consumed so far
        for position, contig_length in enumerate(contig_lengths, start=1):
            running_total += contig_length
            if running_total >= target:
                # The number of contigs needed to reach 9/10 of the genome length is the L90
                l90_dict[file_name] = position
                break
    return l90_dict
def predict_orfs(file_dict, num_threads=1):
    """
    Use prodigal to predict the number of open reading frames (ORFs) in each strain
    :param file_dict: dictionary of strain name: /sequencepath/strain_name.extension
    :param num_threads: number of threads to use in the pool of prodigal processes
    :return: orf_file_dict: dictionary of strain name: /sequencepath/prodigal results.sco
    """
    # Initialise the dictionary
    orf_file_dict = dict()
    # List of prodigal command lines - one per strain that still needs to be processed
    prodigallist = list()
    for file_name, file_path in file_dict.items():
        # Set the name of the output .sco results file
        results = os.path.splitext(file_path)[0] + '.sco'
        # Create the command for prodigal to execute - use sco output format
        prodigal = ['prodigal', '-i', file_path, '-o', results, '-f', 'sco']
        # Only run prodigal if the output file doesn't already exist
        if not os.path.isfile(results):
            prodigallist.append(prodigal)
        # Populate the dictionary with the name of the results file regardless of whether prodigal is re-run,
        # so existing results are reused
        orf_file_dict[file_name] = results
    # Setup the multiprocessing pool - each worker runs one prodigal command via run_prodigal
    pool = multiprocessing.Pool(processes=num_threads)
    pool.map(run_prodigal, prodigallist)
    pool.close()
    pool.join()
    return orf_file_dict
def run_prodigal(prodigal_command):
    """
    Execute a single prodigal command, discarding its console output.
    :param prodigal_command: list of command line arguments for subprocess.call
    """
    with open(os.devnull, 'w') as f:  # No need to make the user see prodigal output, send it to devnull
        subprocess.call(prodigal_command, stdout=f, stderr=f)
def find_orf_distribution(orf_file_dict):
    """
    Parse the prodigal outputs to determine the frequency of ORF size ranges for each strain
    :param orf_file_dict: dictionary of strain name: /sequencepath/prodigal results.sco
    :return: orf_dist_dict: dictionary of strain name: tuple of ORF size range distribution frequencies
    """
    # Initialise the dictionary
    orf_dist_dict = dict()
    for file_name, orf_report in orf_file_dict.items():
        # Initialise variables to store the frequency of the different ORF size ranges
        total_orfs = 0
        over_3000 = 0
        over_1000 = 0
        over_500 = 0
        other = 0
        # Open the strain-specific report
        with open(orf_report, 'r') as orfreport:
            for line in orfreport:
                # The report has a header section that can be ignored - only parse lines beginning with '>'
                if line.startswith('>'):
                    # Split from the right so any underscores within the identifier itself cannot produce extra
                    # fields e.g. >1_345_920_- yields contig: >1, start: 345, stop: 920, direction: -
                    contig, start, stop, direction = line.rstrip().rsplit('_', 3)
                    # The size of the ORF is the end position minus the start position e.g. 920 - 345 = 575
                    size = int(stop) - int(start)
                    # Increment the total number of ORFs before binning based on ORF size
                    total_orfs += 1
                    # Increment the appropriate integer based on ORF size
                    if size > 3000:
                        over_3000 += 1
                    elif size > 1000:
                        over_1000 += 1
                    elif size > 500:
                        over_500 += 1
                    else:
                        other += 1
        # Populate the dictionary with a tuple of the ORF size range frequencies
        orf_dist_dict[file_name] = (total_orfs,
                                    over_3000,
                                    over_1000,
                                    over_500,
                                    other)
        # Clean-up the prodigal reports
        try:
            os.remove(orf_report)
        except IOError:
            pass
    return orf_dist_dict
def reporter(gc_dict, contig_dist_dict, longest_contig_dict, genome_length_dict, num_contigs_dict, n50_dict, n75_dict,
             n90_dict, l50_dict, l75_dict, l90_dict, orf_dist_dict, genus_dict, sequencepath):
    """
    Create a report of all the extracted features
    :param gc_dict: dictionary of strain name: GC%
    :param contig_dist_dict: dictionary of strain: tuple of contig distribution frequencies
    :param longest_contig_dict: dictionary of strain name: longest contig
    :param genome_length_dict: dictionary of strain name: total genome length
    :param num_contigs_dict: dictionary of strain name: total number of contigs
    :param n50_dict: dictionary of strain name: N50
    :param n75_dict: dictionary of strain name: N75
    :param n90_dict: dictionary of strain name: N90
    :param l50_dict: dictionary of strain name: L50
    :param l75_dict: dictionary of strain name: L75
    :param l90_dict: dictionary of strain name: L90
    :param orf_dist_dict: dictionary of strain name: tuple of ORF length frequencies
    :param genus_dict: dictionary of strain name: genus
    :param sequencepath: path of folder containing FASTA genomes
    """
    # Header row for the CSV report
    header = ('SampleName,TotalLength,NumContigs,LongestContig,Contigs>1000000,Contigs>500000,Contigs>100000,'
              'Contigs>50000,Contigs>10000,Contigs>5000,Contigs<5000,TotalORFs,ORFs>3000,ORFs>1000,ORFs>500,'
              'ORFs<500,N50,N75,N90,L50,L75,L90,GC%,Genus')
    lines = [header]
    for file_name in sorted(longest_contig_dict):
        # Assemble each row as a flat list of fields; joining with ',' guarantees a well-formed CSV row (the
        # previous format string emitted a stray space before the ORFs<500 field)
        fields = [file_name,
                  genome_length_dict[file_name],
                  num_contigs_dict[file_name],
                  longest_contig_dict[file_name]]
        # Seven contig size-range frequencies, in descending size order
        fields.extend(contig_dist_dict[file_name])
        # Total ORFs followed by the four ORF size-range frequencies
        fields.extend(orf_dist_dict[file_name])
        fields.extend([n50_dict[file_name], n75_dict[file_name], n90_dict[file_name],
                       l50_dict[file_name], l75_dict[file_name], l90_dict[file_name],
                       gc_dict[file_name], genus_dict[file_name]])
        lines.append(','.join(str(field) for field in fields))
    # Create and open the report for writing
    with open(os.path.join(sequencepath, 'extracted_features.csv'), 'w') as feature_report:
        feature_report.write('\n'.join(lines) + '\n')
# Command-line entry point built with click decorators
@click.command()
@click.option('-s', '--sequencepath',
              type=click.Path(exists=True),
              required=True,
              help='Path of folder containing multi-FASTA files')
@click.option('-d', '--refseq_database',
              type=click.Path(exists=True),
              required=True,
              help='Path to reduced mash sketch of RefSeq.')
# NOTE(review): with is_flag=True and default=True, supplying --report still yields True, so the behaviour
# described in the help text (suppressing the report) cannot actually be triggered; an on/off switch such as
# '--report/--no-report' would be required - confirm intended behaviour before changing
@click.option('-r', '--report',
              is_flag=True,
              default=True,
              help='By default, a report of the extracted features is created. Include this flag if you do not want '
                   'a report created')
def cli(sequencepath, report, refseq_database):
    """
    Pass command line arguments to, and run the feature extraction functions
    """
    # Use every available CPU for the mash/prodigal work
    main(sequencepath, report, refseq_database, num_threads=multiprocessing.cpu_count())


if __name__ == '__main__':
    cli()
|
OLC-LOC-Bioinformatics/GenomeQAML | genomeqaml/extract_features.py | find_files | python | def find_files(sequencepath):
# Create a sorted list of all the FASTA files in the sequence path
files = sorted(glob(os.path.join(sequencepath, '*.fa*')))
return files | Use glob to find all FASTA files in the provided sequence path. NOTE: FASTA files must have an extension such as
.fasta, .fa, or .fas. Extensions of .fsa, .tfa, ect. are not currently supported
:param sequencepath: path of folder containing FASTA genomes
:return: list of FASTA files | train | https://github.com/OLC-LOC-Bioinformatics/GenomeQAML/blob/2953e574c185afab23075641da4ce5392bc003e9/genomeqaml/extract_features.py#L54-L63 | null | #!/usr/bin/env python 3
from accessoryFunctions.accessoryFunctions import printtime
from Bio.SeqUtils import GC
from biotools import mash
import multiprocessing
from Bio import SeqIO
from glob import glob
import subprocess
import shutil
import click
import time
import os
__author__ = 'adamkoziol', 'andrewlow'
def main(sequencepath, report, refseq_database, num_threads=12, start=time.time()):
    """
    Run the appropriate functions in order
    :param sequencepath: path of folder containing FASTA genomes
    :param report: boolean to determine whether a report is to be created
    :param refseq_database: Path to reduced refseq database sketch
    :param num_threads: Number of threads to run mash/other stuff on
    :param start: reference time used by printtime for elapsed-time messages. NOTE(review): this default is
    evaluated once at import time, not per call - pass start explicitly for accurate timings
    :return: gc_dict, contig_dist_dict, longest_contig_dict, genome_length_dict, num_contigs_dict, n50_dict, n75_dict, \
    n90_dict, l50_dict, l75_dict, l90_dict, orf_dist_dict
    """
    # Locate the FASTA files and map strain names to their file paths
    files = find_files(sequencepath)
    file_dict = filer(files)
    printtime('Using MASH to determine genera of samples', start)
    genus_dict = find_genus(file_dict, refseq_database, threads=num_threads)
    # Load all contig records for every strain
    file_records = fasta_records(file_dict)
    printtime('Collecting basic quality metrics', start)
    # Contig lengths and GC% underpin all of the assembly metrics calculated below
    contig_len_dict, gc_dict = fasta_stats(file_dict, file_records)
    contig_dist_dict = find_contig_distribution(contig_len_dict)
    longest_contig_dict = find_largest_contig(contig_len_dict)
    genome_length_dict = find_genome_length(contig_len_dict)
    num_contigs_dict = find_num_contigs(contig_len_dict)
    # Assembly contiguity statistics: N50/N75/N90 and the corresponding contig counts L50/L75/L90
    n50_dict = find_n50(contig_len_dict, genome_length_dict)
    n75_dict = find_n75(contig_len_dict, genome_length_dict)
    n90_dict = find_n90(contig_len_dict, genome_length_dict)
    l50_dict = find_l50(contig_len_dict, genome_length_dict)
    l75_dict = find_l75(contig_len_dict, genome_length_dict)
    l90_dict = find_l90(contig_len_dict, genome_length_dict)
    printtime('Using prodigal to calculate number of ORFs in each sample', start)
    orf_file_dict = predict_orfs(file_dict, num_threads=num_threads)
    orf_dist_dict = find_orf_distribution(orf_file_dict)
    # Optionally write all extracted features to a CSV report
    if report:
        reporter(gc_dict, contig_dist_dict, longest_contig_dict, genome_length_dict, num_contigs_dict, n50_dict,
                 n75_dict, n90_dict, l50_dict, l75_dict, l90_dict, orf_dist_dict, genus_dict, sequencepath)
    printtime('Features extracted!', start)
    return gc_dict, contig_dist_dict, longest_contig_dict, genome_length_dict, num_contigs_dict, n50_dict, n75_dict, \
        n90_dict, l50_dict, l75_dict, l90_dict, orf_dist_dict
def filer(filelist):
    """
    Build a dictionary mapping each strain name to its sequence file path.
    :param filelist: list of files to parse
    :return filedict: dictionary of strain name: /sequencepath/strain_name.extension
    """
    # The strain name is the file's basename with its extension stripped
    return {os.path.splitext(os.path.basename(seqfile))[0]: seqfile
            for seqfile in filelist}
def fasta_records(files):
    """
    Load all contig records of every strain with SeqIO.
    :param files: dictionary of strain name: /sequencepath/strain_name.extension
    :return: file_records: dictionary of all contig records for all strains
    """
    # One record dictionary (contig id -> SeqRecord) per strain
    return {file_name: SeqIO.to_dict(SeqIO.parse(fasta, "fasta"))
            for file_name, fasta in files.items()}
def find_genus(files, database, threads=12):
    """
    Uses MASH to find the genus of fasta files.
    :param files: File dictionary returned by filer method.
    :param database: Path to reduced refseq database sketch.
    :param threads: Number of threads to run mash with.
    :return: genus_dict: Dictionary of genus for each sample. Will return NA if genus could not be found.
    """
    genus_dict = dict()
    # Use the sub-second fraction of the current time as a quasi-unique scratch directory name
    tmpdir = str(time.time()).split('.')[-1]
    if not os.path.isdir(tmpdir):
        os.makedirs(tmpdir)
    for file_name, fasta in files.items():
        # Screen the assembly against the RefSeq sketch, writing hits to screen.tab in the scratch dir
        mash.screen(database, fasta,
                    threads=threads,
                    w='',
                    i=0.95,
                    output_file=os.path.join(tmpdir, 'screen.tab'))
        screen_output = mash.read_mash_screen(os.path.join(tmpdir, 'screen.tab'))
        # Delete the screen output so the next sample starts from a clean slate; ignore a missing file
        try:
            os.remove(os.path.join(tmpdir, 'screen.tab'))
        except IOError:
            pass
        try:
            # NOTE(review): presumably query_id is a RefSeq-style path whose third-from-last component
            # is the genus - confirm against the sketch used
            genus = screen_output[0].query_id.split('/')[-3]
            # Shigella hits are reported as Escherichia (the two are not separable at this resolution)
            if genus == 'Shigella':
                genus = 'Escherichia'
            genus_dict[file_name] = genus
        except IndexError:
            # The screen produced no hits, so the genus cannot be determined
            genus_dict[file_name] = 'NA'
    # Remove the scratch directory and anything left inside it
    shutil.rmtree(tmpdir)
    return genus_dict
def fasta_stats(files, records):
    """
    Parse the lengths of all contigs for each sample, as well as the total GC%
    :param files: dictionary of strain name: /sequencepath/strain_name.extension
    :param records: Dictionary of strain name: SeqIO records
    :return: contig_len_dict, gc_dict: dictionaries of list of all contig length, and total GC% for all strains
    """
    # Initialise dictionaries
    contig_len_dict = dict()
    gc_dict = dict()
    for file_name in files:
        # Contig lengths and sequence fragments parsed from this strain's records
        contig_lengths = list()
        sequence_parts = list()
        for contig, record in records[file_name].items():
            # Append the length of the contig to the list
            contig_lengths.append(len(record.seq))
            # Collect the contig sequence as a string; joining once at the end avoids the
            # quadratic behaviour of repeatedly concatenating onto a growing sequence
            sequence_parts.append(str(record.seq))
        # Set the reverse sorted (e.g. largest to smallest) list of contig sizes as the value
        contig_len_dict[file_name] = sorted(contig_lengths, reverse=True)
        # Calculate the GC% of the total genome sequence using GC - format to have two decimal places
        gc_dict[file_name] = float('{:0.2f}'.format(GC(''.join(sequence_parts))))
    return contig_len_dict, gc_dict
def find_contig_distribution(contig_lengths_dict):
    """
    Bin the contig lengths of every strain into size-range frequency counts.
    :param contig_lengths_dict: dictionary of strain name: list of contig lengths
    :return: contig_len_dist_dict: dictionary of strain name: tuple of contig size range frequencies
        (>1000000, >500000, >100000, >50000, >10000, >5000, remainder)
    """
    contig_len_dist_dict = dict()
    # Bin boundaries from largest to smallest; the final slot catches everything <= 5000
    boundaries = (1000000, 500000, 100000, 50000, 10000, 5000)
    for strain, lengths in contig_lengths_dict.items():
        counts = [0] * (len(boundaries) + 1)
        for length in lengths:
            # Place the contig in the first (largest) bin whose cutoff it exceeds
            for slot, cutoff in enumerate(boundaries):
                if length > cutoff:
                    counts[slot] += 1
                    break
            else:
                # The contig did not exceed any boundary
                counts[-1] += 1
        contig_len_dist_dict[strain] = tuple(counts)
    return contig_len_dist_dict
def find_largest_contig(contig_lengths_dict):
    """
    Report the size of the biggest contig in every assembly.
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :return: longest_contig_dict: dictionary of strain name: longest contig
    """
    # The per-strain lists are sorted largest-first, so the head of each list is the longest contig
    return {strain: lengths[0] for strain, lengths in contig_lengths_dict.items()}
def find_genome_length(contig_lengths_dict):
    """
    Total the contig lengths of every strain to give the assembly size.
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :return: genome_length_dict: dictionary of strain name: total genome length
    """
    # The genome length is simply the sum of every contig length for the strain
    return {strain: sum(lengths) for strain, lengths in contig_lengths_dict.items()}
def find_num_contigs(contig_lengths_dict):
    """
    Count how many contigs each assembly contains.
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :return: num_contigs_dict: dictionary of strain name: total number of contigs
    """
    # Each entry in the length list corresponds to one contig
    return {strain: len(lengths) for strain, lengths in contig_lengths_dict.items()}
def find_n50(contig_lengths_dict, genome_length_dict):
    """
    Determine the N50 of every strain: the length of the contig at which the running total of
    contig lengths (largest first) first reaches half of the genome size.
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :param genome_length_dict: dictionary of strain name: total genome length
    :return: n50_dict: dictionary of strain name: N50
    """
    n50_dict = dict()
    for strain, lengths in contig_lengths_dict.items():
        # Half of the assembly size is the target the running sum must reach
        target = genome_length_dict[strain] * 0.5
        running_total = 0
        for length in lengths:
            running_total += length
            if running_total >= target:
                # The contig that tips the running total over the target is the N50
                n50_dict[strain] = length
                break
    return n50_dict
def find_n75(contig_lengths_dict, genome_length_dict):
    """
    Determine the N75 of every strain: the length of the contig at which the running total of
    contig lengths (largest first) first reaches three quarters of the genome size.
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :param genome_length_dict: dictionary of strain name: total genome length
    :return: n75_dict: dictionary of strain name: N75
    """
    n75_dict = dict()
    for strain, lengths in contig_lengths_dict.items():
        # Three quarters of the assembly size is the target the running sum must reach
        target = genome_length_dict[strain] * 0.75
        running_total = 0
        for length in lengths:
            running_total += length
            if running_total >= target:
                # The contig that tips the running total over the target is the N75
                n75_dict[strain] = length
                break
    return n75_dict
def find_n90(contig_lengths_dict, genome_length_dict):
    """
    Calculate the N90 for each strain. N90 is defined as the largest contig such that at least 9/10 of the total
    genome size is contained in contigs equal to or larger than this contig
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :param genome_length_dict: dictionary of strain name: total genome length
    :return: n90_dict: dictionary of strain name: N90
    """
    # Initialise the dictionary
    n90_dict = dict()
    for file_name, contig_lengths in contig_lengths_dict.items():
        # Running total of contig lengths, iterating over the largest contigs first
        currentlength = 0
        for contig_length in contig_lengths:
            currentlength += contig_length
            # The N90 threshold is 90% of the genome length. Fix: the previous 0.95 factor computed an
            # N95, disagreeing with both the docstring and find_l90 (which correctly uses 0.9)
            if currentlength >= genome_length_dict[file_name] * 0.9:
                n90_dict[file_name] = contig_length
                break
    return n90_dict
def find_l50(contig_lengths_dict, genome_length_dict):
    """
    Determine the L50 of every strain: the number of (largest-first) contigs needed for the running
    total of contig lengths to reach half the genome size.
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :param genome_length_dict: dictionary of strain name: total genome length
    :return: l50_dict: dictionary of strain name: L50
    """
    l50_dict = dict()
    for strain, lengths in contig_lengths_dict.items():
        target = genome_length_dict[strain] * 0.5
        running_total = 0
        # enumerate from 1 so 'count' is the number of contigs consumed so far
        for count, length in enumerate(lengths, start=1):
            running_total += length
            if running_total >= target:
                l50_dict[strain] = count
                break
    return l50_dict
def find_l75(contig_lengths_dict, genome_length_dict):
    """
    Determine the L75 of every strain: the number of (largest-first) contigs needed for the running
    total of contig lengths to reach three quarters of the genome size.
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :param genome_length_dict: dictionary of strain name: total genome length
    :return: l75_dict: dictionary of strain name: L75
    """
    l75_dict = dict()
    for strain, lengths in contig_lengths_dict.items():
        target = genome_length_dict[strain] * 0.75
        running_total = 0
        # enumerate from 1 so 'count' is the number of contigs consumed so far
        for count, length in enumerate(lengths, start=1):
            running_total += length
            if running_total >= target:
                l75_dict[strain] = count
                break
    return l75_dict
def find_l90(contig_lengths_dict, genome_length_dict):
    """
    Determine the L90 of every strain: the number of (largest-first) contigs needed for the running
    total of contig lengths to reach nine tenths of the genome size.
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :param genome_length_dict: dictionary of strain name: total genome length
    :return: l90_dict: dictionary of strain name: L90
    """
    l90_dict = dict()
    for strain, lengths in contig_lengths_dict.items():
        target = genome_length_dict[strain] * 0.9
        running_total = 0
        # enumerate from 1 so 'count' is the number of contigs consumed so far
        for count, length in enumerate(lengths, start=1):
            running_total += length
            if running_total >= target:
                l90_dict[strain] = count
                break
    return l90_dict
def predict_orfs(file_dict, num_threads=1):
    """
    Use prodigal to predict the number of open reading frames (ORFs) in each strain
    :param file_dict: dictionary of strain name: /sequencepath/strain_name.extension
    :param num_threads: number of threads to use in the pool of prodigal processes
    :return: orf_file_dict: dictionary of strain name: /sequencepath/prodigal results.sco
    """
    # Initialise the dictionary
    orf_file_dict = dict()
    # Commands to execute, one per strain that still needs a prodigal run
    prodigallist = list()
    for file_name, file_path in file_dict.items():
        # Set the name of the output .sco results file
        results = os.path.splitext(file_path)[0] + '.sco'
        # Create the command for prodigal to execute - use sco output format
        prodigal = ['prodigal', '-i', file_path, '-o', results, '-f', 'sco']
        # Only run prodigal if the output file doesn't already exist
        if not os.path.isfile(results):
            prodigallist.append(prodigal)
        # Populate the dictionary with the name of the results file, whether freshly run or pre-existing
        orf_file_dict[file_name] = results
    # Setup the multiprocessing pool.
    pool = multiprocessing.Pool(processes=num_threads)
    # Run the prodigal commands in parallel; run_prodigal silences prodigal's console output
    pool.map(run_prodigal, prodigallist)
    pool.close()
    # Block until every prodigal process has finished before returning the result paths
    pool.join()
    return orf_file_dict
def run_prodigal(prodigal_command):
    """Run a single prodigal command, discarding its stdout/stderr so the console stays quiet."""
    # Prodigal writes its results to the file named in the command; console output is only noise
    with open(os.devnull, 'w') as sink:
        subprocess.call(prodigal_command, stdout=sink, stderr=sink)
def find_orf_distribution(orf_file_dict):
    """
    Parse prodigal .sco reports and bin the predicted ORFs of every strain by size.
    :param orf_file_dict: dictionary of strain name: /sequencepath/prodigal results.sco
    :return: orf_dist_dict: dictionary of strain name: tuple of
        (total ORFs, >3000 bp, >1000 bp, >500 bp, remainder)
    """
    orf_dist_dict = dict()
    for strain, report_path in orf_file_dict.items():
        # Frequency counters: total number of ORFs, then the size bins from largest down
        total = 0
        over_3000 = 0
        over_1000 = 0
        over_500 = 0
        other = 0
        with open(report_path, 'r') as report:
            for line in report:
                # Only coordinate lines start with '>'; the .sco header lines are skipped
                if not line.startswith('>'):
                    continue
                # Coordinate lines look like >1_345_920_- (contig, start, stop, strand)
                contig, start, stop, strand = line.split('_')
                orf_size = int(stop) - int(start)
                total += 1
                if orf_size > 3000:
                    over_3000 += 1
                elif orf_size > 1000:
                    over_1000 += 1
                elif orf_size > 500:
                    over_500 += 1
                else:
                    other += 1
        orf_dist_dict[strain] = (total, over_3000, over_1000, over_500, other)
        # The per-strain prodigal report is no longer needed once it has been parsed
        try:
            os.remove(report_path)
        except IOError:
            pass
    return orf_dist_dict
def reporter(gc_dict, contig_dist_dict, longest_contig_dict, genome_length_dict, num_contigs_dict, n50_dict, n75_dict,
             n90_dict, l50_dict, l75_dict, l90_dict, orf_dist_dict, genus_dict, sequencepath):
    """
    Create a report of all the extracted features
    :param gc_dict: dictionary of strain name: GC%
    :param contig_dist_dict: dictionary of strain: tuple of contig distribution frequencies
    :param longest_contig_dict: dictionary of strain name: longest contig
    :param genome_length_dict: dictionary of strain name: total genome length
    :param num_contigs_dict: dictionary of strain name: total number of contigs
    :param n50_dict: dictionary of strain name: N50
    :param n75_dict: dictionary of strain name: N75
    :param n90_dict: dictionary of strain name: N90
    :param l50_dict: dictionary of strain name: L50
    :param l75_dict: dictionary of strain name: L75
    :param l90_dict: dictionary of strain name: L90
    :param orf_dist_dict: dictionary of strain name: tuple of ORF length frequencies
    :param genus_dict: dictionary of strain name: genus
    :param sequencepath: path of folder containing FASTA genomes
    """
    # Initialise string with header information
    data = 'SampleName,TotalLength,NumContigs,LongestContig,Contigs>1000000,Contigs>500000,Contigs>100000,' \
           'Contigs>50000,Contigs>10000,Contigs>5000,Contigs<5000,TotalORFs,ORFs>3000,ORFs>1000,ORFs>500,' \
           'ORFs<500,N50,N75,N90,L50,L75,L90,GC%,Genus\n'
    # Create and open the report for writing
    with open(os.path.join(sequencepath, 'extracted_features.csv'), 'w') as feature_report:
        for file_name in sorted(longest_contig_dict):
            # Append one comma-separated row per sample. Fix: removed the stray space that used to
            # precede the ORFs<500 field, which produced a space-padded (malformed) CSV column
            data += '{name},{totlen},{numcontigs},{longestcontig},{over_106},{over_56},{over_105},{over_55},' \
                    '{over_104},{over_54},{under_54},{tORFS},{ORF33},{ORF13},{ORF52},{ORF11},{n50},{n75},{n90},' \
                    '{l50},{l75},{l90},{gc},{genus}\n'\
                .format(name=file_name,
                        totlen=genome_length_dict[file_name],
                        numcontigs=num_contigs_dict[file_name],
                        longestcontig=longest_contig_dict[file_name],
                        over_106=contig_dist_dict[file_name][0],
                        over_56=contig_dist_dict[file_name][1],
                        over_105=contig_dist_dict[file_name][2],
                        over_55=contig_dist_dict[file_name][3],
                        over_104=contig_dist_dict[file_name][4],
                        over_54=contig_dist_dict[file_name][5],
                        under_54=contig_dist_dict[file_name][6],
                        tORFS=orf_dist_dict[file_name][0],
                        ORF33=orf_dist_dict[file_name][1],
                        ORF13=orf_dist_dict[file_name][2],
                        ORF52=orf_dist_dict[file_name][3],
                        ORF11=orf_dist_dict[file_name][4],
                        n50=n50_dict[file_name],
                        n75=n75_dict[file_name],
                        n90=n90_dict[file_name],
                        l50=l50_dict[file_name],
                        l75=l75_dict[file_name],
                        l90=l90_dict[file_name],
                        gc=gc_dict[file_name],
                        genus=genus_dict[file_name])
        # Write the accumulated header + rows to file in one go
        feature_report.write(data)
# Initialise the click decorator
@click.command()
@click.option('-s', '--sequencepath',
              type=click.Path(exists=True),
              required=True,
              help='Path of folder containing multi-FASTA files')
@click.option('-d', '--refseq_database',
              type=click.Path(exists=True),
              required=True,
              help='Path to reduced mash sketch of RefSeq.')
# NOTE(review): with is_flag=True and default=True this option evaluates to True whether or not the
# flag is supplied, so the behaviour described in the help text (suppressing the report) can never
# trigger - a '--report/--no-report' boolean option would match the help text; confirm intent
@click.option('-r', '--report',
              is_flag=True,
              default=True,
              help='By default, a report of the extracted features is created. Include this flag if you do not want '
                   'a report created')
def cli(sequencepath, report, refseq_database):
    """
    Pass command line arguments to, and run the feature extraction functions
    """
    # Use every available CPU core for the mash and prodigal steps
    main(sequencepath, report, refseq_database, num_threads=multiprocessing.cpu_count())
# Script entry point: hand off argument parsing and execution to the click-decorated CLI
if __name__ == '__main__':
    cli()
|
OLC-LOC-Bioinformatics/GenomeQAML | genomeqaml/extract_features.py | filer | python | def filer(filelist):
# Initialise the dictionary
filedict = dict()
for seqfile in filelist:
# Split off the file extension and remove the path from the name
strainname = os.path.splitext(os.path.basename(seqfile))[0]
# Populate the dictionary
filedict[strainname] = seqfile
return filedict | Helper script that creates a dictionary of the stain name: /sequencepath/strain_name.extension)
:param filelist: list of files to parse
:return filedict: dictionary of stain name: /sequencepath/strain_name.extension | train | https://github.com/OLC-LOC-Bioinformatics/GenomeQAML/blob/2953e574c185afab23075641da4ce5392bc003e9/genomeqaml/extract_features.py#L66-L79 | null | #!/usr/bin/env python 3
from accessoryFunctions.accessoryFunctions import printtime
from Bio.SeqUtils import GC
from biotools import mash
import multiprocessing
from Bio import SeqIO
from glob import glob
import subprocess
import shutil
import click
import time
import os
__author__ = 'adamkoziol', 'andrewlow'
def main(sequencepath, report, refseq_database, num_threads=12, start=time.time()):
    """
    Run the appropriate functions in order
    :param sequencepath: path of folder containing FASTA genomes
    :param report: boolean to determine whether a report is to be created
    :param refseq_database: Path to reduced refseq database sketch
    :param num_threads: Number of threads to run mash/other stuff on
    :param start: start time used for elapsed-time logging. NOTE(review): as a default argument this
        is evaluated once at import time, not per call - confirm that is the intended behaviour
    :return: gc_dict, contig_dist_dict, longest_contig_dict, genome_length_dict, num_contigs_dict, n50_dict, n75_dict, \
    n90_dict, l50_dict, l75_dict, l90_dict, orf_dist_dict
    """
    # Locate the FASTA files and map each one to its strain name
    files = find_files(sequencepath)
    file_dict = filer(files)
    printtime('Using MASH to determine genera of samples', start)
    genus_dict = find_genus(file_dict, refseq_database, threads=num_threads)
    # Load every contig record for every strain
    file_records = fasta_records(file_dict)
    printtime('Collecting basic quality metrics', start)
    contig_len_dict, gc_dict = fasta_stats(file_dict, file_records)
    contig_dist_dict = find_contig_distribution(contig_len_dict)
    longest_contig_dict = find_largest_contig(contig_len_dict)
    genome_length_dict = find_genome_length(contig_len_dict)
    num_contigs_dict = find_num_contigs(contig_len_dict)
    # Assembly contiguity statistics, all derived from the sorted contig lengths
    n50_dict = find_n50(contig_len_dict, genome_length_dict)
    n75_dict = find_n75(contig_len_dict, genome_length_dict)
    n90_dict = find_n90(contig_len_dict, genome_length_dict)
    l50_dict = find_l50(contig_len_dict, genome_length_dict)
    l75_dict = find_l75(contig_len_dict, genome_length_dict)
    l90_dict = find_l90(contig_len_dict, genome_length_dict)
    printtime('Using prodigal to calculate number of ORFs in each sample', start)
    orf_file_dict = predict_orfs(file_dict, num_threads=num_threads)
    orf_dist_dict = find_orf_distribution(orf_file_dict)
    # Only write the CSV summary when requested
    if report:
        reporter(gc_dict, contig_dist_dict, longest_contig_dict, genome_length_dict, num_contigs_dict, n50_dict,
                 n75_dict, n90_dict, l50_dict, l75_dict, l90_dict, orf_dist_dict, genus_dict, sequencepath)
    printtime('Features extracted!', start)
    return gc_dict, contig_dist_dict, longest_contig_dict, genome_length_dict, num_contigs_dict, n50_dict, n75_dict, \
        n90_dict, l50_dict, l75_dict, l90_dict, orf_dist_dict
def find_files(sequencepath):
    """
    Locate every FASTA file in the supplied folder. NOTE: only extensions matching *.fa* (.fasta,
    .fa, .fas, ...) are found; .fsa, .tfa, etc. are not currently supported
    :param sequencepath: path of folder containing FASTA genomes
    :return: list of FASTA files
    """
    # glob for anything whose extension starts with .fa, sorted for deterministic ordering
    return sorted(glob(os.path.join(sequencepath, '*.fa*')))
def fasta_records(files):
    """
    Load all contig records of every strain with SeqIO.
    :param files: dictionary of strain name: /sequencepath/strain_name.extension
    :return: file_records: dictionary of all contig records for all strains
    """
    # One record dictionary (contig id -> SeqRecord) per strain
    return {file_name: SeqIO.to_dict(SeqIO.parse(fasta, "fasta"))
            for file_name, fasta in files.items()}
def find_genus(files, database, threads=12):
    """
    Uses MASH to find the genus of fasta files.
    :param files: File dictionary returned by filer method.
    :param database: Path to reduced refseq database sketch.
    :param threads: Number of threads to run mash with.
    :return: genus_dict: Dictionary of genus for each sample. Will return NA if genus could not be found.
    """
    genus_dict = dict()
    # Use the sub-second fraction of the current time as a quasi-unique scratch directory name
    tmpdir = str(time.time()).split('.')[-1]
    if not os.path.isdir(tmpdir):
        os.makedirs(tmpdir)
    for file_name, fasta in files.items():
        # Screen the assembly against the RefSeq sketch, writing hits to screen.tab in the scratch dir
        mash.screen(database, fasta,
                    threads=threads,
                    w='',
                    i=0.95,
                    output_file=os.path.join(tmpdir, 'screen.tab'))
        screen_output = mash.read_mash_screen(os.path.join(tmpdir, 'screen.tab'))
        # Delete the screen output so the next sample starts from a clean slate; ignore a missing file
        try:
            os.remove(os.path.join(tmpdir, 'screen.tab'))
        except IOError:
            pass
        try:
            # NOTE(review): presumably query_id is a RefSeq-style path whose third-from-last component
            # is the genus - confirm against the sketch used
            genus = screen_output[0].query_id.split('/')[-3]
            # Shigella hits are reported as Escherichia (the two are not separable at this resolution)
            if genus == 'Shigella':
                genus = 'Escherichia'
            genus_dict[file_name] = genus
        except IndexError:
            # The screen produced no hits, so the genus cannot be determined
            genus_dict[file_name] = 'NA'
    # Remove the scratch directory and anything left inside it
    shutil.rmtree(tmpdir)
    return genus_dict
def fasta_stats(files, records):
    """
    Parse the lengths of all contigs for each sample, as well as the total GC%
    :param files: dictionary of strain name: /sequencepath/strain_name.extension
    :param records: Dictionary of strain name: SeqIO records
    :return: contig_len_dict, gc_dict: dictionaries of list of all contig length, and total GC% for all strains
    """
    # Initialise dictionaries
    contig_len_dict = dict()
    gc_dict = dict()
    for file_name in files:
        # Contig lengths and sequence fragments parsed from this strain's records
        contig_lengths = list()
        sequence_parts = list()
        for contig, record in records[file_name].items():
            # Append the length of the contig to the list
            contig_lengths.append(len(record.seq))
            # Collect the contig sequence as a string; joining once at the end avoids the
            # quadratic behaviour of repeatedly concatenating onto a growing sequence
            sequence_parts.append(str(record.seq))
        # Set the reverse sorted (e.g. largest to smallest) list of contig sizes as the value
        contig_len_dict[file_name] = sorted(contig_lengths, reverse=True)
        # Calculate the GC% of the total genome sequence using GC - format to have two decimal places
        gc_dict[file_name] = float('{:0.2f}'.format(GC(''.join(sequence_parts))))
    return contig_len_dict, gc_dict
def find_contig_distribution(contig_lengths_dict):
    """
    Bin the contig lengths of every strain into size-range frequency counts.
    :param contig_lengths_dict: dictionary of strain name: list of contig lengths
    :return: contig_len_dist_dict: dictionary of strain name: tuple of contig size range frequencies
        (>1000000, >500000, >100000, >50000, >10000, >5000, remainder)
    """
    contig_len_dist_dict = dict()
    # Bin boundaries from largest to smallest; the final slot catches everything <= 5000
    boundaries = (1000000, 500000, 100000, 50000, 10000, 5000)
    for strain, lengths in contig_lengths_dict.items():
        counts = [0] * (len(boundaries) + 1)
        for length in lengths:
            # Place the contig in the first (largest) bin whose cutoff it exceeds
            for slot, cutoff in enumerate(boundaries):
                if length > cutoff:
                    counts[slot] += 1
                    break
            else:
                # The contig did not exceed any boundary
                counts[-1] += 1
        contig_len_dist_dict[strain] = tuple(counts)
    return contig_len_dist_dict
def find_largest_contig(contig_lengths_dict):
    """
    Report the size of the biggest contig in every assembly.
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :return: longest_contig_dict: dictionary of strain name: longest contig
    """
    # The per-strain lists are sorted largest-first, so the head of each list is the longest contig
    return {strain: lengths[0] for strain, lengths in contig_lengths_dict.items()}
def find_genome_length(contig_lengths_dict):
    """
    Total the contig lengths of every strain to give the assembly size.
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :return: genome_length_dict: dictionary of strain name: total genome length
    """
    # The genome length is simply the sum of every contig length for the strain
    return {strain: sum(lengths) for strain, lengths in contig_lengths_dict.items()}
def find_num_contigs(contig_lengths_dict):
    """
    Count how many contigs each assembly contains.
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :return: num_contigs_dict: dictionary of strain name: total number of contigs
    """
    # Each entry in the length list corresponds to one contig
    return {strain: len(lengths) for strain, lengths in contig_lengths_dict.items()}
def find_n50(contig_lengths_dict, genome_length_dict):
    """
    Determine the N50 of every strain: the length of the contig at which the running total of
    contig lengths (largest first) first reaches half of the genome size.
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :param genome_length_dict: dictionary of strain name: total genome length
    :return: n50_dict: dictionary of strain name: N50
    """
    n50_dict = dict()
    for strain, lengths in contig_lengths_dict.items():
        # Half of the assembly size is the target the running sum must reach
        target = genome_length_dict[strain] * 0.5
        running_total = 0
        for length in lengths:
            running_total += length
            if running_total >= target:
                # The contig that tips the running total over the target is the N50
                n50_dict[strain] = length
                break
    return n50_dict
def find_n75(contig_lengths_dict, genome_length_dict):
    """
    Determine the N75 of every strain: the length of the contig at which the running total of
    contig lengths (largest first) first reaches three quarters of the genome size.
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :param genome_length_dict: dictionary of strain name: total genome length
    :return: n75_dict: dictionary of strain name: N75
    """
    n75_dict = dict()
    for strain, lengths in contig_lengths_dict.items():
        # Three quarters of the assembly size is the target the running sum must reach
        target = genome_length_dict[strain] * 0.75
        running_total = 0
        for length in lengths:
            running_total += length
            if running_total >= target:
                # The contig that tips the running total over the target is the N75
                n75_dict[strain] = length
                break
    return n75_dict
def find_n90(contig_lengths_dict, genome_length_dict):
    """
    Calculate the N90 for each strain. N90 is defined as the largest contig such that at least 9/10 of the total
    genome size is contained in contigs equal to or larger than this contig
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :param genome_length_dict: dictionary of strain name: total genome length
    :return: n90_dict: dictionary of strain name: N90
    """
    # Initialise the dictionary
    n90_dict = dict()
    for file_name, contig_lengths in contig_lengths_dict.items():
        # Running total of contig lengths, iterating over the largest contigs first
        currentlength = 0
        for contig_length in contig_lengths:
            currentlength += contig_length
            # The N90 threshold is 90% of the genome length. Fix: the previous 0.95 factor computed an
            # N95, disagreeing with both the docstring and find_l90 (which correctly uses 0.9)
            if currentlength >= genome_length_dict[file_name] * 0.9:
                n90_dict[file_name] = contig_length
                break
    return n90_dict
def find_l50(contig_lengths_dict, genome_length_dict):
    """
    Determine the L50 of every strain: the number of (largest-first) contigs needed for the running
    total of contig lengths to reach half the genome size.
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :param genome_length_dict: dictionary of strain name: total genome length
    :return: l50_dict: dictionary of strain name: L50
    """
    l50_dict = dict()
    for strain, lengths in contig_lengths_dict.items():
        target = genome_length_dict[strain] * 0.5
        running_total = 0
        # enumerate from 1 so 'count' is the number of contigs consumed so far
        for count, length in enumerate(lengths, start=1):
            running_total += length
            if running_total >= target:
                l50_dict[strain] = count
                break
    return l50_dict
def find_l75(contig_lengths_dict, genome_length_dict):
    """
    Determine the L75 of every strain: the number of (largest-first) contigs needed for the running
    total of contig lengths to reach three quarters of the genome size.
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :param genome_length_dict: dictionary of strain name: total genome length
    :return: l75_dict: dictionary of strain name: L75
    """
    l75_dict = dict()
    for strain, lengths in contig_lengths_dict.items():
        target = genome_length_dict[strain] * 0.75
        running_total = 0
        # enumerate from 1 so 'count' is the number of contigs consumed so far
        for count, length in enumerate(lengths, start=1):
            running_total += length
            if running_total >= target:
                l75_dict[strain] = count
                break
    return l75_dict
def find_l90(contig_lengths_dict, genome_length_dict):
    """
    Calculate the L90 for each strain. L90 is defined as the number of contigs required to achieve the N90
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :param genome_length_dict: dictionary of strain name: total genome length
    :return: l90_dict: dictionary of strain name: L90
    """
    l90_dict = dict()
    for strain, lengths in contig_lengths_dict.items():
        # 9/10 of the total assembly length is the target the running sum must reach
        target = genome_length_dict[strain] * 0.9
        running_total = 0
        # Enumerate from 1 so the counter is the number of contigs consumed so far
        for contig_count, length in enumerate(lengths, start=1):
            running_total += length
            if running_total >= target:
                l90_dict[strain] = contig_count
                break
    return l90_dict
def predict_orfs(file_dict, num_threads=1):
    """
    Use prodigal to predict the number of open reading frames (ORFs) in each strain
    :param file_dict: dictionary of strain name: /sequencepath/strain_name.extension
    :param num_threads: number of threads to use in the pool of prodigal processes
    :return: orf_file_dict: dictionary of strain name: /sequencepath/prodigal results.sco
    """
    orf_file_dict = dict()
    pending_commands = list()
    for strain, fasta_path in file_dict.items():
        # Prodigal writes its simple coordinate output (.sco) next to the input FASTA
        sco_path = os.path.splitext(fasta_path)[0] + '.sco'
        orf_file_dict[strain] = sco_path
        # Only queue a prodigal run when the results are not already on disk
        if not os.path.isfile(sco_path):
            pending_commands.append(['prodigal', '-i', fasta_path, '-o', sco_path, '-f', 'sco'])
    # Fan the outstanding prodigal jobs out over a worker pool
    pool = multiprocessing.Pool(processes=num_threads)
    pool.map(run_prodigal, pending_commands)
    pool.close()
    pool.join()
    return orf_file_dict
def run_prodigal(prodigal_command):
    """Execute a single prodigal command, discarding its console output."""
    # The user does not need to see prodigal chatter, so route stdout/stderr to devnull
    with open(os.devnull, 'w') as devnull:
        subprocess.call(prodigal_command, stdout=devnull, stderr=devnull)
def find_orf_distribution(orf_file_dict):
    """
    Parse the prodigal outputs to determine the frequency of ORF size ranges for each strain
    :param orf_file_dict: dictionary of strain name: /sequencepath/prodigal results.sco
    :return: orf_dist_dict: dictionary of strain name: tuple of ORF size range distribution frequencies
    """
    orf_dist_dict = dict()
    for strain, report_path in orf_file_dict.items():
        # Counters for the total number of ORFs and for each size bin
        total = 0
        bin_over_3000 = 0
        bin_over_1000 = 0
        bin_over_500 = 0
        bin_other = 0
        with open(report_path, 'r') as report:
            for line in report:
                # Coordinate lines look like '>1_345_920_-'; everything else is header text
                if not line.startswith('>'):
                    continue
                # Fields are ORF number, start, stop and strand, joined by underscores
                _, start, stop, _ = line.split('_')
                orf_size = int(stop) - int(start)
                total += 1
                if orf_size > 3000:
                    bin_over_3000 += 1
                elif orf_size > 1000:
                    bin_over_1000 += 1
                elif orf_size > 500:
                    bin_over_500 += 1
                else:
                    bin_other += 1
        orf_dist_dict[strain] = (total,
                                 bin_over_3000,
                                 bin_over_1000,
                                 bin_over_500,
                                 bin_other)
        # The .sco report is no longer needed once tallied
        try:
            os.remove(report_path)
        except IOError:
            pass
    return orf_dist_dict
def reporter(gc_dict, contig_dist_dict, longest_contig_dict, genome_length_dict, num_contigs_dict, n50_dict, n75_dict,
             n90_dict, l50_dict, l75_dict, l90_dict, orf_dist_dict, genus_dict, sequencepath):
    """
    Create a report of all the extracted features
    :param gc_dict: dictionary of strain name: GC%
    :param contig_dist_dict: dictionary of strain: tuple of contig distribution frequencies
    :param longest_contig_dict: dictionary of strain name: longest contig
    :param genome_length_dict: dictionary of strain name: total genome length
    :param num_contigs_dict: dictionary of strain name: total number of contigs
    :param n50_dict: dictionary of strain name: N50
    :param n75_dict: dictionary of strain name: N75
    :param n90_dict: dictionary of strain name: N90
    :param l50_dict: dictionary of strain name: L50
    :param l75_dict: dictionary of strain name: L75
    :param l90_dict: dictionary of strain name: L90
    :param orf_dist_dict: dictionary of strain name: tuple of ORF length frequencies
    :param genus_dict: dictionary of strain name: genus
    :param sequencepath: path of folder containing FASTA genomes
    """
    # Initialise string with header information
    data = 'SampleName,TotalLength,NumContigs,LongestContig,Contigs>1000000,Contigs>500000,Contigs>100000,' \
           'Contigs>50000,Contigs>10000,Contigs>5000,Contigs<5000,TotalORFs,ORFs>3000,ORFs>1000,ORFs>500,' \
           'ORFs<500,N50,N75,N90,L50,L75,L90,GC%,Genus\n'
    for file_name in sorted(longest_contig_dict):
        # One CSV row per strain. The stray space that used to precede the ORFs<500 field has been
        # removed so every field parses cleanly as CSV.
        data += '{name},{totlen},{numcontigs},{longestcontig},{over_106},{over_56},{over_105},{over_55},' \
                '{over_104},{over_54},{under_54},{tORFS},{ORF33},{ORF13},{ORF52},{ORF11},{n50},{n75},{n90},' \
                '{l50},{l75},{l90},{gc},{genus}\n' \
            .format(name=file_name,
                    totlen=genome_length_dict[file_name],
                    numcontigs=num_contigs_dict[file_name],
                    longestcontig=longest_contig_dict[file_name],
                    over_106=contig_dist_dict[file_name][0],
                    over_56=contig_dist_dict[file_name][1],
                    over_105=contig_dist_dict[file_name][2],
                    over_55=contig_dist_dict[file_name][3],
                    over_104=contig_dist_dict[file_name][4],
                    over_54=contig_dist_dict[file_name][5],
                    under_54=contig_dist_dict[file_name][6],
                    tORFS=orf_dist_dict[file_name][0],
                    ORF33=orf_dist_dict[file_name][1],
                    ORF13=orf_dist_dict[file_name][2],
                    ORF52=orf_dist_dict[file_name][3],
                    ORF11=orf_dist_dict[file_name][4],
                    n50=n50_dict[file_name],
                    n75=n75_dict[file_name],
                    n90=n90_dict[file_name],
                    l50=l50_dict[file_name],
                    l75=l75_dict[file_name],
                    l90=l90_dict[file_name],
                    gc=gc_dict[file_name],
                    genus=genus_dict[file_name])
    # Create the report and write the assembled string exactly once, after the loop, so the file
    # contains a single header followed by one row per strain (writing the growing string inside
    # the loop repeated earlier rows on every iteration)
    with open(os.path.join(sequencepath, 'extracted_features.csv'), 'w') as feature_report:
        feature_report.write(data)
# Initialise the click decorator
@click.command()
@click.option('-s', '--sequencepath',
              type=click.Path(exists=True),
              required=True,
              help='Path of folder containing multi-FASTA files')
@click.option('-d', '--refseq_database',
              type=click.Path(exists=True),
              required=True,
              help='Path to reduced mash sketch of RefSeq.')
@click.option('-r', '--report',
              is_flag=True,
              default=False,
              help='By default, a report of the extracted features is created. Include this flag if you do not want '
                   'a report created')
def cli(sequencepath, report, refseq_database):
    """
    Pass command line arguments to, and run the feature extraction functions
    """
    # The flag means "suppress the report" (see its help text), so invert it before passing along.
    # Previously the option used is_flag=True with default=True, which made the value True whether
    # or not the flag was given, so the report could never be disabled.
    main(sequencepath, not report, refseq_database, num_threads=multiprocessing.cpu_count())


if __name__ == '__main__':
    cli()
|
OLC-LOC-Bioinformatics/GenomeQAML | genomeqaml/extract_features.py | fasta_records | python | def fasta_records(files):
# Initialise the dictionary
file_records = dict()
for file_name, fasta in files.items():
# Create a dictionary of records for each file
record_dict = SeqIO.to_dict(SeqIO.parse(fasta, "fasta"))
# Set the records dictionary as the value for file_records
file_records[file_name] = record_dict
return file_records | Use SeqIO to create dictionaries of all records for each FASTA file
:param files: dictionary of strain name: /sequencepath/strain_name.extension
:return: file_records: dictionary of all contig records for all strains | train | https://github.com/OLC-LOC-Bioinformatics/GenomeQAML/blob/2953e574c185afab23075641da4ce5392bc003e9/genomeqaml/extract_features.py#L82-L95 | null | #!/usr/bin/env python 3
from accessoryFunctions.accessoryFunctions import printtime
from Bio.SeqUtils import GC
from biotools import mash
import multiprocessing
from Bio import SeqIO
from glob import glob
import subprocess
import shutil
import click
import time
import os
__author__ = 'adamkoziol', 'andrewlow'
def main(sequencepath, report, refseq_database, num_threads=12, start=None):
    """
    Run the appropriate functions in order
    :param sequencepath: path of folder containing FASTA genomes
    :param report: boolean to determine whether a report is to be created
    :param refseq_database: Path to reduced refseq database sketch
    :param num_threads: Number of threads to run mash/other stuff on
    :param start: analysis start time used for log timestamps; defaults to the time of the call
    :return: gc_dict, contig_dist_dict, longest_contig_dict, genome_length_dict, num_contigs_dict, n50_dict, n75_dict, \
        n90_dict, l50_dict, l75_dict, l90_dict, orf_dist_dict
    """
    # A default of time.time() in the signature is evaluated once at import time, freezing the
    # timestamp for every later call; resolve the default per call instead
    if start is None:
        start = time.time()
    files = find_files(sequencepath)
    file_dict = filer(files)
    printtime('Using MASH to determine genera of samples', start)
    genus_dict = find_genus(file_dict, refseq_database, threads=num_threads)
    file_records = fasta_records(file_dict)
    printtime('Collecting basic quality metrics', start)
    contig_len_dict, gc_dict = fasta_stats(file_dict, file_records)
    contig_dist_dict = find_contig_distribution(contig_len_dict)
    longest_contig_dict = find_largest_contig(contig_len_dict)
    genome_length_dict = find_genome_length(contig_len_dict)
    num_contigs_dict = find_num_contigs(contig_len_dict)
    n50_dict = find_n50(contig_len_dict, genome_length_dict)
    n75_dict = find_n75(contig_len_dict, genome_length_dict)
    n90_dict = find_n90(contig_len_dict, genome_length_dict)
    l50_dict = find_l50(contig_len_dict, genome_length_dict)
    l75_dict = find_l75(contig_len_dict, genome_length_dict)
    l90_dict = find_l90(contig_len_dict, genome_length_dict)
    printtime('Using prodigal to calculate number of ORFs in each sample', start)
    orf_file_dict = predict_orfs(file_dict, num_threads=num_threads)
    orf_dist_dict = find_orf_distribution(orf_file_dict)
    if report:
        reporter(gc_dict, contig_dist_dict, longest_contig_dict, genome_length_dict, num_contigs_dict, n50_dict,
                 n75_dict, n90_dict, l50_dict, l75_dict, l90_dict, orf_dist_dict, genus_dict, sequencepath)
    printtime('Features extracted!', start)
    return gc_dict, contig_dist_dict, longest_contig_dict, genome_length_dict, num_contigs_dict, n50_dict, n75_dict, \
        n90_dict, l50_dict, l75_dict, l90_dict, orf_dist_dict
def find_files(sequencepath):
    """
    Use glob to find all FASTA files in the provided sequence path. NOTE: FASTA files must have an extension such as
    .fasta, .fa, or .fas. Extensions of .fsa, .tfa, etc. are not currently supported
    :param sequencepath: path of folder containing FASTA genomes
    :return: list of FASTA files
    """
    # Anything whose extension begins with '.fa' is treated as FASTA; sort for a stable order
    pattern = os.path.join(sequencepath, '*.fa*')
    return sorted(glob(pattern))
def filer(filelist):
    """
    Helper script that creates a dictionary of the strain name: /sequencepath/strain_name.extension
    :param filelist: list of files to parse
    :return filedict: dictionary of strain name: /sequencepath/strain_name.extension
    """
    # Initialise the dictionary
    filedict = dict()
    for seqfile in filelist:
        # The strain name is the file name stripped of its directory and extension
        strainname = os.path.splitext(os.path.basename(seqfile))[0]
        # Populate the dictionary
        filedict[strainname] = seqfile
    return filedict
def find_genus(files, database, threads=12):
    """
    Uses MASH to find the genus of fasta files.
    :param files: File dictionary returned by filer method.
    :param database: Path to reduced refseq database sketch.
    :param threads: Number of threads to run mash with.
    :return: genus_dict: Dictionary of genus for each sample. Will return NA if genus could not be found.
    """
    genus_dict = dict()
    # Use the fractional part of the current time as a throwaway working directory name
    tmpdir = str(time.time()).split('.')[-1]
    if not os.path.isdir(tmpdir):
        os.makedirs(tmpdir)
    for file_name, fasta in files.items():
        # Screen the sample against the RefSeq sketch; results land in screen.tab
        mash.screen(database, fasta,
                    threads=threads,
                    w='',
                    i=0.95,
                    output_file=os.path.join(tmpdir, 'screen.tab'))
        screen_output = mash.read_mash_screen(os.path.join(tmpdir, 'screen.tab'))
        # Remove the per-sample screen file so the next iteration starts clean
        try:
            os.remove(os.path.join(tmpdir, 'screen.tab'))
        except IOError:
            pass
        try:
            # NOTE(review): assumes the hit's query_id is a path whose third-from-last component
            # is the genus - confirm this matches the layout of the sketch in use
            genus = screen_output[0].query_id.split('/')[-3]
            # Shigella hits are reported as Escherichia
            if genus == 'Shigella':
                genus = 'Escherichia'
            genus_dict[file_name] = genus
        except IndexError:
            # No screen hits at all: record 'NA' for this sample
            genus_dict[file_name] = 'NA'
    shutil.rmtree(tmpdir)
    return genus_dict
def fasta_stats(files, records):
    """
    Parse the lengths of all contigs for each sample, as well as the total GC%
    :param files: dictionary of strain name: /sequencepath/strain_name.extension
    :param records: dictionary of strain name: SeqIO records
    :return: contig_len_dict, gc_dict: dictionaries of list of all contig length, and total GC% for all strains
    """
    # Initialise dictionaries
    contig_len_dict = dict()
    gc_dict = dict()
    for file_name in files:
        # Collect the contig lengths and the raw sequence of every contig
        contig_lengths = list()
        sequence_parts = list()
        for contig, record in records[file_name].items():
            contig_lengths.append(len(record.seq))
            sequence_parts.append(str(record.seq))
        # Set the reverse sorted (e.g. largest to smallest) list of contig sizes as the value
        contig_len_dict[file_name] = sorted(contig_lengths, reverse=True)
        # Join the sequences once instead of repeatedly concatenating Seq objects, which copied the
        # growing genome on every contig (quadratic). GC% is formatted to two decimal places.
        gc_dict[file_name] = float('{:0.2f}'.format(GC(''.join(sequence_parts))))
    return contig_len_dict, gc_dict
def find_contig_distribution(contig_lengths_dict):
    """
    Determine the frequency of different contig size ranges for each strain
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :return: contig_len_dist_dict: dictionary of strain name: tuple of contig size range frequencies
    """
    contig_len_dist_dict = dict()
    # Bin lower bounds, largest first; contigs strictly greater than a bound fall into that bin
    bounds = (1000000, 500000, 100000, 50000, 10000, 5000)
    for strain, lengths in contig_lengths_dict.items():
        # One tally per bound, plus a final slot for everything 5000 bp or shorter
        tallies = [0] * (len(bounds) + 1)
        for length in lengths:
            for slot, bound in enumerate(bounds):
                if length > bound:
                    tallies[slot] += 1
                    break
            else:
                tallies[-1] += 1
        contig_len_dist_dict[strain] = tuple(tallies)
    return contig_len_dist_dict
def find_largest_contig(contig_lengths_dict):
    """
    Determine the largest contig for each strain
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :return: longest_contig_dict: dictionary of strain name: longest contig
    """
    # The lists arrive sorted largest-first, so the head of each list is the longest contig
    return {strain: lengths[0] for strain, lengths in contig_lengths_dict.items()}
def find_genome_length(contig_lengths_dict):
    """
    Determine the total length of all the contigs for each strain
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :return: genome_length_dict: dictionary of strain name: total genome length
    """
    # The assembly length is simply the sum of its contig lengths
    return {strain: sum(lengths) for strain, lengths in contig_lengths_dict.items()}
def find_num_contigs(contig_lengths_dict):
    """
    Count the total number of contigs for each strain
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :return: num_contigs_dict: dictionary of strain name: total number of contigs
    """
    # One length entry per contig, so the list length is the contig count
    return {strain: len(lengths) for strain, lengths in contig_lengths_dict.items()}
def find_n50(contig_lengths_dict, genome_length_dict):
    """
    Calculate the N50 for each strain. N50 is defined as the largest contig such that at least half of the total
    genome size is contained in contigs equal to or larger than this contig
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :param genome_length_dict: dictionary of strain name: total genome length
    :return: n50_dict: dictionary of strain name: N50
    """
    n50_dict = dict()
    for strain, lengths in contig_lengths_dict.items():
        # Walk the contigs largest-first; the first one that pushes the running sum to at least
        # half the genome length is the N50
        target = genome_length_dict[strain] * 0.5
        running_total = 0
        for length in lengths:
            running_total += length
            if running_total >= target:
                n50_dict[strain] = length
                break
    return n50_dict
def find_n75(contig_lengths_dict, genome_length_dict):
    """
    Calculate the N75 for each strain. N75 is defined as the largest contig such that at least 3/4 of the total
    genome size is contained in contigs equal to or larger than this contig
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :param genome_length_dict: dictionary of strain name: total genome length
    :return: n75_dict: dictionary of strain name: N75
    """
    n75_dict = dict()
    for strain, lengths in contig_lengths_dict.items():
        # Walk the contigs largest-first; the first one that pushes the running sum to at least
        # 3/4 of the genome length is the N75
        target = genome_length_dict[strain] * 0.75
        running_total = 0
        for length in lengths:
            running_total += length
            if running_total >= target:
                n75_dict[strain] = length
                break
    return n75_dict
def find_n90(contig_lengths_dict, genome_length_dict):
    """
    Calculate the N90 for each strain. N90 is defined as the largest contig such that at least 9/10 of the total
    genome size is contained in contigs equal to or larger than this contig
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :param genome_length_dict: dictionary of strain name: total genome length
    :return: n90_dict: dictionary of strain name: N90
    """
    # Initialise the dictionary
    n90_dict = dict()
    for file_name, contig_lengths in contig_lengths_dict.items():
        currentlength = 0
        for contig_length in contig_lengths:
            currentlength += contig_length
            # The previous implementation compared against 0.95 of the genome length, which computed
            # an N95, not the documented N90; 0.9 matches the definition and the companion find_l90
            if currentlength >= genome_length_dict[file_name] * 0.9:
                n90_dict[file_name] = contig_length
                break
    return n90_dict
def find_l50(contig_lengths_dict, genome_length_dict):
    """
    Calculate the L50 for each strain. L50 is defined as the number of contigs required to achieve the N50
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :param genome_length_dict: dictionary of strain name: total genome length
    :return: l50_dict: dictionary of strain name: L50
    """
    l50_dict = dict()
    for strain, lengths in contig_lengths_dict.items():
        # Half of the total assembly length is the target the running sum must reach
        target = genome_length_dict[strain] * 0.5
        running_total = 0
        # Enumerate from 1 so the counter is the number of contigs consumed so far
        for contig_count, length in enumerate(lengths, start=1):
            running_total += length
            if running_total >= target:
                l50_dict[strain] = contig_count
                break
    return l50_dict
def find_l75(contig_lengths_dict, genome_length_dict):
    """
    Calculate the L75 for each strain. L75 is defined as the number of contigs required to achieve the N75
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :param genome_length_dict: dictionary of strain name: total genome length
    :return: l75_dict: dictionary of strain name: L75
    """
    # Initialise the dictionary
    l75_dict = dict()
    for file_name, contig_lengths in contig_lengths_dict.items():
        # Running total of contig lengths, and a counter of contigs consumed so far
        currentlength = 0
        currentcontig = 0
        for contig_length in contig_lengths:
            currentlength += contig_length
            currentcontig += 1
            # Once 3/4 of the genome length is reached, the number of contigs used is the L75
            if currentlength >= genome_length_dict[file_name] * 0.75:
                l75_dict[file_name] = currentcontig
                break
    return l75_dict
def find_l90(contig_lengths_dict, genome_length_dict):
    """
    Calculate the L90 for each strain. L90 is defined as the number of contigs required to achieve the N90
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :param genome_length_dict: dictionary of strain name: total genome length
    :return: l90_dict: dictionary of strain name: L90
    """
    l90_dict = dict()
    for strain, lengths in contig_lengths_dict.items():
        # 9/10 of the total assembly length is the target the running sum must reach
        target = genome_length_dict[strain] * 0.9
        running_total = 0
        # Enumerate from 1 so the counter is the number of contigs consumed so far
        for contig_count, length in enumerate(lengths, start=1):
            running_total += length
            if running_total >= target:
                l90_dict[strain] = contig_count
                break
    return l90_dict
def predict_orfs(file_dict, num_threads=1):
    """
    Use prodigal to predict the number of open reading frames (ORFs) in each strain
    :param file_dict: dictionary of strain name: /sequencepath/strain_name.extension
    :param num_threads: number of threads to use in the pool of prodigal processes
    :return: orf_file_dict: dictionary of strain name: /sequencepath/prodigal results.sco
    """
    orf_file_dict = dict()
    pending_commands = list()
    for strain, fasta_path in file_dict.items():
        # Prodigal writes its simple coordinate output (.sco) next to the input FASTA
        sco_path = os.path.splitext(fasta_path)[0] + '.sco'
        orf_file_dict[strain] = sco_path
        # Only queue a prodigal run when the results are not already on disk
        if not os.path.isfile(sco_path):
            pending_commands.append(['prodigal', '-i', fasta_path, '-o', sco_path, '-f', 'sco'])
    # Fan the outstanding prodigal jobs out over a worker pool
    pool = multiprocessing.Pool(processes=num_threads)
    pool.map(run_prodigal, pending_commands)
    pool.close()
    pool.join()
    return orf_file_dict
def run_prodigal(prodigal_command):
    """Execute a single prodigal command, discarding its console output."""
    # The user does not need to see prodigal chatter, so route stdout/stderr to devnull
    with open(os.devnull, 'w') as devnull:
        subprocess.call(prodigal_command, stdout=devnull, stderr=devnull)
def find_orf_distribution(orf_file_dict):
    """
    Parse the prodigal outputs to determine the frequency of ORF size ranges for each strain
    :param orf_file_dict: dictionary of strain name: /sequencepath/prodigal results.sco
    :return: orf_dist_dict: dictionary of strain name: tuple of ORF size range distribution frequencies
    """
    orf_dist_dict = dict()
    for strain, report_path in orf_file_dict.items():
        # Counters for the total number of ORFs and for each size bin
        total = 0
        bin_over_3000 = 0
        bin_over_1000 = 0
        bin_over_500 = 0
        bin_other = 0
        with open(report_path, 'r') as report:
            for line in report:
                # Coordinate lines look like '>1_345_920_-'; everything else is header text
                if not line.startswith('>'):
                    continue
                # Fields are ORF number, start, stop and strand, joined by underscores
                _, start, stop, _ = line.split('_')
                orf_size = int(stop) - int(start)
                total += 1
                if orf_size > 3000:
                    bin_over_3000 += 1
                elif orf_size > 1000:
                    bin_over_1000 += 1
                elif orf_size > 500:
                    bin_over_500 += 1
                else:
                    bin_other += 1
        orf_dist_dict[strain] = (total,
                                 bin_over_3000,
                                 bin_over_1000,
                                 bin_over_500,
                                 bin_other)
        # The .sco report is no longer needed once tallied
        try:
            os.remove(report_path)
        except IOError:
            pass
    return orf_dist_dict
def reporter(gc_dict, contig_dist_dict, longest_contig_dict, genome_length_dict, num_contigs_dict, n50_dict, n75_dict,
             n90_dict, l50_dict, l75_dict, l90_dict, orf_dist_dict, genus_dict, sequencepath):
    """
    Create a report of all the extracted features
    :param gc_dict: dictionary of strain name: GC%
    :param contig_dist_dict: dictionary of strain: tuple of contig distribution frequencies
    :param longest_contig_dict: dictionary of strain name: longest contig
    :param genome_length_dict: dictionary of strain name: total genome length
    :param num_contigs_dict: dictionary of strain name: total number of contigs
    :param n50_dict: dictionary of strain name: N50
    :param n75_dict: dictionary of strain name: N75
    :param n90_dict: dictionary of strain name: N90
    :param l50_dict: dictionary of strain name: L50
    :param l75_dict: dictionary of strain name: L75
    :param l90_dict: dictionary of strain name: L90
    :param orf_dist_dict: dictionary of strain name: tuple of ORF length frequencies
    :param genus_dict: dictionary of strain name: genus
    :param sequencepath: path of folder containing FASTA genomes
    """
    # Initialise string with header information
    data = 'SampleName,TotalLength,NumContigs,LongestContig,Contigs>1000000,Contigs>500000,Contigs>100000,' \
           'Contigs>50000,Contigs>10000,Contigs>5000,Contigs<5000,TotalORFs,ORFs>3000,ORFs>1000,ORFs>500,' \
           'ORFs<500,N50,N75,N90,L50,L75,L90,GC%,Genus\n'
    for file_name in sorted(longest_contig_dict):
        # One CSV row per strain. The stray space that used to precede the ORFs<500 field has been
        # removed so every field parses cleanly as CSV.
        data += '{name},{totlen},{numcontigs},{longestcontig},{over_106},{over_56},{over_105},{over_55},' \
                '{over_104},{over_54},{under_54},{tORFS},{ORF33},{ORF13},{ORF52},{ORF11},{n50},{n75},{n90},' \
                '{l50},{l75},{l90},{gc},{genus}\n' \
            .format(name=file_name,
                    totlen=genome_length_dict[file_name],
                    numcontigs=num_contigs_dict[file_name],
                    longestcontig=longest_contig_dict[file_name],
                    over_106=contig_dist_dict[file_name][0],
                    over_56=contig_dist_dict[file_name][1],
                    over_105=contig_dist_dict[file_name][2],
                    over_55=contig_dist_dict[file_name][3],
                    over_104=contig_dist_dict[file_name][4],
                    over_54=contig_dist_dict[file_name][5],
                    under_54=contig_dist_dict[file_name][6],
                    tORFS=orf_dist_dict[file_name][0],
                    ORF33=orf_dist_dict[file_name][1],
                    ORF13=orf_dist_dict[file_name][2],
                    ORF52=orf_dist_dict[file_name][3],
                    ORF11=orf_dist_dict[file_name][4],
                    n50=n50_dict[file_name],
                    n75=n75_dict[file_name],
                    n90=n90_dict[file_name],
                    l50=l50_dict[file_name],
                    l75=l75_dict[file_name],
                    l90=l90_dict[file_name],
                    gc=gc_dict[file_name],
                    genus=genus_dict[file_name])
    # Create the report and write the assembled string exactly once, after the loop, so the file
    # contains a single header followed by one row per strain (writing the growing string inside
    # the loop repeated earlier rows on every iteration)
    with open(os.path.join(sequencepath, 'extracted_features.csv'), 'w') as feature_report:
        feature_report.write(data)
# Initialise the click decorator
@click.command()
@click.option('-s', '--sequencepath',
              type=click.Path(exists=True),
              required=True,
              help='Path of folder containing multi-FASTA files')
@click.option('-d', '--refseq_database',
              type=click.Path(exists=True),
              required=True,
              help='Path to reduced mash sketch of RefSeq.')
@click.option('-r', '--report',
              is_flag=True,
              default=False,
              help='By default, a report of the extracted features is created. Include this flag if you do not want '
                   'a report created')
def cli(sequencepath, report, refseq_database):
    """
    Pass command line arguments to, and run the feature extraction functions
    """
    # The flag means "suppress the report" (see its help text), so invert it before passing along.
    # Previously the option used is_flag=True with default=True, which made the value True whether
    # or not the flag was given, so the report could never be disabled.
    main(sequencepath, not report, refseq_database, num_threads=multiprocessing.cpu_count())


if __name__ == '__main__':
    cli()
|
OLC-LOC-Bioinformatics/GenomeQAML | genomeqaml/extract_features.py | find_genus | python | def find_genus(files, database, threads=12):
genus_dict = dict()
tmpdir = str(time.time()).split('.')[-1]
if not os.path.isdir(tmpdir):
os.makedirs(tmpdir)
for file_name, fasta in files.items():
mash.screen(database, fasta,
threads=threads,
w='',
i=0.95,
output_file=os.path.join(tmpdir, 'screen.tab'))
screen_output = mash.read_mash_screen(os.path.join(tmpdir, 'screen.tab'))
try:
os.remove(os.path.join(tmpdir, 'screen.tab'))
except IOError:
pass
try:
genus = screen_output[0].query_id.split('/')[-3]
if genus == 'Shigella':
genus = 'Escherichia'
genus_dict[file_name] = genus
except IndexError:
genus_dict[file_name] = 'NA'
shutil.rmtree(tmpdir)
return genus_dict | Uses MASH to find the genus of fasta files.
:param files: File dictionary returned by filer method.
:param database: Path to reduced refseq database sketch.
:param threads: Number of threads to run mash with.
:return: genus_dict: Dictionary of genus for each sample. Will return NA if genus could not be found. | train | https://github.com/OLC-LOC-Bioinformatics/GenomeQAML/blob/2953e574c185afab23075641da4ce5392bc003e9/genomeqaml/extract_features.py#L98-L130 | null | #!/usr/bin/env python 3
from accessoryFunctions.accessoryFunctions import printtime
from Bio.SeqUtils import GC
from biotools import mash
import multiprocessing
from Bio import SeqIO
from glob import glob
import subprocess
import shutil
import click
import time
import os
__author__ = 'adamkoziol', 'andrewlow'
def main(sequencepath, report, refseq_database, num_threads=12, start=None):
    """
    Run the appropriate functions in order
    :param sequencepath: path of folder containing FASTA genomes
    :param report: boolean to determine whether a report is to be created
    :param refseq_database: Path to reduced refseq database sketch
    :param num_threads: Number of threads to run mash/other stuff on
    :param start: analysis start time used for log timestamps; defaults to the time of the call
    :return: gc_dict, contig_dist_dict, longest_contig_dict, genome_length_dict, num_contigs_dict, n50_dict, n75_dict, \
        n90_dict, l50_dict, l75_dict, l90_dict, orf_dist_dict
    """
    # A default of time.time() in the signature is evaluated once at import time, freezing the
    # timestamp for every later call; resolve the default per call instead
    if start is None:
        start = time.time()
    files = find_files(sequencepath)
    file_dict = filer(files)
    printtime('Using MASH to determine genera of samples', start)
    genus_dict = find_genus(file_dict, refseq_database, threads=num_threads)
    file_records = fasta_records(file_dict)
    printtime('Collecting basic quality metrics', start)
    contig_len_dict, gc_dict = fasta_stats(file_dict, file_records)
    contig_dist_dict = find_contig_distribution(contig_len_dict)
    longest_contig_dict = find_largest_contig(contig_len_dict)
    genome_length_dict = find_genome_length(contig_len_dict)
    num_contigs_dict = find_num_contigs(contig_len_dict)
    n50_dict = find_n50(contig_len_dict, genome_length_dict)
    n75_dict = find_n75(contig_len_dict, genome_length_dict)
    n90_dict = find_n90(contig_len_dict, genome_length_dict)
    l50_dict = find_l50(contig_len_dict, genome_length_dict)
    l75_dict = find_l75(contig_len_dict, genome_length_dict)
    l90_dict = find_l90(contig_len_dict, genome_length_dict)
    printtime('Using prodigal to calculate number of ORFs in each sample', start)
    orf_file_dict = predict_orfs(file_dict, num_threads=num_threads)
    orf_dist_dict = find_orf_distribution(orf_file_dict)
    if report:
        reporter(gc_dict, contig_dist_dict, longest_contig_dict, genome_length_dict, num_contigs_dict, n50_dict,
                 n75_dict, n90_dict, l50_dict, l75_dict, l90_dict, orf_dist_dict, genus_dict, sequencepath)
    printtime('Features extracted!', start)
    return gc_dict, contig_dist_dict, longest_contig_dict, genome_length_dict, num_contigs_dict, n50_dict, n75_dict, \
        n90_dict, l50_dict, l75_dict, l90_dict, orf_dist_dict
def find_files(sequencepath):
    """
    Locate all FASTA files in the supplied directory.
    Only names matching '*.fa*' (e.g. .fasta, .fa, .fas) are picked up; extensions
    such as .fsa or .tfa are not currently supported.
    :param sequencepath: path of folder containing FASTA genomes
    :return: sorted list of FASTA file paths
    """
    # Build the glob pattern once, then return the matches in sorted order
    pattern = os.path.join(sequencepath, '*.fa*')
    return sorted(glob(pattern))
def filer(filelist):
    """
    Map each file path to its strain name (the basename with the extension removed).
    :param filelist: list of files to parse
    :return filedict: dictionary of strain name: /sequencepath/strain_name.extension
    """
    return {os.path.splitext(os.path.basename(path))[0]: path for path in filelist}
def fasta_records(files):
    """
    Parse every FASTA file into a dictionary of SeqIO records keyed by contig id.
    :param files: dictionary of strain name: /sequencepath/strain_name.extension
    :return: file_records: dictionary of strain name: {contig id: SeqIO record}
    """
    return {strain: SeqIO.to_dict(SeqIO.parse(fasta_path, "fasta"))
            for strain, fasta_path in files.items()}
def fasta_stats(files, records):
    """
    Collect per-strain contig lengths and whole-genome GC content.
    :param files: dictionary of strain name: /sequencepath/strain_name.extension
    :param records: dictionary of strain name: SeqIO records
    :return: contig_len_dict, gc_dict: descending contig-length lists and GC% per strain
    """
    contig_len_dict = dict()
    gc_dict = dict()
    for strain in files:
        lengths = list()
        # Concatenate all contig sequences so GC is computed over the entire genome
        sequence = str()
        for record in records[strain].values():
            lengths.append(len(record.seq))
            sequence += record.seq
        # Largest-to-smallest ordering is relied on by the downstream N50/L50 logic
        contig_len_dict[strain] = sorted(lengths, reverse=True)
        # Format the GC percentage to two decimal places before converting back to float
        gc_dict[strain] = float('{:0.2f}'.format(GC(sequence)))
    return contig_len_dict, gc_dict
def find_contig_distribution(contig_lengths_dict):
    """
    Bin each strain's contigs by size range and count the members of each bin.
    :param contig_lengths_dict: dictionary of strain name: list of contig lengths
    :return: contig_len_dist_dict: dictionary of strain name: tuple of bin counts
        (>1000000, >500000, >100000, >50000, >10000, >5000, <=5000)
    """
    # Bin lower bounds, largest first; a contig belongs to the first bin it exceeds
    cutoffs = (1000000, 500000, 100000, 50000, 10000, 5000)
    contig_len_dist_dict = dict()
    for strain, lengths in contig_lengths_dict.items():
        counts = [0] * (len(cutoffs) + 1)
        for length in lengths:
            for position, cutoff in enumerate(cutoffs):
                if length > cutoff:
                    counts[position] += 1
                    break
            else:
                # Did not exceed any cutoff: contig is 5000 bp or smaller
                counts[-1] += 1
        contig_len_dist_dict[strain] = tuple(counts)
    return contig_len_dist_dict
def find_largest_contig(contig_lengths_dict):
    """
    Report the longest contig for every strain.
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :return: longest_contig_dict: dictionary of strain name: longest contig
    """
    # Lists are sorted largest-first, so index 0 holds the longest contig
    return {strain: lengths[0] for strain, lengths in contig_lengths_dict.items()}
def find_genome_length(contig_lengths_dict):
    """
    Total the contig lengths for every strain to obtain the assembly size.
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :return: genome_length_dict: dictionary of strain name: total genome length
    """
    return {strain: sum(lengths) for strain, lengths in contig_lengths_dict.items()}
def find_num_contigs(contig_lengths_dict):
    """
    Count how many contigs each strain's assembly contains.
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :return: num_contigs_dict: dictionary of strain name: total number of contigs
    """
    return {strain: len(lengths) for strain, lengths in contig_lengths_dict.items()}
def find_n50(contig_lengths_dict, genome_length_dict):
    """
    Calculate the N50 for each strain: the length of the contig at which the running
    total of (descending) contig lengths first reaches half the genome size.
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :param genome_length_dict: dictionary of strain name: total genome length
    :return: n50_dict: dictionary of strain name: N50
    """
    n50_dict = dict()
    for strain, lengths in contig_lengths_dict.items():
        # Half the total assembly length is the coverage target
        target = genome_length_dict[strain] * 0.5
        running_total = 0
        for length in lengths:
            running_total += length
            if running_total >= target:
                n50_dict[strain] = length
                break
    return n50_dict
def find_n75(contig_lengths_dict, genome_length_dict):
    """
    Calculate the N75 for each strain: the length of the contig at which the running
    total of (descending) contig lengths first reaches 3/4 of the genome size.
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :param genome_length_dict: dictionary of strain name: total genome length
    :return: n75_dict: dictionary of strain name: N75
    """
    n75_dict = dict()
    for strain, lengths in contig_lengths_dict.items():
        # Three quarters of the total assembly length is the coverage target
        target = genome_length_dict[strain] * 0.75
        running_total = 0
        for length in lengths:
            running_total += length
            if running_total >= target:
                n75_dict[strain] = length
                break
    return n75_dict
def find_n90(contig_lengths_dict, genome_length_dict):
    """
    Calculate the N90 for each strain. N90 is defined as the largest contig such that at least 9/10 of the total
    genome size is contained in contigs equal to or larger than this contig
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :param genome_length_dict: dictionary of strain name: total genome length
    :return: n90_dict: dictionary of strain name: N90
    """
    # Initialise the dictionary
    n90_dict = dict()
    for file_name, contig_lengths in contig_lengths_dict.items():
        currentlength = 0
        for contig_length in contig_lengths:
            currentlength += contig_length
            # Once the running total covers 9/10 of the genome, the current contig length is the N90.
            # BUGFIX: this previously compared against 0.95 of the genome (i.e. an N95), contradicting
            # the documented N90 definition and the 0.9 threshold used by the matching L90 calculation.
            if currentlength >= genome_length_dict[file_name] * 0.9:
                n90_dict[file_name] = contig_length
                break
    return n90_dict
def find_l50(contig_lengths_dict, genome_length_dict):
    """
    Calculate the L50 for each strain: the number of (largest-first) contigs whose
    running length total first reaches half the genome size.
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :param genome_length_dict: dictionary of strain name: total genome length
    :return: l50_dict: dictionary of strain name: L50
    """
    l50_dict = dict()
    for strain, lengths in contig_lengths_dict.items():
        target = genome_length_dict[strain] * 0.5
        running_total = 0
        # rank counts how many contigs have been consumed so far (1-based)
        for rank, length in enumerate(lengths, start=1):
            running_total += length
            if running_total >= target:
                l50_dict[strain] = rank
                break
    return l50_dict
def find_l75(contig_lengths_dict, genome_length_dict):
    """
    Calculate the L75 for each strain: the number of (largest-first) contigs whose
    running length total first reaches 3/4 of the genome size.
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :param genome_length_dict: dictionary of strain name: total genome length
    :return: l75_dict: dictionary of strain name: L75
    """
    l75_dict = dict()
    for strain, lengths in contig_lengths_dict.items():
        target = genome_length_dict[strain] * 0.75
        running_total = 0
        # rank counts how many contigs have been consumed so far (1-based)
        for rank, length in enumerate(lengths, start=1):
            running_total += length
            if running_total >= target:
                l75_dict[strain] = rank
                break
    return l75_dict
def find_l90(contig_lengths_dict, genome_length_dict):
    """
    Calculate the L90 for each strain: the number of (largest-first) contigs whose
    running length total first reaches 9/10 of the genome size.
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :param genome_length_dict: dictionary of strain name: total genome length
    :return: l90_dict: dictionary of strain name: L90
    """
    l90_dict = dict()
    for strain, lengths in contig_lengths_dict.items():
        target = genome_length_dict[strain] * 0.9
        running_total = 0
        # rank counts how many contigs have been consumed so far (1-based)
        for rank, length in enumerate(lengths, start=1):
            running_total += length
            if running_total >= target:
                l90_dict[strain] = rank
                break
    return l90_dict
def predict_orfs(file_dict, num_threads=1):
    """
    Run prodigal (with 'sco' output format) on every assembly to predict ORFs.
    :param file_dict: dictionary of strain name: /sequencepath/strain_name.extension
    :param num_threads: number of prodigal processes to run concurrently
    :return: orf_file_dict: dictionary of strain name: /sequencepath/prodigal results.sco
    """
    orf_file_dict = dict()
    commands = list()
    for strain, fasta_path in file_dict.items():
        # Prodigal results live beside the assembly with a .sco extension
        sco_path = os.path.splitext(fasta_path)[0] + '.sco'
        # Only queue a prodigal run when the results are not already on disk
        if not os.path.isfile(sco_path):
            commands.append(['prodigal', '-i', fasta_path, '-o', sco_path, '-f', 'sco'])
        orf_file_dict[strain] = sco_path
    # Fan the queued prodigal commands out over a pool of worker processes
    pool = multiprocessing.Pool(processes=num_threads)
    pool.map(run_prodigal, commands)
    pool.close()
    pool.join()
    return orf_file_dict
def run_prodigal(prodigal_command):
    """
    Execute a single prodigal command list, discarding its stdout/stderr.
    :param prodigal_command: prodigal argument list suitable for subprocess.call
    """
    with open(os.devnull, 'w') as f:  # No need to make the user see prodigal output, send it to devnull
        subprocess.call(prodigal_command, stdout=f, stderr=f)
def find_orf_distribution(orf_file_dict):
    """
    Parse prodigal .sco reports and tally ORF counts by size range, deleting each
    report once it has been parsed.
    :param orf_file_dict: dictionary of strain name: /sequencepath/prodigal results.sco
    :return: orf_dist_dict: dictionary of strain name:
        (total ORFs, >3000 bp, >1000 bp, >500 bp, <=500 bp)
    """
    orf_dist_dict = dict()
    for strain, report_path in orf_file_dict.items():
        total_orfs = 0
        # Counts for ORFs >3000, >1000, >500 and <=500 bp respectively
        size_bins = [0, 0, 0, 0]
        with open(report_path, 'r') as report:
            for line in report:
                # Header lines are skipped; ORF entries look like '>1_345_920_-'
                if not line.startswith('>'):
                    continue
                _, start, stop, _direction = line.split('_')
                # ORF length is the difference between the stop and start coordinates
                orf_size = int(stop) - int(start)
                total_orfs += 1
                if orf_size > 3000:
                    size_bins[0] += 1
                elif orf_size > 1000:
                    size_bins[1] += 1
                elif orf_size > 500:
                    size_bins[2] += 1
                else:
                    size_bins[3] += 1
        orf_dist_dict[strain] = (total_orfs,) + tuple(size_bins)
        # Remove the per-strain prodigal report now that it has been parsed
        try:
            os.remove(report_path)
        except IOError:
            pass
    return orf_dist_dict
def reporter(gc_dict, contig_dist_dict, longest_contig_dict, genome_length_dict, num_contigs_dict, n50_dict, n75_dict,
             n90_dict, l50_dict, l75_dict, l90_dict, orf_dist_dict, genus_dict, sequencepath):
    """
    Create a CSV report of all the extracted features
    :param gc_dict: dictionary of strain name: GC%
    :param contig_dist_dict: dictionary of strain: tuple of contig distribution frequencies
    :param longest_contig_dict: dictionary of strain name: longest contig
    :param genome_length_dict: dictionary of strain name: total genome length
    :param num_contigs_dict: dictionary of strain name: total number of contigs
    :param n50_dict: dictionary of strain name: N50
    :param n75_dict: dictionary of strain name: N75
    :param n90_dict: dictionary of strain name: N90
    :param l50_dict: dictionary of strain name: L50
    :param l75_dict: dictionary of strain name: L75
    :param l90_dict: dictionary of strain name: L90
    :param orf_dist_dict: dictionary of strain name: tuple of ORF length frequencies
    :param genus_dict: dictionary of strain name: genus
    :param sequencepath: path of folder containing FASTA genomes
    """
    # Initialise string with header information
    data = 'SampleName,TotalLength,NumContigs,LongestContig,Contigs>1000000,Contigs>500000,Contigs>100000,' \
           'Contigs>50000,Contigs>10000,Contigs>5000,Contigs<5000,TotalORFs,ORFs>3000,ORFs>1000,ORFs>500,' \
           'ORFs<500,N50,N75,N90,L50,L75,L90,GC%,Genus\n'
    # Create and open the report for writing
    with open(os.path.join(sequencepath, 'extracted_features.csv'), 'w') as feature_report:
        for file_name in sorted(longest_contig_dict):
            # Populate the data string with the appropriate values.
            # BUGFIX: removed the stray space that previously followed '{ORF52},' and
            # corrupted the ORFs<500 field in the CSV output.
            data += '{name},{totlen},{numcontigs},{longestcontig},{over_106},{over_56},{over_105},{over_55},' \
                    '{over_104},{over_54},{under_54},{tORFS},{ORF33},{ORF13},{ORF52},{ORF11},{n50},{n75},{n90},' \
                    '{l50},{l75},{l90},{gc},{genus}\n'\
                .format(name=file_name,
                        totlen=genome_length_dict[file_name],
                        numcontigs=num_contigs_dict[file_name],
                        longestcontig=longest_contig_dict[file_name],
                        over_106=contig_dist_dict[file_name][0],
                        over_56=contig_dist_dict[file_name][1],
                        over_105=contig_dist_dict[file_name][2],
                        over_55=contig_dist_dict[file_name][3],
                        over_104=contig_dist_dict[file_name][4],
                        over_54=contig_dist_dict[file_name][5],
                        under_54=contig_dist_dict[file_name][6],
                        tORFS=orf_dist_dict[file_name][0],
                        ORF33=orf_dist_dict[file_name][1],
                        ORF13=orf_dist_dict[file_name][2],
                        ORF52=orf_dist_dict[file_name][3],
                        ORF11=orf_dist_dict[file_name][4],
                        n50=n50_dict[file_name],
                        n75=n75_dict[file_name],
                        n90=n90_dict[file_name],
                        l50=l50_dict[file_name],
                        l75=l75_dict[file_name],
                        l90=l90_dict[file_name],
                        gc=gc_dict[file_name],
                        genus=genus_dict[file_name])
        # Write the accumulated header + rows exactly once, after all samples are formatted.
        # BUGFIX: the write was previously inside the per-sample loop, so the growing string
        # (header plus all rows so far) was re-written on every iteration, duplicating rows.
        feature_report.write(data)
# Initialise the click decorator
@click.command()
@click.option('-s', '--sequencepath',
              type=click.Path(exists=True),
              required=True,
              help='Path of folder containing multi-FASTA files')
@click.option('-d', '--refseq_database',
              type=click.Path(exists=True),
              required=True,
              help='Path to reduced mash sketch of RefSeq.')
# NOTE(review): this relies on click giving a boolean flag an on-value of `not default`,
# so passing -r should yield report=False (disabling the report) — confirm against the
# click version used by this project.
@click.option('-r', '--report',
              is_flag=True,
              default=True,
              help='By default, a report of the extracted features is created. Include this flag if you do not want '
                   'a report created')
def cli(sequencepath, report, refseq_database):
    """
    Pass command line arguments to, and run the feature extraction functions
    """
    # Use every available core for the MASH/prodigal steps
    main(sequencepath, report, refseq_database, num_threads=multiprocessing.cpu_count())
if __name__ == '__main__':
    cli()
|
OLC-LOC-Bioinformatics/GenomeQAML | genomeqaml/extract_features.py | fasta_stats | python | def fasta_stats(files, records):
# Initialise dictionaries
contig_len_dict = dict()
gc_dict = dict()
for file_name in files:
# Initialise variables to store appropriate values parsed from contig records
contig_lengths = list()
fasta_sequence = str()
for contig, record in records[file_name].items():
# Append the length of the contig to the list
contig_lengths.append(len(record.seq))
# Add the contig sequence to the string
fasta_sequence += record.seq
# Set the reverse sorted (e.g. largest to smallest) list of contig sizes as the value
contig_len_dict[file_name] = sorted(contig_lengths, reverse=True)
# Calculate the GC% of the total genome sequence using GC - format to have two decimal places
gc_dict[file_name] = float('{:0.2f}'.format(GC(fasta_sequence)))
return contig_len_dict, gc_dict | Parse the lengths of all contigs for each sample, as well as the total GC%
:param files: dictionary of stain name: /sequencepath/strain_name.extension
:param records: Dictionary of strain name: SeqIO records
:return: contig_len_dict, gc_dict: dictionaries of list of all contig length, and total GC% for all strains | train | https://github.com/OLC-LOC-Bioinformatics/GenomeQAML/blob/2953e574c185afab23075641da4ce5392bc003e9/genomeqaml/extract_features.py#L133-L156 | null | #!/usr/bin/env python 3
from accessoryFunctions.accessoryFunctions import printtime
from Bio.SeqUtils import GC
from biotools import mash
import multiprocessing
from Bio import SeqIO
from glob import glob
import subprocess
import shutil
import click
import time
import os
__author__ = 'adamkoziol', 'andrewlow'
def main(sequencepath, report, refseq_database, num_threads=12, start=None):
    """
    Run the appropriate functions in order
    :param sequencepath: path of folder containing FASTA genomes
    :param report: boolean to determine whether a report is to be created
    :param refseq_database: Path to reduced refseq database sketch
    :param num_threads: Number of threads to run mash/other stuff on
    :param start: epoch timestamp used for elapsed-time logging; defaults to the time
        this call begins
    :return: gc_dict, contig_dist_dict, longest_contig_dict, genome_length_dict, num_contigs_dict, n50_dict, n75_dict, \
    n90_dict, l50_dict, l75_dict, l90_dict, orf_dist_dict
    """
    # BUGFIX: the default was previously `start=time.time()`, which is evaluated once at
    # import, so every later call logged elapsed times measured from module load instead
    # of from the start of the run. Resolve the default at call time instead.
    if start is None:
        start = time.time()
    files = find_files(sequencepath)
    file_dict = filer(files)
    printtime('Using MASH to determine genera of samples', start)
    genus_dict = find_genus(file_dict, refseq_database, threads=num_threads)
    file_records = fasta_records(file_dict)
    printtime('Collecting basic quality metrics', start)
    # Basic assembly metrics are all derived from the per-strain contig length lists
    contig_len_dict, gc_dict = fasta_stats(file_dict, file_records)
    contig_dist_dict = find_contig_distribution(contig_len_dict)
    longest_contig_dict = find_largest_contig(contig_len_dict)
    genome_length_dict = find_genome_length(contig_len_dict)
    num_contigs_dict = find_num_contigs(contig_len_dict)
    n50_dict = find_n50(contig_len_dict, genome_length_dict)
    n75_dict = find_n75(contig_len_dict, genome_length_dict)
    n90_dict = find_n90(contig_len_dict, genome_length_dict)
    l50_dict = find_l50(contig_len_dict, genome_length_dict)
    l75_dict = find_l75(contig_len_dict, genome_length_dict)
    l90_dict = find_l90(contig_len_dict, genome_length_dict)
    printtime('Using prodigal to calculate number of ORFs in each sample', start)
    orf_file_dict = predict_orfs(file_dict, num_threads=num_threads)
    orf_dist_dict = find_orf_distribution(orf_file_dict)
    if report:
        reporter(gc_dict, contig_dist_dict, longest_contig_dict, genome_length_dict, num_contigs_dict, n50_dict,
                 n75_dict, n90_dict, l50_dict, l75_dict, l90_dict, orf_dist_dict, genus_dict, sequencepath)
    printtime('Features extracted!', start)
    return gc_dict, contig_dist_dict, longest_contig_dict, genome_length_dict, num_contigs_dict, n50_dict, n75_dict, \
        n90_dict, l50_dict, l75_dict, l90_dict, orf_dist_dict
def find_files(sequencepath):
    """
    Locate all FASTA files in the supplied directory.
    Only names matching '*.fa*' (e.g. .fasta, .fa, .fas) are picked up; extensions
    such as .fsa or .tfa are not currently supported.
    :param sequencepath: path of folder containing FASTA genomes
    :return: sorted list of FASTA file paths
    """
    # Build the glob pattern once, then return the matches in sorted order
    pattern = os.path.join(sequencepath, '*.fa*')
    return sorted(glob(pattern))
def filer(filelist):
    """
    Map each file path to its strain name (the basename with the extension removed).
    :param filelist: list of files to parse
    :return filedict: dictionary of strain name: /sequencepath/strain_name.extension
    """
    return {os.path.splitext(os.path.basename(path))[0]: path for path in filelist}
def fasta_records(files):
    """
    Parse every FASTA file into a dictionary of SeqIO records keyed by contig id.
    :param files: dictionary of strain name: /sequencepath/strain_name.extension
    :return: file_records: dictionary of strain name: {contig id: SeqIO record}
    """
    return {strain: SeqIO.to_dict(SeqIO.parse(fasta_path, "fasta"))
            for strain, fasta_path in files.items()}
def find_genus(files, database, threads=12):
    """
    Uses MASH to find the genus of fasta files.
    :param files: File dictionary returned by filer method.
    :param database: Path to reduced refseq database sketch.
    :param threads: Number of threads to run mash with.
    :return: genus_dict: Dictionary of genus for each sample. Will return NA if genus could not be found.
    """
    genus_dict = dict()
    # Use the fractional digits of the current time as a (probably unique) scratch-dir name,
    # created in the current working directory
    tmpdir = str(time.time()).split('.')[-1]
    if not os.path.isdir(tmpdir):
        os.makedirs(tmpdir)
    for file_name, fasta in files.items():
        # Screen each assembly against the RefSeq sketch, requiring >= 95% identity (i=0.95);
        # w='' presumably disables the winner-takes-all filter — confirm against mash docs
        mash.screen(database, fasta,
                    threads=threads,
                    w='',
                    i=0.95,
                    output_file=os.path.join(tmpdir, 'screen.tab'))
        screen_output = mash.read_mash_screen(os.path.join(tmpdir, 'screen.tab'))
        # Best-effort removal of the screen output between samples
        try:
            os.remove(os.path.join(tmpdir, 'screen.tab'))
        except IOError:
            pass
        try:
            # The genus is taken from the third-from-last component of the top hit's RefSeq path
            genus = screen_output[0].query_id.split('/')[-3]
            if genus == 'Shigella':
                # Report Shigella hits as Escherichia — presumably because the two genera are
                # near-indistinguishable at the genomic level; confirm intent
                genus = 'Escherichia'
            genus_dict[file_name] = genus
        except IndexError:
            # An empty screen output means no hit passed the identity threshold
            genus_dict[file_name] = 'NA'
    shutil.rmtree(tmpdir)
    return genus_dict
def find_contig_distribution(contig_lengths_dict):
    """
    Bin each strain's contigs by size range and count the members of each bin.
    :param contig_lengths_dict: dictionary of strain name: list of contig lengths
    :return: contig_len_dist_dict: dictionary of strain name: tuple of bin counts
        (>1000000, >500000, >100000, >50000, >10000, >5000, <=5000)
    """
    # Bin lower bounds, largest first; a contig belongs to the first bin it exceeds
    cutoffs = (1000000, 500000, 100000, 50000, 10000, 5000)
    contig_len_dist_dict = dict()
    for strain, lengths in contig_lengths_dict.items():
        counts = [0] * (len(cutoffs) + 1)
        for length in lengths:
            for position, cutoff in enumerate(cutoffs):
                if length > cutoff:
                    counts[position] += 1
                    break
            else:
                # Did not exceed any cutoff: contig is 5000 bp or smaller
                counts[-1] += 1
        contig_len_dist_dict[strain] = tuple(counts)
    return contig_len_dist_dict
def find_largest_contig(contig_lengths_dict):
    """
    Report the longest contig for every strain.
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :return: longest_contig_dict: dictionary of strain name: longest contig
    """
    # Lists are sorted largest-first, so index 0 holds the longest contig
    return {strain: lengths[0] for strain, lengths in contig_lengths_dict.items()}
def find_genome_length(contig_lengths_dict):
    """
    Total the contig lengths for every strain to obtain the assembly size.
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :return: genome_length_dict: dictionary of strain name: total genome length
    """
    return {strain: sum(lengths) for strain, lengths in contig_lengths_dict.items()}
def find_num_contigs(contig_lengths_dict):
    """
    Count how many contigs each strain's assembly contains.
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :return: num_contigs_dict: dictionary of strain name: total number of contigs
    """
    return {strain: len(lengths) for strain, lengths in contig_lengths_dict.items()}
def find_n50(contig_lengths_dict, genome_length_dict):
    """
    Calculate the N50 for each strain: the length of the contig at which the running
    total of (descending) contig lengths first reaches half the genome size.
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :param genome_length_dict: dictionary of strain name: total genome length
    :return: n50_dict: dictionary of strain name: N50
    """
    n50_dict = dict()
    for strain, lengths in contig_lengths_dict.items():
        # Half the total assembly length is the coverage target
        target = genome_length_dict[strain] * 0.5
        running_total = 0
        for length in lengths:
            running_total += length
            if running_total >= target:
                n50_dict[strain] = length
                break
    return n50_dict
def find_n75(contig_lengths_dict, genome_length_dict):
    """
    Calculate the N75 for each strain: the length of the contig at which the running
    total of (descending) contig lengths first reaches 3/4 of the genome size.
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :param genome_length_dict: dictionary of strain name: total genome length
    :return: n75_dict: dictionary of strain name: N75
    """
    n75_dict = dict()
    for strain, lengths in contig_lengths_dict.items():
        # Three quarters of the total assembly length is the coverage target
        target = genome_length_dict[strain] * 0.75
        running_total = 0
        for length in lengths:
            running_total += length
            if running_total >= target:
                n75_dict[strain] = length
                break
    return n75_dict
def find_n90(contig_lengths_dict, genome_length_dict):
    """
    Calculate the N90 for each strain. N90 is defined as the largest contig such that at least 9/10 of the total
    genome size is contained in contigs equal to or larger than this contig
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :param genome_length_dict: dictionary of strain name: total genome length
    :return: n90_dict: dictionary of strain name: N90
    """
    # Initialise the dictionary
    n90_dict = dict()
    for file_name, contig_lengths in contig_lengths_dict.items():
        currentlength = 0
        for contig_length in contig_lengths:
            currentlength += contig_length
            # Once the running total covers 9/10 of the genome, the current contig length is the N90.
            # BUGFIX: this previously compared against 0.95 of the genome (i.e. an N95), contradicting
            # the documented N90 definition and the 0.9 threshold used by the matching L90 calculation.
            if currentlength >= genome_length_dict[file_name] * 0.9:
                n90_dict[file_name] = contig_length
                break
    return n90_dict
def find_l50(contig_lengths_dict, genome_length_dict):
    """
    Calculate the L50 for each strain: the number of (largest-first) contigs whose
    running length total first reaches half the genome size.
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :param genome_length_dict: dictionary of strain name: total genome length
    :return: l50_dict: dictionary of strain name: L50
    """
    l50_dict = dict()
    for strain, lengths in contig_lengths_dict.items():
        target = genome_length_dict[strain] * 0.5
        running_total = 0
        # rank counts how many contigs have been consumed so far (1-based)
        for rank, length in enumerate(lengths, start=1):
            running_total += length
            if running_total >= target:
                l50_dict[strain] = rank
                break
    return l50_dict
def find_l75(contig_lengths_dict, genome_length_dict):
    """
    Calculate the L75 for each strain: the number of (largest-first) contigs whose
    running length total first reaches 3/4 of the genome size.
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :param genome_length_dict: dictionary of strain name: total genome length
    :return: l75_dict: dictionary of strain name: L75
    """
    l75_dict = dict()
    for strain, lengths in contig_lengths_dict.items():
        target = genome_length_dict[strain] * 0.75
        running_total = 0
        # rank counts how many contigs have been consumed so far (1-based)
        for rank, length in enumerate(lengths, start=1):
            running_total += length
            if running_total >= target:
                l75_dict[strain] = rank
                break
    return l75_dict
def find_l90(contig_lengths_dict, genome_length_dict):
    """
    Calculate the L90 for each strain: the number of (largest-first) contigs whose
    running length total first reaches 9/10 of the genome size.
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :param genome_length_dict: dictionary of strain name: total genome length
    :return: l90_dict: dictionary of strain name: L90
    """
    l90_dict = dict()
    for strain, lengths in contig_lengths_dict.items():
        target = genome_length_dict[strain] * 0.9
        running_total = 0
        # rank counts how many contigs have been consumed so far (1-based)
        for rank, length in enumerate(lengths, start=1):
            running_total += length
            if running_total >= target:
                l90_dict[strain] = rank
                break
    return l90_dict
def predict_orfs(file_dict, num_threads=1):
"""
Use prodigal to predict the number of open reading frames (ORFs) in each strain
:param file_dict: dictionary of strain name: /sequencepath/strain_name.extension
:param num_threads: number of threads to use in the pool of prodigal processes
:return: orf_file_dict: dictionary of strain name: /sequencepath/prodigal results.sco
"""
# Initialise the dictionary
orf_file_dict = dict()
prodigallist = list()
for file_name, file_path in file_dict.items():
# Set the name of the output .sco results file
results = os.path.splitext(file_path)[0] + '.sco'
# Create the command for prodigal to execute - use sco output format
prodigal = ['prodigal', '-i', file_path, '-o', results, '-f', 'sco']
# Only run prodigal if the output file doesn't already exist
if not os.path.isfile(results):
prodigallist.append(prodigal)
# Populate the dictionary with the name of the results file
orf_file_dict[file_name] = results
# Setup the multiprocessing pool.
pool = multiprocessing.Pool(processes=num_threads)
pool.map(run_prodigal, prodigallist)
pool.close()
pool.join()
return orf_file_dict
def run_prodigal(prodigal_command):
    """
    Execute a single prodigal invocation in a subprocess, discarding its console output.
    :param prodigal_command: list of command-line arguments passed directly to subprocess.call
    """
    with open(os.devnull, 'w') as f:  # No need to make the user see prodigal output, send it to devnull
        subprocess.call(prodigal_command, stdout=f, stderr=f)
def find_orf_distribution(orf_file_dict):
"""
Parse the prodigal outputs to determine the frequency of ORF size ranges for each strain
:param orf_file_dict: dictionary of strain name: /sequencepath/prodigal results.sco
:return: orf_dist_dict: dictionary of strain name: tuple of ORF size range distribution frequencies
"""
# Initialise the dictionary
orf_dist_dict = dict()
for file_name, orf_report in orf_file_dict.items():
# Initialise variable to store the frequency of the different ORF size ranges
total_orfs = 0
over_3000 = 0
over_1000 = 0
over_500 = 0
other = 0
# Open the strain-specific report
with open(orf_report, 'r') as orfreport:
for line in orfreport:
# The report has a header section that can be ignored - only parse lines beginning with '>'
if line.startswith('>'):
# Split the line on '_' characters e.g. >1_345_920_- yields contig: >1, start: 345, stop: 920,
# direction: -
contig, start, stop, direction = line.split('_')
# The size of the ORF is the end position minus the start position e.g. 920 - 345 = 575
size = int(stop) - int(start)
# Increment the total number of ORFs before binning based on ORF size
total_orfs += 1
# Increment the appropriate integer based on ORF size
if size > 3000:
over_3000 += 1
elif size > 1000:
over_1000 += 1
elif size > 500:
over_500 += 1
else:
other += 1
# Populate the dictionary with a tuple of the ORF size range frequencies
orf_dist_dict[file_name] = (total_orfs,
over_3000,
over_1000,
over_500,
other)
# Clean-up the prodigal reports
try:
os.remove(orf_report)
except IOError:
pass
return orf_dist_dict
def reporter(gc_dict, contig_dist_dict, longest_contig_dict, genome_length_dict, num_contigs_dict, n50_dict, n75_dict,
             n90_dict, l50_dict, l75_dict, l90_dict, orf_dist_dict, genus_dict, sequencepath):
    """
    Create a CSV report of all the extracted features
    :param gc_dict: dictionary of strain name: GC%
    :param contig_dist_dict: dictionary of strain name: tuple of contig distribution frequencies
    :param longest_contig_dict: dictionary of strain name: longest contig
    :param genome_length_dict: dictionary of strain name: total genome length
    :param num_contigs_dict: dictionary of strain name: total number of contigs
    :param n50_dict: dictionary of strain name: N50
    :param n75_dict: dictionary of strain name: N75
    :param n90_dict: dictionary of strain name: N90
    :param l50_dict: dictionary of strain name: L50
    :param l75_dict: dictionary of strain name: L75
    :param l90_dict: dictionary of strain name: L90
    :param orf_dist_dict: dictionary of strain name: tuple of ORF length frequencies
    :param genus_dict: dictionary of strain name: genus
    :param sequencepath: path of folder containing FASTA genomes
    """
    # Initialise string with header information
    data = 'SampleName,TotalLength,NumContigs,LongestContig,Contigs>1000000,Contigs>500000,Contigs>100000,' \
           'Contigs>50000,Contigs>10000,Contigs>5000,Contigs<5000,TotalORFs,ORFs>3000,ORFs>1000,ORFs>500,' \
           'ORFs<500,N50,N75,N90,L50,L75,L90,GC%,Genus\n'
    # Create and open the report for writing
    with open(os.path.join(sequencepath, 'extracted_features.csv'), 'w') as feature_report:
        for file_name in sorted(longest_contig_dict):
            # Populate the data string with the appropriate values. NOTE: no whitespace between fields -
            # a previous revision had a stray space before {ORF11}, giving every ORFs<500 value a leading space
            data += '{name},{totlen},{numcontigs},{longestcontig},{over_106},{over_56},{over_105},{over_55},' \
                    '{over_104},{over_54},{under_54},{tORFS},{ORF33},{ORF13},{ORF52},{ORF11},{n50},{n75},{n90},' \
                    '{l50},{l75},{l90},{gc},{genus}\n'\
                .format(name=file_name,
                        totlen=genome_length_dict[file_name],
                        numcontigs=num_contigs_dict[file_name],
                        longestcontig=longest_contig_dict[file_name],
                        over_106=contig_dist_dict[file_name][0],
                        over_56=contig_dist_dict[file_name][1],
                        over_105=contig_dist_dict[file_name][2],
                        over_55=contig_dist_dict[file_name][3],
                        over_104=contig_dist_dict[file_name][4],
                        over_54=contig_dist_dict[file_name][5],
                        under_54=contig_dist_dict[file_name][6],
                        tORFS=orf_dist_dict[file_name][0],
                        ORF33=orf_dist_dict[file_name][1],
                        ORF13=orf_dist_dict[file_name][2],
                        ORF52=orf_dist_dict[file_name][3],
                        ORF11=orf_dist_dict[file_name][4],
                        n50=n50_dict[file_name],
                        n75=n75_dict[file_name],
                        n90=n90_dict[file_name],
                        l50=l50_dict[file_name],
                        l75=l75_dict[file_name],
                        l90=l90_dict[file_name],
                        gc=gc_dict[file_name],
                        genus=genus_dict[file_name])
        # Write the accumulated header plus all rows in a single call
        feature_report.write(data)
# Initialise the click decorator
@click.command()
@click.option('-s', '--sequencepath',
type=click.Path(exists=True),
required=True,
help='Path of folder containing multi-FASTA files')
@click.option('-d', '--refseq_database',
type=click.Path(exists=True),
required=True,
help='Path to reduced mash sketch of RefSeq.')
@click.option('-r', '--report',
is_flag=True,
default=True,
help='By default, a report of the extracted features is created. Include this flag if you do not want '
'a report created')
def cli(sequencepath, report, refseq_database):
"""
Pass command line arguments to, and run the feature extraction functions
"""
main(sequencepath, report, refseq_database, num_threads=multiprocessing.cpu_count())
if __name__ == '__main__':
cli()
|
OLC-LOC-Bioinformatics/GenomeQAML | genomeqaml/extract_features.py | find_contig_distribution | python | def find_contig_distribution(contig_lengths_dict):
# Initialise the dictionary
contig_len_dist_dict = dict()
for file_name, contig_lengths in contig_lengths_dict.items():
# Initialise integers to store the number of contigs that fall into the different bin sizes
over_1000000 = 0
over_500000 = 0
over_100000 = 0
over_50000 = 0
over_10000 = 0
over_5000 = 0
other = 0
for contig_length in contig_lengths:
# Depending on the size of the contig, increment the appropriate integer
if contig_length > 1000000:
over_1000000 += 1
elif contig_length > 500000:
over_500000 += 1
elif contig_length > 100000:
over_100000 += 1
elif contig_length > 50000:
over_50000 += 1
elif contig_length > 10000:
over_10000 += 1
elif contig_length > 5000:
over_5000 += 1
else:
other += 1
# Populate the dictionary with a tuple of each of the size range frequencies
contig_len_dist_dict[file_name] = (over_1000000,
over_500000,
over_100000,
over_50000,
over_10000,
over_5000,
other)
return contig_len_dist_dict | Determine the frequency of different contig size ranges for each strain
:param contig_lengths_dict:
:return: contig_len_dist_dict: dictionary of strain name: tuple of contig size range frequencies | train | https://github.com/OLC-LOC-Bioinformatics/GenomeQAML/blob/2953e574c185afab23075641da4ce5392bc003e9/genomeqaml/extract_features.py#L159-L200 | null | #!/usr/bin/env python 3
from accessoryFunctions.accessoryFunctions import printtime
from Bio.SeqUtils import GC
from biotools import mash
import multiprocessing
from Bio import SeqIO
from glob import glob
import subprocess
import shutil
import click
import time
import os
__author__ = 'adamkoziol', 'andrewlow'
def main(sequencepath, report, refseq_database, num_threads=12, start=time.time()):
    """
    Run the appropriate functions in order
    :param sequencepath: path of folder containing FASTA genomes
    :param report: boolean to determine whether a report is to be created
    :param refseq_database: Path to reduced refseq database sketch
    :param num_threads: Number of threads to run mash/other stuff on
    :param start: analysis start time used for elapsed-time log messages. NOTE(review): this default is
        evaluated once at import time, not per call - confirm that is intended
    :return: gc_dict, contig_dist_dict, longest_contig_dict, genome_length_dict, num_contigs_dict, n50_dict, n75_dict, \
    n90_dict, l50_dict, l75_dict, l90_dict, orf_dist_dict
    """
    # Discover the FASTA files and map each strain name to its path
    files = find_files(sequencepath)
    file_dict = filer(files)
    printtime('Using MASH to determine genera of samples', start)
    genus_dict = find_genus(file_dict, refseq_database, threads=num_threads)
    # Parse every FASTA into per-strain SeqIO record dictionaries
    file_records = fasta_records(file_dict)
    printtime('Collecting basic quality metrics', start)
    # Per-strain contig length lists and GC%, then the derived assembly metrics
    contig_len_dict, gc_dict = fasta_stats(file_dict, file_records)
    contig_dist_dict = find_contig_distribution(contig_len_dict)
    longest_contig_dict = find_largest_contig(contig_len_dict)
    genome_length_dict = find_genome_length(contig_len_dict)
    num_contigs_dict = find_num_contigs(contig_len_dict)
    n50_dict = find_n50(contig_len_dict, genome_length_dict)
    n75_dict = find_n75(contig_len_dict, genome_length_dict)
    n90_dict = find_n90(contig_len_dict, genome_length_dict)
    l50_dict = find_l50(contig_len_dict, genome_length_dict)
    l75_dict = find_l75(contig_len_dict, genome_length_dict)
    l90_dict = find_l90(contig_len_dict, genome_length_dict)
    printtime('Using prodigal to calculate number of ORFs in each sample', start)
    # ORF prediction is the slow step - it is parallelised over num_threads processes
    orf_file_dict = predict_orfs(file_dict, num_threads=num_threads)
    orf_dist_dict = find_orf_distribution(orf_file_dict)
    if report:
        reporter(gc_dict, contig_dist_dict, longest_contig_dict, genome_length_dict, num_contigs_dict, n50_dict,
                 n75_dict, n90_dict, l50_dict, l75_dict, l90_dict, orf_dist_dict, genus_dict, sequencepath)
    printtime('Features extracted!', start)
    return gc_dict, contig_dist_dict, longest_contig_dict, genome_length_dict, num_contigs_dict, n50_dict, n75_dict, \
        n90_dict, l50_dict, l75_dict, l90_dict, orf_dist_dict
def find_files(sequencepath):
    """
    Locate all FASTA files in the supplied sequence path. NOTE: FASTA files must have an extension beginning
    with .fa (e.g. .fasta, .fa, or .fas). Extensions such as .fsa or .tfa are not currently supported
    :param sequencepath: path of folder containing FASTA genomes
    :return: sorted list of FASTA files
    """
    # Match every file whose extension starts with '.fa' and return the hits in sorted order
    pattern = os.path.join(sequencepath, '*.fa*')
    return sorted(glob(pattern))
def filer(filelist):
    """
    Helper that maps each strain name to its file path
    :param filelist: list of files to parse
    :return filedict: dictionary of strain name: /sequencepath/strain_name.extension
    """
    # The strain name is the file's base name with the extension stripped off
    return {os.path.splitext(os.path.basename(seqfile))[0]: seqfile
            for seqfile in filelist}
def fasta_records(files):
    """
    Use SeqIO to build a per-strain dictionary of contig records for every FASTA file
    :param files: dictionary of strain name: /sequencepath/strain_name.extension
    :return: file_records: dictionary of strain name: dictionary of contig records
    """
    # Parse each FASTA exactly once and key the resulting record dictionary by strain name
    return {file_name: SeqIO.to_dict(SeqIO.parse(fasta, "fasta"))
            for file_name, fasta in files.items()}
def find_genus(files, database, threads=12):
    """
    Uses MASH to find the genus of fasta files.
    :param files: File dictionary returned by filer method.
    :param database: Path to reduced refseq database sketch.
    :param threads: Number of threads to run mash with.
    :return: genus_dict: Dictionary of genus for each sample. Will return NA if genus could not be found.
    """
    genus_dict = dict()
    # Temporary working directory named from the fractional digits of the clock.
    # NOTE(review): this is a relative path, so it is created under the current working directory
    tmpdir = str(time.time()).split('.')[-1]
    if not os.path.isdir(tmpdir):
        os.makedirs(tmpdir)
    for file_name, fasta in files.items():
        # Screen the sample against the RefSeq sketch; hits with identity >= 0.95 are kept
        mash.screen(database, fasta,
                    threads=threads,
                    w='',
                    i=0.95,
                    output_file=os.path.join(tmpdir, 'screen.tab'))
        screen_output = mash.read_mash_screen(os.path.join(tmpdir, 'screen.tab'))
        # Best-effort clean-up of the per-sample screen output before the next iteration
        try:
            os.remove(os.path.join(tmpdir, 'screen.tab'))
        except IOError:
            pass
        try:
            # The genus is extracted from the RefSeq path of the top hit
            genus = screen_output[0].query_id.split('/')[-3]
            # Shigella is reported as Escherichia - presumably because the two genera are genomically
            # near-identical; confirm this folding is still desired
            if genus == 'Shigella':
                genus = 'Escherichia'
            genus_dict[file_name] = genus
        except IndexError:
            # No hits passed the identity threshold - genus is unknown
            genus_dict[file_name] = 'NA'
    shutil.rmtree(tmpdir)
    return genus_dict
def fasta_stats(files, records):
    """
    Parse the lengths of all contigs for each sample, as well as the total GC%
    :param files: dictionary of strain name: /sequencepath/strain_name.extension
    :param records: Dictionary of strain name: SeqIO records
    :return: contig_len_dict, gc_dict: dictionaries of list of all contig length, and total GC% for all strains
    """
    # Initialise dictionaries
    contig_len_dict = dict()
    gc_dict = dict()
    for file_name in files:
        # Initialise variables to store appropriate values parsed from contig records
        contig_lengths = list()
        # NOTE(review): starts as a str but becomes a Bio Seq after the first += record.seq; repeated
        # concatenation is also quadratic for highly fragmented assemblies - confirm acceptable
        fasta_sequence = str()
        for contig, record in records[file_name].items():
            # Append the length of the contig to the list
            contig_lengths.append(len(record.seq))
            # Add the contig sequence to the string
            fasta_sequence += record.seq
        # Set the reverse sorted (e.g. largest to smallest) list of contig sizes as the value
        contig_len_dict[file_name] = sorted(contig_lengths, reverse=True)
        # Calculate the GC% of the total genome sequence using GC - format to have two decimal places
        gc_dict[file_name] = float('{:0.2f}'.format(GC(fasta_sequence)))
    return contig_len_dict, gc_dict
def find_largest_contig(contig_lengths_dict):
    """
    Determine the largest contig for each strain
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :return: longest_contig_dict: dictionary of strain name: longest contig
    """
    # The length lists arrive sorted largest-to-smallest, so the first element is the longest contig
    return {file_name: contig_lengths[0]
            for file_name, contig_lengths in contig_lengths_dict.items()}
def find_genome_length(contig_lengths_dict):
    """
    Determine the total length of all the contigs for each strain
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :return: genome_length_dict: dictionary of strain name: total genome length
    """
    # The assembly length is simply the sum of every contig length
    return {file_name: sum(contig_lengths)
            for file_name, contig_lengths in contig_lengths_dict.items()}
def find_num_contigs(contig_lengths_dict):
    """
    Count the total number of contigs for each strain
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :return: num_contigs_dict: dictionary of strain name: total number of contigs
    """
    # One entry per contig, so the list length is the contig count
    return {file_name: len(contig_lengths)
            for file_name, contig_lengths in contig_lengths_dict.items()}
def find_n50(contig_lengths_dict, genome_length_dict):
    """
    Calculate the N50 for each strain. N50 is defined as the largest contig such that at least half of the total
    genome size is contained in contigs equal to or larger than this contig
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :param genome_length_dict: dictionary of strain name: total genome length
    :return: n50_dict: dictionary of strain name: N50
    """
    n50_dict = dict()
    for file_name, contig_lengths in contig_lengths_dict.items():
        # The running sum of (descending) contig lengths must reach half of the assembly length
        threshold = genome_length_dict[file_name] * 0.5
        running_total = 0
        for length in contig_lengths:
            running_total += length
            if running_total >= threshold:
                # The contig that pushes the running sum over the threshold is the N50
                n50_dict[file_name] = length
                break
    return n50_dict
def find_n75(contig_lengths_dict, genome_length_dict):
    """
    Calculate the N75 for each strain. N75 is defined as the largest contig such that at least 3/4 of the total
    genome size is contained in contigs equal to or larger than this contig
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :param genome_length_dict: dictionary of strain name: total genome length
    :return: n75_dict: dictionary of strain name: N75
    """
    n75_dict = dict()
    for file_name, contig_lengths in contig_lengths_dict.items():
        # The running sum of (descending) contig lengths must reach 75% of the assembly length
        threshold = genome_length_dict[file_name] * 0.75
        running_total = 0
        for length in contig_lengths:
            running_total += length
            if running_total >= threshold:
                # The contig that pushes the running sum over the threshold is the N75
                n75_dict[file_name] = length
                break
    return n75_dict
def find_n90(contig_lengths_dict, genome_length_dict):
    """
    Calculate the N90 for each strain. N90 is defined as the largest contig such that at least 9/10 of the total
    genome size is contained in contigs equal to or larger than this contig
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :param genome_length_dict: dictionary of strain name: total genome length
    :return: n90_dict: dictionary of strain name: N90
    """
    # Initialise the dictionary
    n90_dict = dict()
    for file_name, contig_lengths in contig_lengths_dict.items():
        currentlength = 0
        for contig_length in contig_lengths:
            currentlength += contig_length
            # BUGFIX: the threshold is 90% of the genome length. The previous factor of 0.95 computed an
            # N95, contradicting this function's documented contract (and the L90 counterpart below)
            if currentlength >= genome_length_dict[file_name] * 0.9:
                n90_dict[file_name] = contig_length
                break
    return n90_dict
def find_l50(contig_lengths_dict, genome_length_dict):
    """
    Calculate the L50 for each strain. L50 is defined as the number of contigs required to achieve the N50
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :param genome_length_dict: dictionary of strain name: total genome length
    :return: l50_dict: dictionary of strain name: L50
    """
    l50_dict = dict()
    for file_name, contig_lengths in contig_lengths_dict.items():
        # Half of the total assembly length is the threshold the running sum must reach
        threshold = genome_length_dict[file_name] * 0.5
        running_total = 0
        # Enumerate from 1 so the counter directly reflects how many contigs have been consumed
        for contig_count, length in enumerate(contig_lengths, start=1):
            running_total += length
            if running_total >= threshold:
                # Record how many contigs were needed to hit the threshold
                l50_dict[file_name] = contig_count
                break
    return l50_dict
def find_l75(contig_lengths_dict, genome_length_dict):
    """
    Calculate the L75 for each strain. L75 is defined as the number of contigs required to achieve the N75
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :param genome_length_dict: dictionary of strain name: total genome length
    :return: l75_dict: dictionary of strain name: L75
    """
    # Initialise the dictionary
    l75_dict = dict()
    for file_name, contig_lengths in contig_lengths_dict.items():
        # Running total of contig lengths consumed so far
        currentlength = 0
        # Count of contigs added to the running total
        currentcontig = 0
        for contig_length in contig_lengths:
            currentlength += contig_length
            currentcontig += 1
            # Same logic as with the N75, but the contig number is added instead of the length of the contig
            if currentlength >= genome_length_dict[file_name] * 0.75:
                l75_dict[file_name] = currentcontig
                break
    return l75_dict
def find_l90(contig_lengths_dict, genome_length_dict):
    """
    Calculate the L90 for each strain. L90 is defined as the number of contigs required to achieve the N90
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :param genome_length_dict: dictionary of strain name: total genome length
    :return: l90_dict: dictionary of strain name: L90
    """
    l90_dict = dict()
    for file_name, contig_lengths in contig_lengths_dict.items():
        # 90% of the total assembly length is the threshold the running sum must reach
        threshold = genome_length_dict[file_name] * 0.9
        running_total = 0
        # Enumerate from 1 so the counter directly reflects how many contigs have been consumed
        for contig_count, length in enumerate(contig_lengths, start=1):
            running_total += length
            if running_total >= threshold:
                # Record how many contigs were needed to hit the threshold
                l90_dict[file_name] = contig_count
                break
    return l90_dict
def predict_orfs(file_dict, num_threads=1):
    """
    Use prodigal to predict the number of open reading frames (ORFs) in each strain
    :param file_dict: dictionary of strain name: /sequencepath/strain_name.extension
    :param num_threads: number of threads to use in the pool of prodigal processes
    :return: orf_file_dict: dictionary of strain name: /sequencepath/prodigal results.sco
    """
    orf_file_dict = dict()
    commands = list()
    for file_name, file_path in file_dict.items():
        # prodigal results are written next to the input file with a .sco extension
        sco_file = os.path.splitext(file_path)[0] + '.sco'
        orf_file_dict[file_name] = sco_file
        # Skip samples whose results already exist so reruns are cheap
        if not os.path.isfile(sco_file):
            commands.append(['prodigal', '-i', file_path, '-o', sco_file, '-f', 'sco'])
    # Fan the prodigal invocations out over a pool of worker processes
    pool = multiprocessing.Pool(processes=num_threads)
    pool.map(run_prodigal, commands)
    pool.close()
    pool.join()
    return orf_file_dict
def run_prodigal(prodigal_command):
    """
    Execute a single prodigal invocation in a subprocess.
    :param prodigal_command: list of command-line arguments passed directly to subprocess.call
    """
    # Silence prodigal's console chatter - the caller only cares about the .sco output file
    subprocess.call(prodigal_command, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
def find_orf_distribution(orf_file_dict):
    """
    Parse the prodigal outputs to determine the frequency of ORF size ranges for each strain
    :param orf_file_dict: dictionary of strain name: /sequencepath/prodigal results.sco
    :return: orf_dist_dict: dictionary of strain name: tuple of ORF size range distribution frequencies
    """
    orf_dist_dict = dict()
    for file_name, orf_report in orf_file_dict.items():
        # Bins: total ORFs, then counts for >3000 bp, >1000 bp, >500 bp, and everything smaller
        total_orfs = over_3000 = over_1000 = over_500 = other = 0
        with open(orf_report, 'r') as report:
            for line in report:
                # Header/comment lines are skipped; ORF entries look like '>1_345_920_-'
                if not line.startswith('>'):
                    continue
                # Fields are contig ordinal, start, stop and strand, separated by underscores
                _, start, stop, _ = line.split('_')
                # ORF size is the end position minus the start position
                size = int(stop) - int(start)
                total_orfs += 1
                if size > 3000:
                    over_3000 += 1
                elif size > 1000:
                    over_1000 += 1
                elif size > 500:
                    over_500 += 1
                else:
                    other += 1
        orf_dist_dict[file_name] = (total_orfs,
                                    over_3000,
                                    over_1000,
                                    over_500,
                                    other)
        # The prodigal report is no longer needed; ignore failures to remove it
        try:
            os.remove(orf_report)
        except IOError:
            pass
    return orf_dist_dict
def reporter(gc_dict, contig_dist_dict, longest_contig_dict, genome_length_dict, num_contigs_dict, n50_dict, n75_dict,
             n90_dict, l50_dict, l75_dict, l90_dict, orf_dist_dict, genus_dict, sequencepath):
    """
    Create a CSV report of all the extracted features
    :param gc_dict: dictionary of strain name: GC%
    :param contig_dist_dict: dictionary of strain name: tuple of contig distribution frequencies
    :param longest_contig_dict: dictionary of strain name: longest contig
    :param genome_length_dict: dictionary of strain name: total genome length
    :param num_contigs_dict: dictionary of strain name: total number of contigs
    :param n50_dict: dictionary of strain name: N50
    :param n75_dict: dictionary of strain name: N75
    :param n90_dict: dictionary of strain name: N90
    :param l50_dict: dictionary of strain name: L50
    :param l75_dict: dictionary of strain name: L75
    :param l90_dict: dictionary of strain name: L90
    :param orf_dist_dict: dictionary of strain name: tuple of ORF length frequencies
    :param genus_dict: dictionary of strain name: genus
    :param sequencepath: path of folder containing FASTA genomes
    """
    # Initialise string with header information
    data = 'SampleName,TotalLength,NumContigs,LongestContig,Contigs>1000000,Contigs>500000,Contigs>100000,' \
           'Contigs>50000,Contigs>10000,Contigs>5000,Contigs<5000,TotalORFs,ORFs>3000,ORFs>1000,ORFs>500,' \
           'ORFs<500,N50,N75,N90,L50,L75,L90,GC%,Genus\n'
    # Create and open the report for writing
    with open(os.path.join(sequencepath, 'extracted_features.csv'), 'w') as feature_report:
        for file_name in sorted(longest_contig_dict):
            # Populate the data string with the appropriate values. NOTE: no whitespace between fields -
            # a previous revision had a stray space before {ORF11}, giving every ORFs<500 value a leading space
            data += '{name},{totlen},{numcontigs},{longestcontig},{over_106},{over_56},{over_105},{over_55},' \
                    '{over_104},{over_54},{under_54},{tORFS},{ORF33},{ORF13},{ORF52},{ORF11},{n50},{n75},{n90},' \
                    '{l50},{l75},{l90},{gc},{genus}\n'\
                .format(name=file_name,
                        totlen=genome_length_dict[file_name],
                        numcontigs=num_contigs_dict[file_name],
                        longestcontig=longest_contig_dict[file_name],
                        over_106=contig_dist_dict[file_name][0],
                        over_56=contig_dist_dict[file_name][1],
                        over_105=contig_dist_dict[file_name][2],
                        over_55=contig_dist_dict[file_name][3],
                        over_104=contig_dist_dict[file_name][4],
                        over_54=contig_dist_dict[file_name][5],
                        under_54=contig_dist_dict[file_name][6],
                        tORFS=orf_dist_dict[file_name][0],
                        ORF33=orf_dist_dict[file_name][1],
                        ORF13=orf_dist_dict[file_name][2],
                        ORF52=orf_dist_dict[file_name][3],
                        ORF11=orf_dist_dict[file_name][4],
                        n50=n50_dict[file_name],
                        n75=n75_dict[file_name],
                        n90=n90_dict[file_name],
                        l50=l50_dict[file_name],
                        l75=l75_dict[file_name],
                        l90=l90_dict[file_name],
                        gc=gc_dict[file_name],
                        genus=genus_dict[file_name])
        # Write the accumulated header plus all rows in a single call
        feature_report.write(data)
# Initialise the click decorator
@click.command()
@click.option('-s', '--sequencepath',
              type=click.Path(exists=True),
              required=True,
              help='Path of folder containing multi-FASTA files')
@click.option('-d', '--refseq_database',
              type=click.Path(exists=True),
              required=True,
              help='Path to reduced mash sketch of RefSeq.')
# NOTE(review): with is_flag=True and default=True, supplying -r still yields True, so the report can
# never actually be disabled from the command line - confirm whether default=False (or a --no-report
# style flag) was intended
@click.option('-r', '--report',
              is_flag=True,
              default=True,
              help='By default, a report of the extracted features is created. Include this flag if you do not want '
                   'a report created')
def cli(sequencepath, report, refseq_database):
    """
    Pass command line arguments to, and run the feature extraction functions
    """
    # Use every available core for the mash/prodigal steps
    main(sequencepath, report, refseq_database, num_threads=multiprocessing.cpu_count())
if __name__ == '__main__':
    cli()
|
OLC-LOC-Bioinformatics/GenomeQAML | genomeqaml/extract_features.py | find_largest_contig | python | def find_largest_contig(contig_lengths_dict):
# Initialise the dictionary
longest_contig_dict = dict()
for file_name, contig_lengths in contig_lengths_dict.items():
# As the list is sorted in descending order, the largest contig is the first entry in the list
longest_contig_dict[file_name] = contig_lengths[0]
return longest_contig_dict | Determine the largest contig for each strain
:param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
:return: longest_contig_dict: dictionary of strain name: longest contig | train | https://github.com/OLC-LOC-Bioinformatics/GenomeQAML/blob/2953e574c185afab23075641da4ce5392bc003e9/genomeqaml/extract_features.py#L203-L214 | null | #!/usr/bin/env python 3
from accessoryFunctions.accessoryFunctions import printtime
from Bio.SeqUtils import GC
from biotools import mash
import multiprocessing
from Bio import SeqIO
from glob import glob
import subprocess
import shutil
import click
import time
import os
__author__ = 'adamkoziol', 'andrewlow'
def main(sequencepath, report, refseq_database, num_threads=12, start=time.time()):
"""
Run the appropriate functions in order
:param sequencepath: path of folder containing FASTA genomes
:param report: boolean to determine whether a report is to be created
:param refseq_database: Path to reduced refseq database sketch
:param num_threads: Number of threads to run mash/other stuff on
:return: gc_dict, contig_dist_dict, longest_contig_dict, genome_length_dict, num_contigs_dict, n50_dict, n75_dict, \
n90_dict, l50_dict, l75_dict, l90_dict, orf_dist_dict
"""
files = find_files(sequencepath)
file_dict = filer(files)
printtime('Using MASH to determine genera of samples', start)
genus_dict = find_genus(file_dict, refseq_database, threads=num_threads)
file_records = fasta_records(file_dict)
printtime('Collecting basic quality metrics', start)
contig_len_dict, gc_dict = fasta_stats(file_dict, file_records)
contig_dist_dict = find_contig_distribution(contig_len_dict)
longest_contig_dict = find_largest_contig(contig_len_dict)
genome_length_dict = find_genome_length(contig_len_dict)
num_contigs_dict = find_num_contigs(contig_len_dict)
n50_dict = find_n50(contig_len_dict, genome_length_dict)
n75_dict = find_n75(contig_len_dict, genome_length_dict)
n90_dict = find_n90(contig_len_dict, genome_length_dict)
l50_dict = find_l50(contig_len_dict, genome_length_dict)
l75_dict = find_l75(contig_len_dict, genome_length_dict)
l90_dict = find_l90(contig_len_dict, genome_length_dict)
printtime('Using prodigal to calculate number of ORFs in each sample', start)
orf_file_dict = predict_orfs(file_dict, num_threads=num_threads)
orf_dist_dict = find_orf_distribution(orf_file_dict)
if report:
reporter(gc_dict, contig_dist_dict, longest_contig_dict, genome_length_dict, num_contigs_dict, n50_dict,
n75_dict, n90_dict, l50_dict, l75_dict, l90_dict, orf_dist_dict, genus_dict, sequencepath)
printtime('Features extracted!', start)
return gc_dict, contig_dist_dict, longest_contig_dict, genome_length_dict, num_contigs_dict, n50_dict, n75_dict, \
n90_dict, l50_dict, l75_dict, l90_dict, orf_dist_dict
def find_files(sequencepath):
"""
Use glob to find all FASTA files in the provided sequence path. NOTE: FASTA files must have an extension such as
.fasta, .fa, or .fas. Extensions of .fsa, .tfa, ect. are not currently supported
:param sequencepath: path of folder containing FASTA genomes
:return: list of FASTA files
"""
# Create a sorted list of all the FASTA files in the sequence path
files = sorted(glob(os.path.join(sequencepath, '*.fa*')))
return files
def filer(filelist):
"""
Helper script that creates a dictionary of the stain name: /sequencepath/strain_name.extension)
:param filelist: list of files to parse
:return filedict: dictionary of stain name: /sequencepath/strain_name.extension
"""
# Initialise the dictionary
filedict = dict()
for seqfile in filelist:
# Split off the file extension and remove the path from the name
strainname = os.path.splitext(os.path.basename(seqfile))[0]
# Populate the dictionary
filedict[strainname] = seqfile
return filedict
def fasta_records(files):
"""
Use SeqIO to create dictionaries of all records for each FASTA file
:param files: dictionary of stain name: /sequencepath/strain_name.extension
:return: file_records: dictionary of all contig records for all strains
"""
# Initialise the dictionary
file_records = dict()
for file_name, fasta in files.items():
# Create a dictionary of records for each file
record_dict = SeqIO.to_dict(SeqIO.parse(fasta, "fasta"))
# Set the records dictionary as the value for file_records
file_records[file_name] = record_dict
return file_records
def find_genus(files, database, threads=12):
    """
    Uses MASH to find the genus of fasta files.
    :param files: File dictionary returned by filer method.
    :param database: Path to reduced refseq database sketch.
    :param threads: Number of threads to run mash with.
    :return: genus_dict: Dictionary of genus for each sample. Will return NA if genus could not be found.
    """
    genus_dict = dict()
    # Use the fractional part of the current time as a (mostly) unique scratch directory name
    # NOTE(review): two runs started in the same instant could collide - confirm this is acceptable
    tmpdir = str(time.time()).split('.')[-1]
    if not os.path.isdir(tmpdir):
        os.makedirs(tmpdir)
    for file_name, fasta in files.items():
        # Screen the assembly against the RefSeq sketch, requiring an identity of at least 0.95
        mash.screen(database, fasta,
                    threads=threads,
                    w='',
                    i=0.95,
                    output_file=os.path.join(tmpdir, 'screen.tab'))
        screen_output = mash.read_mash_screen(os.path.join(tmpdir, 'screen.tab'))
        # Remove the screen output between samples so the next iteration starts clean
        try:
            os.remove(os.path.join(tmpdir, 'screen.tab'))
        except IOError:
            pass
        try:
            # The genus is the third-from-last component of the RefSeq hit's query path
            genus = screen_output[0].query_id.split('/')[-3]
            # Shigella is genomically Escherichia, so report it as such
            if genus == 'Shigella':
                genus = 'Escherichia'
            genus_dict[file_name] = genus
        except IndexError:
            # No hits passed the identity threshold - the genus could not be determined
            genus_dict[file_name] = 'NA'
    shutil.rmtree(tmpdir)
    return genus_dict
def fasta_stats(files, records):
    """
    Parse the lengths of all contigs for each sample, as well as the total GC%
    :param files: dictionary of strain name: /sequencepath/strain_name.extension
    :param records: Dictionary of strain name: SeqIO records
    :return: contig_len_dict, gc_dict: dictionaries of list of all contig length, and total GC% for all strains
    """
    # Initialise dictionaries
    contig_len_dict = dict()
    gc_dict = dict()
    for file_name in files:
        # Initialise variables to store appropriate values parsed from contig records
        contig_lengths = list()
        fasta_sequence = str()
        for contig, record in records[file_name].items():
            # Append the length of the contig to the list
            contig_lengths.append(len(record.seq))
            # Add the contig sequence to the string
            # NOTE(review): str + Seq relies on Bio.Seq supporting right-hand concatenation - confirm
            fasta_sequence += record.seq
        # Set the reverse sorted (e.g. largest to smallest) list of contig sizes as the value
        contig_len_dict[file_name] = sorted(contig_lengths, reverse=True)
        # Calculate the GC% of the total genome sequence using GC - format to have two decimal places
        gc_dict[file_name] = float('{:0.2f}'.format(GC(fasta_sequence)))
    return contig_len_dict, gc_dict
def find_contig_distribution(contig_lengths_dict):
    """
    Determine the frequency of different contig size ranges for each strain
    :param contig_lengths_dict: dictionary of strain name: list of contig lengths
    :return: contig_len_dist_dict: dictionary of strain name: tuple of contig size range frequencies
    """
    contig_len_dist_dict = dict()
    # Bin edges, largest first; a contig counts towards the first edge it strictly exceeds
    size_edges = (1000000, 500000, 100000, 50000, 10000, 5000)
    for file_name, contig_lengths in contig_lengths_dict.items():
        # One counter per edge, plus a trailing slot for contigs of 5000 bp and under
        counts = [0] * (len(size_edges) + 1)
        for contig_length in contig_lengths:
            for index, edge in enumerate(size_edges):
                if contig_length > edge:
                    counts[index] += 1
                    break
            else:
                # Fell through every edge: the contig belongs to the smallest category
                counts[-1] += 1
        contig_len_dist_dict[file_name] = tuple(counts)
    return contig_len_dist_dict
def find_genome_length(contig_lengths_dict):
    """
    Determine the total assembly length for each strain
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :return: genome_length_dict: dictionary of strain name: total genome length
    """
    # The genome length is simply the sum of the strain's contig lengths
    return {file_name: sum(lengths) for file_name, lengths in contig_lengths_dict.items()}
def find_num_contigs(contig_lengths_dict):
    """
    Count the total number of contigs for each strain
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :return: num_contigs_dict: dictionary of strain name: total number of contigs
    """
    # The contig count is the length of each strain's list of contig lengths
    return {file_name: len(lengths) for file_name, lengths in contig_lengths_dict.items()}
def find_n50(contig_lengths_dict, genome_length_dict):
    """
    Calculate the N50 for each strain. N50 is defined as the largest contig such that at least half of the total
    genome size is contained in contigs equal to or larger than this contig
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :param genome_length_dict: dictionary of strain name: total genome length
    :return: n50_dict: dictionary of strain name: N50
    """
    n50_dict = dict()
    for file_name, contig_lengths in contig_lengths_dict.items():
        # The running total must reach half of the total assembly length
        target = genome_length_dict[file_name] * 0.5
        running_total = 0
        for contig_length in contig_lengths:
            running_total += contig_length
            if running_total >= target:
                # The contig that pushes the running total past the target is the N50
                n50_dict[file_name] = contig_length
                break
    return n50_dict
def find_n75(contig_lengths_dict, genome_length_dict):
    """
    Calculate the N75 for each strain. N75 is defined as the largest contig such that at least 3/4 of the total
    genome size is contained in contigs equal to or larger than this contig
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :param genome_length_dict: dictionary of strain name: total genome length
    :return: n75_dict: dictionary of strain name: N75
    """
    n75_dict = dict()
    for file_name, contig_lengths in contig_lengths_dict.items():
        # The running total must reach three quarters of the total assembly length
        target = genome_length_dict[file_name] * 0.75
        running_total = 0
        for contig_length in contig_lengths:
            running_total += contig_length
            if running_total >= target:
                # The contig that pushes the running total past the target is the N75
                n75_dict[file_name] = contig_length
                break
    return n75_dict
def find_n90(contig_lengths_dict, genome_length_dict):
    """
    Calculate the N90 for each strain. N90 is defined as the largest contig such that at least 9/10 of the total
    genome size is contained in contigs equal to or larger than this contig
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :param genome_length_dict: dictionary of strain name: total genome length
    :return: n90_dict: dictionary of strain name: N90
    """
    # Initialise the dictionary
    n90_dict = dict()
    for file_name, contig_lengths in contig_lengths_dict.items():
        currentlength = 0
        for contig_length in contig_lengths:
            currentlength += contig_length
            # If the running total now covers at least 90% of the genome, the current contig is the N90.
            # BUGFIX: this previously compared against 0.95 (an N95), contradicting the docstring and
            # the 0.9 threshold used by find_l90
            if currentlength >= genome_length_dict[file_name] * 0.9:
                n90_dict[file_name] = contig_length
                break
    return n90_dict
def find_l50(contig_lengths_dict, genome_length_dict):
    """
    Calculate the L50 for each strain. L50 is defined as the number of contigs required to achieve the N50
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :param genome_length_dict: dictionary of strain name: total genome length
    :return: l50_dict: dictionary of strain name: L50
    """
    l50_dict = dict()
    for file_name, contig_lengths in contig_lengths_dict.items():
        target = genome_length_dict[file_name] * 0.5
        running_total = 0
        # Enumerate from 1 so the counter equals the number of contigs consumed so far
        for contig_count, contig_length in enumerate(contig_lengths, 1):
            running_total += contig_length
            if running_total >= target:
                # The number of contigs needed to reach half the genome length is the L50
                l50_dict[file_name] = contig_count
                break
    return l50_dict
def find_l75(contig_lengths_dict, genome_length_dict):
    """
    Calculate the L75 for each strain. L75 is defined as the number of contigs required to achieve the N75
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :param genome_length_dict: dictionary of strain name: total genome length
    :return: l75_dict: dictionary of strain name: L75
    """
    l75_dict = dict()
    for file_name, contig_lengths in contig_lengths_dict.items():
        target = genome_length_dict[file_name] * 0.75
        running_total = 0
        # Enumerate from 1 so the counter equals the number of contigs consumed so far
        for contig_count, contig_length in enumerate(contig_lengths, 1):
            running_total += contig_length
            if running_total >= target:
                # The number of contigs needed to reach 3/4 of the genome length is the L75
                l75_dict[file_name] = contig_count
                break
    return l75_dict
def find_l90(contig_lengths_dict, genome_length_dict):
    """
    Calculate the L90 for each strain. L90 is defined as the number of contigs required to achieve the N90
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :param genome_length_dict: dictionary of strain name: total genome length
    :return: l90_dict: dictionary of strain name: L90
    """
    l90_dict = dict()
    for file_name, contig_lengths in contig_lengths_dict.items():
        target = genome_length_dict[file_name] * 0.9
        running_total = 0
        # Enumerate from 1 so the counter equals the number of contigs consumed so far
        for contig_count, contig_length in enumerate(contig_lengths, 1):
            running_total += contig_length
            if running_total >= target:
                # The number of contigs needed to reach 90% of the genome length is the L90
                l90_dict[file_name] = contig_count
                break
    return l90_dict
def predict_orfs(file_dict, num_threads=1):
    """
    Use prodigal to predict the number of open reading frames (ORFs) in each strain
    :param file_dict: dictionary of strain name: /sequencepath/strain_name.extension
    :param num_threads: number of threads to use in the pool of prodigal processes
    :return: orf_file_dict: dictionary of strain name: /sequencepath/prodigal results.sco
    """
    orf_file_dict = dict()
    commands = list()
    for strain, fasta_path in file_dict.items():
        # Prodigal writes its simple coordinate output (sco) next to the input assembly
        sco_path = os.path.splitext(fasta_path)[0] + '.sco'
        orf_file_dict[strain] = sco_path
        # Only queue a prodigal job when the results file doesn't already exist
        if not os.path.isfile(sco_path):
            commands.append(['prodigal', '-i', fasta_path, '-o', sco_path, '-f', 'sco'])
    # Fan the outstanding prodigal jobs out over a pool of worker processes
    pool = multiprocessing.Pool(processes=num_threads)
    pool.map(run_prodigal, commands)
    pool.close()
    pool.join()
    return orf_file_dict
def run_prodigal(prodigal_command):
    """
    Execute a single prodigal command, discarding its console output
    :param prodigal_command: prodigal command line as a list of arguments
    """
    # No need to make the user see prodigal output - use subprocess.DEVNULL instead of
    # manually opening os.devnull (no file handle to manage)
    subprocess.call(prodigal_command, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
def find_orf_distribution(orf_file_dict):
    """
    Parse the prodigal outputs to determine the frequency of ORF size ranges for each strain
    :param orf_file_dict: dictionary of strain name: /sequencepath/prodigal results.sco
    :return: orf_dist_dict: dictionary of strain name: tuple of ORF size range distribution frequencies
    """
    orf_dist_dict = dict()
    for file_name, orf_report in orf_file_dict.items():
        # Counters: total ORFs, then one slot per size bin (>3000, >1000, >500, the rest)
        total_orfs = 0
        bin_counts = [0, 0, 0, 0]
        with open(orf_report, 'r') as report:
            for line in report:
                # Only coordinate lines (e.g. >1_345_920_-) are parsed; header lines are skipped
                if not line.startswith('>'):
                    continue
                # Fields are ordinal, start, stop and strand separated by underscores
                _, start, stop, _ = line.split('_')
                # The ORF size is the end position minus the start position
                orf_size = int(stop) - int(start)
                total_orfs += 1
                if orf_size > 3000:
                    bin_counts[0] += 1
                elif orf_size > 1000:
                    bin_counts[1] += 1
                elif orf_size > 500:
                    bin_counts[2] += 1
                else:
                    bin_counts[3] += 1
        orf_dist_dict[file_name] = (total_orfs,) + tuple(bin_counts)
        # The prodigal report is no longer needed once parsed
        try:
            os.remove(orf_report)
        except IOError:
            pass
    return orf_dist_dict
def reporter(gc_dict, contig_dist_dict, longest_contig_dict, genome_length_dict, num_contigs_dict, n50_dict, n75_dict,
             n90_dict, l50_dict, l75_dict, l90_dict, orf_dist_dict, genus_dict, sequencepath):
    """
    Create a report of all the extracted features
    :param gc_dict: dictionary of strain name: GC%
    :param contig_dist_dict: dictionary of strain: tuple of contig distribution frequencies
    :param longest_contig_dict: dictionary of strain name: longest contig
    :param genome_length_dict: dictionary of strain name: total genome length
    :param num_contigs_dict: dictionary of strain name: total number of contigs
    :param n50_dict: dictionary of strain name: N50
    :param n75_dict: dictionary of strain name: N75
    :param n90_dict: dictionary of strain name: N90
    :param l50_dict: dictionary of strain name: L50
    :param l75_dict: dictionary of strain name: L75
    :param l90_dict: dictionary of strain name: L90
    :param orf_dist_dict: dictionary of strain name: tuple of ORF length frequencies
    :param genus_dict: dictionary of strain name: genus
    :param sequencepath: path of folder containing FASTA genomes
    """
    # Initialise string with header information
    data = 'SampleName,TotalLength,NumContigs,LongestContig,Contigs>1000000,Contigs>500000,Contigs>100000,' \
           'Contigs>50000,Contigs>10000,Contigs>5000,Contigs<5000,TotalORFs,ORFs>3000,ORFs>1000,ORFs>500,' \
           'ORFs<500,N50,N75,N90,L50,L75,L90,GC%,Genus\n'
    # Create and open the report for writing
    with open(os.path.join(sequencepath, 'extracted_features.csv'), 'w') as feature_report:
        for file_name in sorted(longest_contig_dict):
            # Populate the data string with the appropriate values
            # BUGFIX: removed the stray space that used to precede the ORFs<500 field
            data += '{name},{totlen},{numcontigs},{longestcontig},{over_106},{over_56},{over_105},{over_55},' \
                    '{over_104},{over_54},{under_54},{tORFS},{ORF33},{ORF13},{ORF52},{ORF11},{n50},{n75},{n90},' \
                    '{l50},{l75},{l90},{gc},{genus}\n'\
                .format(name=file_name,
                        totlen=genome_length_dict[file_name],
                        numcontigs=num_contigs_dict[file_name],
                        longestcontig=longest_contig_dict[file_name],
                        over_106=contig_dist_dict[file_name][0],
                        over_56=contig_dist_dict[file_name][1],
                        over_105=contig_dist_dict[file_name][2],
                        over_55=contig_dist_dict[file_name][3],
                        over_104=contig_dist_dict[file_name][4],
                        over_54=contig_dist_dict[file_name][5],
                        under_54=contig_dist_dict[file_name][6],
                        tORFS=orf_dist_dict[file_name][0],
                        ORF33=orf_dist_dict[file_name][1],
                        ORF13=orf_dist_dict[file_name][2],
                        ORF52=orf_dist_dict[file_name][3],
                        ORF11=orf_dist_dict[file_name][4],
                        n50=n50_dict[file_name],
                        n75=n75_dict[file_name],
                        n90=n90_dict[file_name],
                        l50=l50_dict[file_name],
                        l75=l75_dict[file_name],
                        l90=l90_dict[file_name],
                        gc=gc_dict[file_name],
                        genus=genus_dict[file_name])
        # BUGFIX: write the assembled report once after the loop. Previously the growing string was
        # written on every iteration, so the header and all earlier rows were duplicated in the file
        feature_report.write(data)
# Initialise the click decorator
@click.command()
@click.option('-s', '--sequencepath',
              type=click.Path(exists=True),
              required=True,
              help='Path of folder containing multi-FASTA files')
@click.option('-d', '--refseq_database',
              type=click.Path(exists=True),
              required=True,
              help='Path to reduced mash sketch of RefSeq.')
# NOTE(review): with is_flag=True and default=True, passing -r cannot switch the value to False;
# a '--report/--no-report' pair would be needed for the help text below to hold - confirm intent
@click.option('-r', '--report',
              is_flag=True,
              default=True,
              help='By default, a report of the extracted features is created. Include this flag if you do not want '
                   'a report created')
def cli(sequencepath, report, refseq_database):
    """
    Pass command line arguments to, and run the feature extraction functions
    """
    # Use every available CPU core for the mash/prodigal steps
    main(sequencepath, report, refseq_database, num_threads=multiprocessing.cpu_count())
# Invoke the click-wrapped entry point when executed as a script
if __name__ == '__main__':
    cli()
|
OLC-LOC-Bioinformatics/GenomeQAML | genomeqaml/extract_features.py | find_genome_length | python | def find_genome_length(contig_lengths_dict):
# Initialise the dictionary
genome_length_dict = dict()
for file_name, contig_lengths in contig_lengths_dict.items():
# Use the sum() method to add all the contig lengths in the list
genome_length_dict[file_name] = sum(contig_lengths)
return genome_length_dict | Determine the total length of all the contigs for each strain
:param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
:return: genome_length_dict: dictionary of strain name: total genome length | train | https://github.com/OLC-LOC-Bioinformatics/GenomeQAML/blob/2953e574c185afab23075641da4ce5392bc003e9/genomeqaml/extract_features.py#L217-L228 | null | #!/usr/bin/env python 3
from accessoryFunctions.accessoryFunctions import printtime
from Bio.SeqUtils import GC
from biotools import mash
import multiprocessing
from Bio import SeqIO
from glob import glob
import subprocess
import shutil
import click
import time
import os
__author__ = 'adamkoziol', 'andrewlow'
def main(sequencepath, report, refseq_database, num_threads=12, start=None):
    """
    Run the appropriate functions in order
    :param sequencepath: path of folder containing FASTA genomes
    :param report: boolean to determine whether a report is to be created
    :param refseq_database: Path to reduced refseq database sketch
    :param num_threads: Number of threads to run mash/other stuff on
    :param start: reference time for elapsed-time log messages; defaults to the moment main() is called
    :return: gc_dict, contig_dist_dict, longest_contig_dict, genome_length_dict, num_contigs_dict, n50_dict, n75_dict, \
    n90_dict, l50_dict, l75_dict, l90_dict, orf_dist_dict
    """
    # BUGFIX: the previous default (start=time.time()) was evaluated once at import time, so the
    # elapsed times printed by printtime measured from module load rather than from this call
    if start is None:
        start = time.time()
    files = find_files(sequencepath)
    file_dict = filer(files)
    printtime('Using MASH to determine genera of samples', start)
    genus_dict = find_genus(file_dict, refseq_database, threads=num_threads)
    file_records = fasta_records(file_dict)
    printtime('Collecting basic quality metrics', start)
    # Basic per-contig statistics and the derived assembly metrics
    contig_len_dict, gc_dict = fasta_stats(file_dict, file_records)
    contig_dist_dict = find_contig_distribution(contig_len_dict)
    longest_contig_dict = find_largest_contig(contig_len_dict)
    genome_length_dict = find_genome_length(contig_len_dict)
    num_contigs_dict = find_num_contigs(contig_len_dict)
    n50_dict = find_n50(contig_len_dict, genome_length_dict)
    n75_dict = find_n75(contig_len_dict, genome_length_dict)
    n90_dict = find_n90(contig_len_dict, genome_length_dict)
    l50_dict = find_l50(contig_len_dict, genome_length_dict)
    l75_dict = find_l75(contig_len_dict, genome_length_dict)
    l90_dict = find_l90(contig_len_dict, genome_length_dict)
    printtime('Using prodigal to calculate number of ORFs in each sample', start)
    orf_file_dict = predict_orfs(file_dict, num_threads=num_threads)
    orf_dist_dict = find_orf_distribution(orf_file_dict)
    if report:
        reporter(gc_dict, contig_dist_dict, longest_contig_dict, genome_length_dict, num_contigs_dict, n50_dict,
                 n75_dict, n90_dict, l50_dict, l75_dict, l90_dict, orf_dist_dict, genus_dict, sequencepath)
    printtime('Features extracted!', start)
    return gc_dict, contig_dist_dict, longest_contig_dict, genome_length_dict, num_contigs_dict, n50_dict, n75_dict, \
        n90_dict, l50_dict, l75_dict, l90_dict, orf_dist_dict
def find_files(sequencepath):
    """
    Locate every FASTA file in the supplied directory. NOTE: FASTA files must carry an extension
    beginning with .fa (e.g. .fasta, .fa, .fas); extensions such as .fsa or .tfa are not supported
    :param sequencepath: path of folder containing FASTA genomes
    :return: sorted list of FASTA files
    """
    # Match anything whose extension starts with .fa and return the hits in sorted order
    pattern = os.path.join(sequencepath, '*.fa*')
    return sorted(glob(pattern))
def filer(filelist):
    """
    Helper that maps each strain name to its file path (/sequencepath/strain_name.extension)
    :param filelist: list of files to parse
    :return filedict: dictionary of strain name: /sequencepath/strain_name.extension
    """
    # Strain name = basename of the file with its extension stripped off
    return {os.path.splitext(os.path.basename(seqfile))[0]: seqfile
            for seqfile in filelist}
def fasta_records(files):
    """
    Use SeqIO to build, for every strain, a dictionary of all its contig records
    :param files: dictionary of strain name: /sequencepath/strain_name.extension
    :return: file_records: dictionary of strain name: dictionary of contig records
    """
    # Parse each FASTA file and index its contig records by record ID
    return {strain: SeqIO.to_dict(SeqIO.parse(fasta_path, "fasta"))
            for strain, fasta_path in files.items()}
def find_genus(files, database, threads=12):
    """
    Uses MASH to find the genus of fasta files.
    :param files: File dictionary returned by filer method.
    :param database: Path to reduced refseq database sketch.
    :param threads: Number of threads to run mash with.
    :return: genus_dict: Dictionary of genus for each sample. Will return NA if genus could not be found.
    """
    genus_dict = dict()
    # Use the fractional part of the current time as a (mostly) unique scratch directory name
    # NOTE(review): two runs started in the same instant could collide - confirm this is acceptable
    tmpdir = str(time.time()).split('.')[-1]
    if not os.path.isdir(tmpdir):
        os.makedirs(tmpdir)
    for file_name, fasta in files.items():
        # Screen the assembly against the RefSeq sketch, requiring an identity of at least 0.95
        mash.screen(database, fasta,
                    threads=threads,
                    w='',
                    i=0.95,
                    output_file=os.path.join(tmpdir, 'screen.tab'))
        screen_output = mash.read_mash_screen(os.path.join(tmpdir, 'screen.tab'))
        # Remove the screen output between samples so the next iteration starts clean
        try:
            os.remove(os.path.join(tmpdir, 'screen.tab'))
        except IOError:
            pass
        try:
            # The genus is the third-from-last component of the RefSeq hit's query path
            genus = screen_output[0].query_id.split('/')[-3]
            # Shigella is genomically Escherichia, so report it as such
            if genus == 'Shigella':
                genus = 'Escherichia'
            genus_dict[file_name] = genus
        except IndexError:
            # No hits passed the identity threshold - the genus could not be determined
            genus_dict[file_name] = 'NA'
    shutil.rmtree(tmpdir)
    return genus_dict
def fasta_stats(files, records):
    """
    Parse the lengths of all contigs for each sample, as well as the total GC%
    :param files: dictionary of strain name: /sequencepath/strain_name.extension
    :param records: Dictionary of strain name: SeqIO records
    :return: contig_len_dict, gc_dict: dictionaries of list of all contig length, and total GC% for all strains
    """
    # Initialise dictionaries
    contig_len_dict = dict()
    gc_dict = dict()
    for file_name in files:
        # Initialise variables to store appropriate values parsed from contig records
        contig_lengths = list()
        fasta_sequence = str()
        for contig, record in records[file_name].items():
            # Append the length of the contig to the list
            contig_lengths.append(len(record.seq))
            # Add the contig sequence to the string
            # NOTE(review): str + Seq relies on Bio.Seq supporting right-hand concatenation - confirm
            fasta_sequence += record.seq
        # Set the reverse sorted (e.g. largest to smallest) list of contig sizes as the value
        contig_len_dict[file_name] = sorted(contig_lengths, reverse=True)
        # Calculate the GC% of the total genome sequence using GC - format to have two decimal places
        gc_dict[file_name] = float('{:0.2f}'.format(GC(fasta_sequence)))
    return contig_len_dict, gc_dict
def find_contig_distribution(contig_lengths_dict):
    """
    Determine the frequency of different contig size ranges for each strain
    :param contig_lengths_dict: dictionary of strain name: list of contig lengths
    :return: contig_len_dist_dict: dictionary of strain name: tuple of contig size range frequencies
    """
    contig_len_dist_dict = dict()
    # Bin edges, largest first; a contig counts towards the first edge it strictly exceeds
    size_edges = (1000000, 500000, 100000, 50000, 10000, 5000)
    for file_name, contig_lengths in contig_lengths_dict.items():
        # One counter per edge, plus a trailing slot for contigs of 5000 bp and under
        counts = [0] * (len(size_edges) + 1)
        for contig_length in contig_lengths:
            for index, edge in enumerate(size_edges):
                if contig_length > edge:
                    counts[index] += 1
                    break
            else:
                # Fell through every edge: the contig belongs to the smallest category
                counts[-1] += 1
        contig_len_dist_dict[file_name] = tuple(counts)
    return contig_len_dist_dict
def find_largest_contig(contig_lengths_dict):
    """
    Determine the largest contig for each strain
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :return: longest_contig_dict: dictionary of strain name: longest contig
    """
    # The list is sorted largest-to-smallest, so the first entry is the longest contig
    return {file_name: lengths[0] for file_name, lengths in contig_lengths_dict.items()}
def find_num_contigs(contig_lengths_dict):
    """
    Count the total number of contigs for each strain
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :return: num_contigs_dict: dictionary of strain name: total number of contigs
    """
    # The contig count is the length of each strain's list of contig lengths
    return {file_name: len(lengths) for file_name, lengths in contig_lengths_dict.items()}
def find_n50(contig_lengths_dict, genome_length_dict):
    """
    Calculate the N50 for each strain. N50 is defined as the largest contig such that at least half of the total
    genome size is contained in contigs equal to or larger than this contig
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :param genome_length_dict: dictionary of strain name: total genome length
    :return: n50_dict: dictionary of strain name: N50
    """
    n50_dict = dict()
    for file_name, contig_lengths in contig_lengths_dict.items():
        # The running total must reach half of the total assembly length
        target = genome_length_dict[file_name] * 0.5
        running_total = 0
        for contig_length in contig_lengths:
            running_total += contig_length
            if running_total >= target:
                # The contig that pushes the running total past the target is the N50
                n50_dict[file_name] = contig_length
                break
    return n50_dict
def find_n75(contig_lengths_dict, genome_length_dict):
    """
    Calculate the N75 for each strain. N75 is defined as the largest contig such that at least 3/4 of the total
    genome size is contained in contigs equal to or larger than this contig
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :param genome_length_dict: dictionary of strain name: total genome length
    :return: n75_dict: dictionary of strain name: N75
    """
    n75_dict = dict()
    for file_name, contig_lengths in contig_lengths_dict.items():
        # The running total must reach three quarters of the total assembly length
        target = genome_length_dict[file_name] * 0.75
        running_total = 0
        for contig_length in contig_lengths:
            running_total += contig_length
            if running_total >= target:
                # The contig that pushes the running total past the target is the N75
                n75_dict[file_name] = contig_length
                break
    return n75_dict
def find_n90(contig_lengths_dict, genome_length_dict):
    """
    Calculate the N90 for each strain. N90 is defined as the largest contig such that at least 9/10 of the total
    genome size is contained in contigs equal to or larger than this contig
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :param genome_length_dict: dictionary of strain name: total genome length
    :return: n90_dict: dictionary of strain name: N90
    """
    # Initialise the dictionary
    n90_dict = dict()
    for file_name, contig_lengths in contig_lengths_dict.items():
        currentlength = 0
        for contig_length in contig_lengths:
            currentlength += contig_length
            # If the running total now covers at least 90% of the genome, the current contig is the N90.
            # BUGFIX: this previously compared against 0.95 (an N95), contradicting the docstring and
            # the 0.9 threshold used by find_l90
            if currentlength >= genome_length_dict[file_name] * 0.9:
                n90_dict[file_name] = contig_length
                break
    return n90_dict
def find_l50(contig_lengths_dict, genome_length_dict):
    """
    Calculate the L50 for each strain. L50 is defined as the number of contigs required to achieve the N50
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :param genome_length_dict: dictionary of strain name: total genome length
    :return: l50_dict: dictionary of strain name: L50
    """
    l50_dict = dict()
    for file_name, contig_lengths in contig_lengths_dict.items():
        target = genome_length_dict[file_name] * 0.5
        running_total = 0
        # Enumerate from 1 so the counter equals the number of contigs consumed so far
        for contig_count, contig_length in enumerate(contig_lengths, 1):
            running_total += contig_length
            if running_total >= target:
                # The number of contigs needed to reach half the genome length is the L50
                l50_dict[file_name] = contig_count
                break
    return l50_dict
def find_l75(contig_lengths_dict, genome_length_dict):
    """
    Calculate the L75 for each strain. L75 is defined as the number of contigs required to achieve the N75
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :param genome_length_dict: dictionary of strain name: total genome length
    :return: l75_dict: dictionary of strain name: L75
    """
    l75_dict = dict()
    for file_name, contig_lengths in contig_lengths_dict.items():
        target = genome_length_dict[file_name] * 0.75
        running_total = 0
        # Enumerate from 1 so the counter equals the number of contigs consumed so far
        for contig_count, contig_length in enumerate(contig_lengths, 1):
            running_total += contig_length
            if running_total >= target:
                # The number of contigs needed to reach 3/4 of the genome length is the L75
                l75_dict[file_name] = contig_count
                break
    return l75_dict
def find_l90(contig_lengths_dict, genome_length_dict):
    """
    Calculate the L90 for each strain. L90 is defined as the number of contigs required to achieve the N90
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :param genome_length_dict: dictionary of strain name: total genome length
    :return: l90_dict: dictionary of strain name: L90
    """
    l90_dict = dict()
    for file_name, contig_lengths in contig_lengths_dict.items():
        target = genome_length_dict[file_name] * 0.9
        running_total = 0
        # Enumerate from 1 so the counter equals the number of contigs consumed so far
        for contig_count, contig_length in enumerate(contig_lengths, 1):
            running_total += contig_length
            if running_total >= target:
                # The number of contigs needed to reach 90% of the genome length is the L90
                l90_dict[file_name] = contig_count
                break
    return l90_dict
def predict_orfs(file_dict, num_threads=1):
    """
    Use prodigal to predict the number of open reading frames (ORFs) in each strain
    :param file_dict: dictionary of strain name: /sequencepath/strain_name.extension
    :param num_threads: number of threads to use in the pool of prodigal processes
    :return: orf_file_dict: dictionary of strain name: /sequencepath/prodigal results.sco
    """
    orf_file_dict = dict()
    commands = list()
    for strain, fasta_path in file_dict.items():
        # Prodigal writes its simple coordinate output (sco) next to the input assembly
        sco_path = os.path.splitext(fasta_path)[0] + '.sco'
        orf_file_dict[strain] = sco_path
        # Only queue a prodigal job when the results file doesn't already exist
        if not os.path.isfile(sco_path):
            commands.append(['prodigal', '-i', fasta_path, '-o', sco_path, '-f', 'sco'])
    # Fan the outstanding prodigal jobs out over a pool of worker processes
    pool = multiprocessing.Pool(processes=num_threads)
    pool.map(run_prodigal, commands)
    pool.close()
    pool.join()
    return orf_file_dict
def run_prodigal(prodigal_command):
    """
    Execute a single prodigal command, discarding its console output.

    :param prodigal_command: list of command-line arguments for subprocess.call
    """
    # The user doesn't need to see prodigal's output; subprocess.DEVNULL avoids
    # manually opening (and remembering to close) os.devnull ourselves
    subprocess.call(prodigal_command, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
def find_orf_distribution(orf_file_dict):
    """
    Parse the prodigal outputs to determine the frequency of ORF size ranges for each strain.

    Each .sco report is removed from disk once it has been summarised.

    :param orf_file_dict: dictionary of strain name: /sequencepath/prodigal results.sco
    :return: orf_dist_dict: dictionary of strain name: tuple of (total ORFs, ORFs > 3000 bp,
        ORFs > 1000 bp, ORFs > 500 bp, ORFs <= 500 bp)
    """
    # Initialise the dictionary
    orf_dist_dict = dict()
    for file_name, orf_report in orf_file_dict.items():
        # Frequency counters for the different ORF size ranges
        total_orfs = 0
        over_3000 = 0
        over_1000 = 0
        over_500 = 0
        other = 0
        # Open the strain-specific report
        with open(orf_report, 'r') as orfreport:
            for line in orfreport:
                # The report has a header section that can be ignored - only parse
                # lines beginning with '>'
                if not line.startswith('>'):
                    continue
                # e.g. >1_345_920_- yields id: >1, start: 345, stop: 920, direction: -
                # rsplit guards against identifiers that themselves contain '_'
                contig, start, stop, direction = line.rsplit('_', 3)
                # The size of the ORF is the end position minus the start position
                size = int(stop) - int(start)
                total_orfs += 1
                # Bin the ORF by size
                if size > 3000:
                    over_3000 += 1
                elif size > 1000:
                    over_1000 += 1
                elif size > 500:
                    over_500 += 1
                else:
                    other += 1
        # Populate the dictionary with a tuple of the ORF size range frequencies
        orf_dist_dict[file_name] = (total_orfs,
                                    over_3000,
                                    over_1000,
                                    over_500,
                                    other)
        # Clean up the prodigal report now that it has been summarised
        try:
            os.remove(orf_report)
        except OSError:
            pass
    return orf_dist_dict
def reporter(gc_dict, contig_dist_dict, longest_contig_dict, genome_length_dict, num_contigs_dict, n50_dict, n75_dict,
             n90_dict, l50_dict, l75_dict, l90_dict, orf_dist_dict, genus_dict, sequencepath):
    """
    Create a CSV report of all the extracted features.

    :param gc_dict: dictionary of strain name: GC%
    :param contig_dist_dict: dictionary of strain: tuple of contig distribution frequencies
    :param longest_contig_dict: dictionary of strain name: longest contig
    :param genome_length_dict: dictionary of strain name: total genome length
    :param num_contigs_dict: dictionary of strain name: total number of contigs
    :param n50_dict: dictionary of strain name: N50
    :param n75_dict: dictionary of strain name: N75
    :param n90_dict: dictionary of strain name: N90
    :param l50_dict: dictionary of strain name: L50
    :param l75_dict: dictionary of strain name: L75
    :param l90_dict: dictionary of strain name: L90
    :param orf_dist_dict: dictionary of strain name: tuple of ORF length frequencies
    :param genus_dict: dictionary of strain name: genus
    :param sequencepath: path of folder containing FASTA genomes
    """
    # Header row for the CSV output
    rows = ['SampleName,TotalLength,NumContigs,LongestContig,Contigs>1000000,Contigs>500000,Contigs>100000,'
            'Contigs>50000,Contigs>10000,Contigs>5000,Contigs<5000,TotalORFs,ORFs>3000,ORFs>1000,ORFs>500,'
            'ORFs<500,N50,N75,N90,L50,L75,L90,GC%,Genus\n']
    for file_name in sorted(longest_contig_dict):
        # One comma-separated row per strain. The stray space that used to follow
        # the ORFs>500 field has been removed so every column parses cleanly
        rows.append('{name},{totlen},{numcontigs},{longestcontig},{over_106},{over_56},{over_105},{over_55},'
                    '{over_104},{over_54},{under_54},{tORFS},{ORF33},{ORF13},{ORF52},{ORF11},{n50},{n75},{n90},'
                    '{l50},{l75},{l90},{gc},{genus}\n'
                    .format(name=file_name,
                            totlen=genome_length_dict[file_name],
                            numcontigs=num_contigs_dict[file_name],
                            longestcontig=longest_contig_dict[file_name],
                            over_106=contig_dist_dict[file_name][0],
                            over_56=contig_dist_dict[file_name][1],
                            over_105=contig_dist_dict[file_name][2],
                            over_55=contig_dist_dict[file_name][3],
                            over_104=contig_dist_dict[file_name][4],
                            over_54=contig_dist_dict[file_name][5],
                            under_54=contig_dist_dict[file_name][6],
                            tORFS=orf_dist_dict[file_name][0],
                            ORF33=orf_dist_dict[file_name][1],
                            ORF13=orf_dist_dict[file_name][2],
                            ORF52=orf_dist_dict[file_name][3],
                            ORF11=orf_dist_dict[file_name][4],
                            n50=n50_dict[file_name],
                            n75=n75_dict[file_name],
                            n90=n90_dict[file_name],
                            l50=l50_dict[file_name],
                            l75=l75_dict[file_name],
                            l90=l90_dict[file_name],
                            gc=gc_dict[file_name],
                            genus=genus_dict[file_name]))
    # Create the report for writing; write once, after all rows are assembled,
    # so every row appears exactly once in the output
    with open(os.path.join(sequencepath, 'extracted_features.csv'), 'w') as feature_report:
        feature_report.write(''.join(rows))
# Command-line entry point, built with click
@click.command()
@click.option('-s', '--sequencepath',
              type=click.Path(exists=True),
              required=True,
              help='Path of folder containing multi-FASTA files')
@click.option('-d', '--refseq_database',
              type=click.Path(exists=True),
              required=True,
              help='Path to reduced mash sketch of RefSeq.')
# NOTE(review): with is_flag=True and default=True, click infers flag_value as
# the negation of the default, so supplying -r/--report turns the report OFF,
# matching the help text - confirm against the click version in use
@click.option('-r', '--report',
              is_flag=True,
              default=True,
              help='By default, a report of the extracted features is created. Include this flag if you do not want '
                   'a report created')
def cli(sequencepath, report, refseq_database):
    """
    Pass command line arguments to, and run the feature extraction functions
    """
    # Use every available core for the mash/prodigal work
    main(sequencepath, report, refseq_database, num_threads=multiprocessing.cpu_count())
# Run the CLI only when this file is executed as a script, not when imported
if __name__ == '__main__':
    cli()
|
OLC-LOC-Bioinformatics/GenomeQAML | genomeqaml/extract_features.py | find_num_contigs | python | def find_num_contigs(contig_lengths_dict):
# Initialise the dictionary
num_contigs_dict = dict()
for file_name, contig_lengths in contig_lengths_dict.items():
# Use the len() method to count the number of entries in the list
num_contigs_dict[file_name] = len(contig_lengths)
return num_contigs_dict | Count the total number of contigs for each strain
:param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
:return: num_contigs_dict: dictionary of strain name: total number of contigs | train | https://github.com/OLC-LOC-Bioinformatics/GenomeQAML/blob/2953e574c185afab23075641da4ce5392bc003e9/genomeqaml/extract_features.py#L231-L242 | null | #!/usr/bin/env python 3
from accessoryFunctions.accessoryFunctions import printtime
from Bio.SeqUtils import GC
from biotools import mash
import multiprocessing
from Bio import SeqIO
from glob import glob
import subprocess
import shutil
import click
import time
import os
__author__ = 'adamkoziol', 'andrewlow'
# NOTE(review): start=time.time() is evaluated once, when the module is imported,
# not per call - printtime deltas therefore measure time since import; confirm
# this is intended (for the script entry point the two are effectively the same)
def main(sequencepath, report, refseq_database, num_threads=12, start=time.time()):
    """
    Run the appropriate functions in order
    :param sequencepath: path of folder containing FASTA genomes
    :param report: boolean to determine whether a report is to be created
    :param refseq_database: Path to reduced refseq database sketch
    :param num_threads: Number of threads to run mash/other stuff on
    :param start: reference timestamp used for the printtime progress messages
    :return: gc_dict, contig_dist_dict, longest_contig_dict, genome_length_dict, num_contigs_dict, n50_dict, n75_dict, \
    n90_dict, l50_dict, l75_dict, l90_dict, orf_dist_dict
    """
    # Map each strain name to its FASTA file
    files = find_files(sequencepath)
    file_dict = filer(files)
    printtime('Using MASH to determine genera of samples', start)
    genus_dict = find_genus(file_dict, refseq_database, threads=num_threads)
    # Parse every contig record up front; the metrics below reuse these records
    file_records = fasta_records(file_dict)
    printtime('Collecting basic quality metrics', start)
    contig_len_dict, gc_dict = fasta_stats(file_dict, file_records)
    # Assembly-quality metrics all derive from the sorted contig length lists
    contig_dist_dict = find_contig_distribution(contig_len_dict)
    longest_contig_dict = find_largest_contig(contig_len_dict)
    genome_length_dict = find_genome_length(contig_len_dict)
    num_contigs_dict = find_num_contigs(contig_len_dict)
    n50_dict = find_n50(contig_len_dict, genome_length_dict)
    n75_dict = find_n75(contig_len_dict, genome_length_dict)
    n90_dict = find_n90(contig_len_dict, genome_length_dict)
    l50_dict = find_l50(contig_len_dict, genome_length_dict)
    l75_dict = find_l75(contig_len_dict, genome_length_dict)
    l90_dict = find_l90(contig_len_dict, genome_length_dict)
    printtime('Using prodigal to calculate number of ORFs in each sample', start)
    orf_file_dict = predict_orfs(file_dict, num_threads=num_threads)
    orf_dist_dict = find_orf_distribution(orf_file_dict)
    # Optionally summarise everything into extracted_features.csv
    if report:
        reporter(gc_dict, contig_dist_dict, longest_contig_dict, genome_length_dict, num_contigs_dict, n50_dict,
                 n75_dict, n90_dict, l50_dict, l75_dict, l90_dict, orf_dist_dict, genus_dict, sequencepath)
    printtime('Features extracted!', start)
    return gc_dict, contig_dist_dict, longest_contig_dict, genome_length_dict, num_contigs_dict, n50_dict, n75_dict, \
        n90_dict, l50_dict, l75_dict, l90_dict, orf_dist_dict
def find_files(sequencepath):
    """
    Locate all FASTA files in the supplied sequence path. NOTE: FASTA files must have an extension such as
    .fasta, .fa, or .fas. Extensions of .fsa, .tfa, etc. are not currently supported

    :param sequencepath: path of folder containing FASTA genomes
    :return: sorted list of FASTA files
    """
    # Match any extension beginning with .fa; sort for deterministic ordering
    return sorted(glob(os.path.join(sequencepath, '*.fa*')))
def filer(filelist):
    """
    Helper that maps each strain name to its sequence file.

    :param filelist: list of files to parse
    :return filedict: dictionary of strain name: /sequencepath/strain_name.extension
    """
    # Strain name = file basename with its extension removed
    return {os.path.splitext(os.path.basename(seqfile))[0]: seqfile
            for seqfile in filelist}
def fasta_records(files):
    """
    Use SeqIO to create dictionaries of all records for each FASTA file.

    :param files: dictionary of strain name: /sequencepath/strain_name.extension
    :return: file_records: dictionary of strain name: {contig id: SeqRecord}
    """
    # One record dictionary per strain, keyed by contig identifier
    return {file_name: SeqIO.to_dict(SeqIO.parse(fasta, "fasta"))
            for file_name, fasta in files.items()}
def find_genus(files, database, threads=12):
    """
    Uses MASH to find the genus of fasta files.
    :param files: File dictionary returned by filer method.
    :param database: Path to reduced refseq database sketch.
    :param threads: Number of threads to run mash with.
    :return: genus_dict: Dictionary of genus for each sample. Will return NA if genus could not be found.
    """
    genus_dict = dict()
    # Temporary working directory named after the fractional part of the current
    # timestamp. NOTE(review): not collision-proof under concurrent runs -
    # consider tempfile.mkdtemp
    tmpdir = str(time.time()).split('.')[-1]
    if not os.path.isdir(tmpdir):
        os.makedirs(tmpdir)
    for file_name, fasta in files.items():
        # Screen the sample against the RefSeq sketch at a 0.95 identity cutoff
        mash.screen(database, fasta,
                    threads=threads,
                    w='',
                    i=0.95,
                    output_file=os.path.join(tmpdir, 'screen.tab'))
        screen_output = mash.read_mash_screen(os.path.join(tmpdir, 'screen.tab'))
        # Remove the screen output so the next sample starts from a clean slate
        try:
            os.remove(os.path.join(tmpdir, 'screen.tab'))
        except IOError:
            pass
        try:
            # The genus is taken as the third-from-last component of the best
            # hit's query path - presumably the genus directory in the RefSeq
            # sketch layout; TODO confirm against the sketch's naming scheme
            genus = screen_output[0].query_id.split('/')[-3]
            # Report Shigella hits as Escherichia
            if genus == 'Shigella':
                genus = 'Escherichia'
            genus_dict[file_name] = genus
        except IndexError:
            # No hits passed the identity cutoff
            genus_dict[file_name] = 'NA'
    shutil.rmtree(tmpdir)
    return genus_dict
def fasta_stats(files, records):
    """
    Parse the lengths of all contigs for each sample, as well as the total GC%
    :param files: dictionary of strain name: /sequencepath/strain_name.extension
    :param records: Dictionary of strain name: SeqIO records
    :return: contig_len_dict, gc_dict: dictionaries of list of all contig length, and total GC% for all strains
    """
    # Initialise dictionaries
    contig_len_dict = dict()
    gc_dict = dict()
    for file_name in files:
        # Initialise variables to store appropriate values parsed from contig records
        contig_lengths = list()
        # Concatenation of every contig sequence; used only for the GC calculation
        fasta_sequence = str()
        for contig, record in records[file_name].items():
            # Append the length of the contig to the list
            contig_lengths.append(len(record.seq))
            # NOTE(review): str/Seq '+' concatenation relies on Biopython's Seq
            # supporting it, and builds the genome quadratically for assemblies
            # with many contigs - confirm acceptable for expected input sizes
            fasta_sequence += record.seq
        # Set the reverse sorted (e.g. largest to smallest) list of contig sizes as the value
        contig_len_dict[file_name] = sorted(contig_lengths, reverse=True)
        # Calculate the GC% of the total genome sequence using GC - format to have two decimal places
        gc_dict[file_name] = float('{:0.2f}'.format(GC(fasta_sequence)))
    return contig_len_dict, gc_dict
def find_contig_distribution(contig_lengths_dict):
    """
    Determine the frequency of different contig size ranges for each strain.

    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :return: contig_len_dist_dict: dictionary of strain name: tuple of contig size range frequencies
        (>1000000, >500000, >100000, >50000, >10000, >5000, <=5000)
    """
    contig_len_dist_dict = dict()
    # Bin boundaries, largest first; anything no larger than the smallest
    # boundary falls into the final catch-all bin
    thresholds = (1000000, 500000, 100000, 50000, 10000, 5000)
    for file_name, contig_lengths in contig_lengths_dict.items():
        # One counter per threshold plus the catch-all
        bins = [0] * (len(thresholds) + 1)
        for contig_length in contig_lengths:
            for index, cutoff in enumerate(thresholds):
                if contig_length > cutoff:
                    bins[index] += 1
                    break
            else:
                bins[-1] += 1
        contig_len_dist_dict[file_name] = tuple(bins)
    return contig_len_dist_dict
def find_largest_contig(contig_lengths_dict):
    """
    Determine the largest contig for each strain.

    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :return: longest_contig_dict: dictionary of strain name: longest contig
    """
    # Lists arrive sorted largest-first, so the head of each list is the longest
    return {strain: lengths[0] for strain, lengths in contig_lengths_dict.items()}
def find_genome_length(contig_lengths_dict):
    """
    Determine the total length of all the contigs for each strain.

    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :return: genome_length_dict: dictionary of strain name: total genome length
    """
    # Total assembly size is the sum of the per-contig lengths
    return {strain: sum(lengths) for strain, lengths in contig_lengths_dict.items()}
def find_n50(contig_lengths_dict, genome_length_dict):
    """
    Calculate the N50 for each strain. N50 is defined as the largest contig such that at least half of the total
    genome size is contained in contigs equal to or larger than this contig.

    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :param genome_length_dict: dictionary of strain name: total genome length
    :return: n50_dict: dictionary of strain name: N50
    """
    n50_dict = dict()
    for file_name, contig_lengths in contig_lengths_dict.items():
        # Half the genome must be covered before we stop
        target = genome_length_dict[file_name] * 0.5
        running_total = 0
        for contig_length in contig_lengths:
            running_total += contig_length
            # First contig (largest-first order) to push coverage past the
            # target is the N50
            if running_total >= target:
                n50_dict[file_name] = contig_length
                break
    return n50_dict
def find_n75(contig_lengths_dict, genome_length_dict):
    """
    Calculate the N75 for each strain. N75 is defined as the largest contig such that at least 3/4 of the total
    genome size is contained in contigs equal to or larger than this contig.

    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :param genome_length_dict: dictionary of strain name: total genome length
    :return: n75_dict: dictionary of strain name: N75
    """
    n75_dict = dict()
    for file_name, contig_lengths in contig_lengths_dict.items():
        # Three quarters of the genome must be covered before we stop
        target = genome_length_dict[file_name] * 0.75
        running_total = 0
        for contig_length in contig_lengths:
            running_total += contig_length
            # First contig to push coverage past the target is the N75
            if running_total >= target:
                n75_dict[file_name] = contig_length
                break
    return n75_dict
def find_n90(contig_lengths_dict, genome_length_dict):
    """
    Calculate the N90 for each strain. N90 is defined as the largest contig such that at least 9/10 of the total
    genome size is contained in contigs equal to or larger than this contig.

    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :param genome_length_dict: dictionary of strain name: total genome length
    :return: n90_dict: dictionary of strain name: N90
    """
    n90_dict = dict()
    for file_name, contig_lengths in contig_lengths_dict.items():
        # Running total of contig lengths, largest contigs first
        currentlength = 0
        for contig_length in contig_lengths:
            currentlength += contig_length
            # Once 90% of the genome is covered, the current contig length is the
            # N90. The previous 0.95 multiplier was a bug: it computed an N95,
            # contradicting both the docstring and the companion find_l90 (0.9)
            if currentlength >= genome_length_dict[file_name] * 0.9:
                n90_dict[file_name] = contig_length
                break
    return n90_dict
def find_l50(contig_lengths_dict, genome_length_dict):
    """
    Calculate the L50 for each strain. L50 is defined as the number of contigs required to achieve the N50.

    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :param genome_length_dict: dictionary of strain name: total genome length
    :return: l50_dict: dictionary of strain name: L50
    """
    l50_dict = dict()
    for file_name, contig_lengths in contig_lengths_dict.items():
        target = genome_length_dict[file_name] * 0.5
        running_total = 0
        # enumerate from 1 so 'position' counts contigs consumed so far
        for position, contig_length in enumerate(contig_lengths, start=1):
            running_total += contig_length
            # Same logic as the N50, but record the contig count instead of the length
            if running_total >= target:
                l50_dict[file_name] = position
                break
    return l50_dict
def find_l75(contig_lengths_dict, genome_length_dict):
    """
    Calculate the L75 for each strain. L75 is defined as the number of contigs required to achieve the N75.

    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :param genome_length_dict: dictionary of strain name: total genome length
    :return: l75_dict: dictionary of strain name: L75
    """
    l75_dict = dict()
    for file_name, contig_lengths in contig_lengths_dict.items():
        target = genome_length_dict[file_name] * 0.75
        running_total = 0
        # enumerate from 1 so 'position' counts contigs consumed so far
        for position, contig_length in enumerate(contig_lengths, start=1):
            running_total += contig_length
            # Same logic as the N75, but record the contig count instead of the length
            if running_total >= target:
                l75_dict[file_name] = position
                break
    return l75_dict
def find_l90(contig_lengths_dict, genome_length_dict):
    """
    Calculate the L90 for each strain. L90 is defined as the number of contigs required to achieve the N90.

    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :param genome_length_dict: dictionary of strain name: total genome length
    :return: l90_dict: dictionary of strain name: L90
    """
    l90_dict = dict()
    for file_name, contig_lengths in contig_lengths_dict.items():
        target = genome_length_dict[file_name] * 0.9
        running_total = 0
        # enumerate from 1 so 'position' counts contigs consumed so far
        for position, contig_length in enumerate(contig_lengths, start=1):
            running_total += contig_length
            # Same logic as the N90, but record the contig count instead of the length
            if running_total >= target:
                l90_dict[file_name] = position
                break
    return l90_dict
def predict_orfs(file_dict, num_threads=1):
"""
Use prodigal to predict the number of open reading frames (ORFs) in each strain
:param file_dict: dictionary of strain name: /sequencepath/strain_name.extension
:param num_threads: number of threads to use in the pool of prodigal processes
:return: orf_file_dict: dictionary of strain name: /sequencepath/prodigal results.sco
"""
# Initialise the dictionary
orf_file_dict = dict()
prodigallist = list()
for file_name, file_path in file_dict.items():
# Set the name of the output .sco results file
results = os.path.splitext(file_path)[0] + '.sco'
# Create the command for prodigal to execute - use sco output format
prodigal = ['prodigal', '-i', file_path, '-o', results, '-f', 'sco']
# Only run prodigal if the output file doesn't already exist
if not os.path.isfile(results):
prodigallist.append(prodigal)
# Populate the dictionary with the name of the results file
orf_file_dict[file_name] = results
# Setup the multiprocessing pool.
pool = multiprocessing.Pool(processes=num_threads)
pool.map(run_prodigal, prodigallist)
pool.close()
pool.join()
return orf_file_dict
def run_prodigal(prodigal_command):
with open(os.devnull, 'w') as f: # No need to make the use see prodigal output, send it to devnull
subprocess.call(prodigal_command, stdout=f, stderr=f)
def find_orf_distribution(orf_file_dict):
"""
Parse the prodigal outputs to determine the frequency of ORF size ranges for each strain
:param orf_file_dict: dictionary of strain name: /sequencepath/prodigal results.sco
:return: orf_dist_dict: dictionary of strain name: tuple of ORF size range distribution frequencies
"""
# Initialise the dictionary
orf_dist_dict = dict()
for file_name, orf_report in orf_file_dict.items():
# Initialise variable to store the frequency of the different ORF size ranges
total_orfs = 0
over_3000 = 0
over_1000 = 0
over_500 = 0
other = 0
# Open the strain-specific report
with open(orf_report, 'r') as orfreport:
for line in orfreport:
# The report has a header section that can be ignored - only parse lines beginning with '>'
if line.startswith('>'):
# Split the line on '_' characters e.g. >1_345_920_- yields contig: >1, start: 345, stop: 920,
# direction: -
contig, start, stop, direction = line.split('_')
# The size of the ORF is the end position minus the start position e.g. 920 - 345 = 575
size = int(stop) - int(start)
# Increment the total number of ORFs before binning based on ORF size
total_orfs += 1
# Increment the appropriate integer based on ORF size
if size > 3000:
over_3000 += 1
elif size > 1000:
over_1000 += 1
elif size > 500:
over_500 += 1
else:
other += 1
# Populate the dictionary with a tuple of the ORF size range frequencies
orf_dist_dict[file_name] = (total_orfs,
over_3000,
over_1000,
over_500,
other)
# Clean-up the prodigal reports
try:
os.remove(orf_report)
except IOError:
pass
return orf_dist_dict
def reporter(gc_dict, contig_dist_dict, longest_contig_dict, genome_length_dict, num_contigs_dict, n50_dict, n75_dict,
n90_dict, l50_dict, l75_dict, l90_dict, orf_dist_dict, genus_dict, sequencepath):
"""
Create a report of all the extracted features
:param gc_dict: dictionary of strain name: GC%
:param contig_dist_dict: dictionary of strain: tuple of contig distribution frequencies
:param longest_contig_dict: dictionary of strain name: longest contig
:param genome_length_dict: dictionary of strain name: total genome length
:param num_contigs_dict: dictionary of strain name: total number of contigs
:param n50_dict: dictionary of strain name: N50
:param n75_dict: dictionary of strain name: N75
:param n90_dict: dictionary of strain name: N90
:param l50_dict: dictionary of strain name: L50
:param l75_dict: dictionary of strain name: L75
:param l90_dict: dictionary of strain name: L90
:param orf_dist_dict: dictionary of strain name: tuple of ORF length frequencies
:param genus_dict: dictionary of strain name: genus
:param sequencepath: path of folder containing FASTA genomes
"""
# Initialise string with header information
data = 'SampleName,TotalLength,NumContigs,LongestContig,Contigs>1000000,Contigs>500000,Contigs>100000,' \
'Contigs>50000,Contigs>10000,Contigs>5000,Contigs<5000,TotalORFs,ORFs>3000,ORFs>1000,ORFs>500,' \
'ORFs<500,N50,N75,N90,L50,L75,L90,GC%,Genus\n'
# Create and open the report for writign
with open(os.path.join(sequencepath, 'extracted_features.csv'), 'w') as feature_report:
for file_name in sorted(longest_contig_dict):
# Populate the data string with the appropriate values
data += '{name},{totlen},{numcontigs},{longestcontig},{over_106},{over_56},{over_105},{over_55},' \
'{over_104},{over_54},{under_54},{tORFS},{ORF33},{ORF13},{ORF52}, {ORF11},{n50},{n75},{n90},' \
'{l50},{l75},{l90},{gc},{genus}\n'\
.format(name=file_name,
totlen=genome_length_dict[file_name],
numcontigs=num_contigs_dict[file_name],
longestcontig=longest_contig_dict[file_name],
over_106=contig_dist_dict[file_name][0],
over_56=contig_dist_dict[file_name][1],
over_105=contig_dist_dict[file_name][2],
over_55=contig_dist_dict[file_name][3],
over_104=contig_dist_dict[file_name][4],
over_54=contig_dist_dict[file_name][5],
under_54=contig_dist_dict[file_name][6],
tORFS=orf_dist_dict[file_name][0],
ORF33=orf_dist_dict[file_name][1],
ORF13=orf_dist_dict[file_name][2],
ORF52=orf_dist_dict[file_name][3],
ORF11=orf_dist_dict[file_name][4],
n50=n50_dict[file_name],
n75=n75_dict[file_name],
n90=n90_dict[file_name],
l50=l50_dict[file_name],
l75=l75_dict[file_name],
l90=l90_dict[file_name],
gc=gc_dict[file_name],
genus=genus_dict[file_name])
# Write the string to file
feature_report.write(data)
# Initialise the click decorator
@click.command()
@click.option('-s', '--sequencepath',
type=click.Path(exists=True),
required=True,
help='Path of folder containing multi-FASTA files')
@click.option('-d', '--refseq_database',
type=click.Path(exists=True),
required=True,
help='Path to reduced mash sketch of RefSeq.')
@click.option('-r', '--report',
is_flag=True,
default=True,
help='By default, a report of the extracted features is created. Include this flag if you do not want '
'a report created')
def cli(sequencepath, report, refseq_database):
"""
Pass command line arguments to, and run the feature extraction functions
"""
main(sequencepath, report, refseq_database, num_threads=multiprocessing.cpu_count())
if __name__ == '__main__':
cli()
|
OLC-LOC-Bioinformatics/GenomeQAML | genomeqaml/extract_features.py | find_n50 | python | def find_n50(contig_lengths_dict, genome_length_dict):
# Initialise the dictionary
n50_dict = dict()
for file_name, contig_lengths in contig_lengths_dict.items():
# Initialise a variable to store a running total of contig lengths
currentlength = 0
for contig_length in contig_lengths:
# Increment the current length with the length of the current contig
currentlength += contig_length
# If the current length is now greater than the total genome / 2, the current contig length is the N50
if currentlength >= genome_length_dict[file_name] * 0.5:
# Populate the dictionary, and break the loop
n50_dict[file_name] = contig_length
break
return n50_dict | Calculate the N50 for each strain. N50 is defined as the largest contig such that at least half of the total
genome size is contained in contigs equal to or larger than this contig
:param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
:param genome_length_dict: dictionary of strain name: total genome length
:return: n50_dict: dictionary of strain name: N50 | train | https://github.com/OLC-LOC-Bioinformatics/GenomeQAML/blob/2953e574c185afab23075641da4ce5392bc003e9/genomeqaml/extract_features.py#L245-L266 | null | #!/usr/bin/env python 3
from accessoryFunctions.accessoryFunctions import printtime
from Bio.SeqUtils import GC
from biotools import mash
import multiprocessing
from Bio import SeqIO
from glob import glob
import subprocess
import shutil
import click
import time
import os
__author__ = 'adamkoziol', 'andrewlow'
def main(sequencepath, report, refseq_database, num_threads=12, start=time.time()):
"""
Run the appropriate functions in order
:param sequencepath: path of folder containing FASTA genomes
:param report: boolean to determine whether a report is to be created
:param refseq_database: Path to reduced refseq database sketch
:param num_threads: Number of threads to run mash/other stuff on
:return: gc_dict, contig_dist_dict, longest_contig_dict, genome_length_dict, num_contigs_dict, n50_dict, n75_dict, \
n90_dict, l50_dict, l75_dict, l90_dict, orf_dist_dict
"""
files = find_files(sequencepath)
file_dict = filer(files)
printtime('Using MASH to determine genera of samples', start)
genus_dict = find_genus(file_dict, refseq_database, threads=num_threads)
file_records = fasta_records(file_dict)
printtime('Collecting basic quality metrics', start)
contig_len_dict, gc_dict = fasta_stats(file_dict, file_records)
contig_dist_dict = find_contig_distribution(contig_len_dict)
longest_contig_dict = find_largest_contig(contig_len_dict)
genome_length_dict = find_genome_length(contig_len_dict)
num_contigs_dict = find_num_contigs(contig_len_dict)
n50_dict = find_n50(contig_len_dict, genome_length_dict)
n75_dict = find_n75(contig_len_dict, genome_length_dict)
n90_dict = find_n90(contig_len_dict, genome_length_dict)
l50_dict = find_l50(contig_len_dict, genome_length_dict)
l75_dict = find_l75(contig_len_dict, genome_length_dict)
l90_dict = find_l90(contig_len_dict, genome_length_dict)
printtime('Using prodigal to calculate number of ORFs in each sample', start)
orf_file_dict = predict_orfs(file_dict, num_threads=num_threads)
orf_dist_dict = find_orf_distribution(orf_file_dict)
if report:
reporter(gc_dict, contig_dist_dict, longest_contig_dict, genome_length_dict, num_contigs_dict, n50_dict,
n75_dict, n90_dict, l50_dict, l75_dict, l90_dict, orf_dist_dict, genus_dict, sequencepath)
printtime('Features extracted!', start)
return gc_dict, contig_dist_dict, longest_contig_dict, genome_length_dict, num_contigs_dict, n50_dict, n75_dict, \
n90_dict, l50_dict, l75_dict, l90_dict, orf_dist_dict
def find_files(sequencepath):
"""
Use glob to find all FASTA files in the provided sequence path. NOTE: FASTA files must have an extension such as
.fasta, .fa, or .fas. Extensions of .fsa, .tfa, ect. are not currently supported
:param sequencepath: path of folder containing FASTA genomes
:return: list of FASTA files
"""
# Create a sorted list of all the FASTA files in the sequence path
files = sorted(glob(os.path.join(sequencepath, '*.fa*')))
return files
def filer(filelist):
"""
Helper script that creates a dictionary of the stain name: /sequencepath/strain_name.extension)
:param filelist: list of files to parse
:return filedict: dictionary of stain name: /sequencepath/strain_name.extension
"""
# Initialise the dictionary
filedict = dict()
for seqfile in filelist:
# Split off the file extension and remove the path from the name
strainname = os.path.splitext(os.path.basename(seqfile))[0]
# Populate the dictionary
filedict[strainname] = seqfile
return filedict
def fasta_records(files):
"""
Use SeqIO to create dictionaries of all records for each FASTA file
:param files: dictionary of stain name: /sequencepath/strain_name.extension
:return: file_records: dictionary of all contig records for all strains
"""
# Initialise the dictionary
file_records = dict()
for file_name, fasta in files.items():
# Create a dictionary of records for each file
record_dict = SeqIO.to_dict(SeqIO.parse(fasta, "fasta"))
# Set the records dictionary as the value for file_records
file_records[file_name] = record_dict
return file_records
def find_genus(files, database, threads=12):
    """
    Uses MASH to find the genus of fasta files.
    :param files: File dictionary returned by filer method.
    :param database: Path to reduced refseq database sketch.
    :param threads: Number of threads to run mash with.
    :return: genus_dict: Dictionary of genus for each sample. Will return NA if genus could not be found.
    """
    genus_dict = dict()
    # Use the fractional part of the current time as a (probably unique) scratch directory name
    tmpdir = str(time.time()).split('.')[-1]
    if not os.path.isdir(tmpdir):
        os.makedirs(tmpdir)
    for file_name, fasta in files.items():
        # Screen the assembly against the reduced RefSeq sketch with an identity cutoff of 0.95
        mash.screen(database, fasta,
                    threads=threads,
                    w='',
                    i=0.95,
                    output_file=os.path.join(tmpdir, 'screen.tab'))
        screen_output = mash.read_mash_screen(os.path.join(tmpdir, 'screen.tab'))
        # Remove the per-sample screen output so results cannot leak into the next iteration
        try:
            os.remove(os.path.join(tmpdir, 'screen.tab'))
        except IOError:
            pass
        try:
            # NOTE(review): assumes the hit's query_id is a path whose third-from-last component
            # is the genus - confirm against the layout of the RefSeq sketch being used
            genus = screen_output[0].query_id.split('/')[-3]
            # Treat Shigella as Escherichia for classification purposes
            if genus == 'Shigella':
                genus = 'Escherichia'
            genus_dict[file_name] = genus
        except IndexError:
            # No hits in the screen output: genus could not be determined
            genus_dict[file_name] = 'NA'
    shutil.rmtree(tmpdir)
    return genus_dict
def fasta_stats(files, records):
    """
    Parse the lengths of all contigs for each sample, as well as the total GC%
    :param files: dictionary of stain name: /sequencepath/strain_name.extension
    :param records: Dictionary of strain name: SeqIO records
    :return: contig_len_dict, gc_dict: dictionaries of list of all contig length, and total GC% for all strains
    """
    # Initialise dictionaries
    contig_len_dict = dict()
    gc_dict = dict()
    for file_name in files:
        contig_lengths = list()
        # Collect the contig sequences as strings; a single join below avoids the quadratic
        # cost of repeatedly concatenating Seq objects with +=
        sequences = list()
        for contig, record in records[file_name].items():
            # Record the length of every contig
            contig_lengths.append(len(record.seq))
            sequences.append(str(record.seq))
        # Set the reverse sorted (e.g. largest to smallest) list of contig sizes as the value
        contig_len_dict[file_name] = sorted(contig_lengths, reverse=True)
        # Calculate the GC% of the total genome sequence using GC - format to have two decimal places
        gc_dict[file_name] = float('{:0.2f}'.format(GC(''.join(sequences))))
    return contig_len_dict, gc_dict
def find_contig_distribution(contig_lengths_dict):
    """
    Determine the frequency of different contig size ranges for each strain
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :return: contig_len_dist_dict: dictionary of strain name: tuple of contig size range frequencies
    """
    # Bin edges, largest first; a contig is counted in the first bin whose edge it exceeds
    thresholds = (1000000, 500000, 100000, 50000, 10000, 5000)
    contig_len_dist_dict = dict()
    for file_name, contig_lengths in contig_lengths_dict.items():
        # One counter per threshold, plus a final catch-all for contigs of 5000 bp and under
        counts = [0] * (len(thresholds) + 1)
        for contig_length in contig_lengths:
            for index, threshold in enumerate(thresholds):
                if contig_length > threshold:
                    counts[index] += 1
                    break
            else:
                # No threshold exceeded: the contig falls in the smallest size class
                counts[-1] += 1
        # Store the frequencies as a tuple in the same order as the thresholds
        contig_len_dist_dict[file_name] = tuple(counts)
    return contig_len_dist_dict
def find_largest_contig(contig_lengths_dict):
    """
    Determine the largest contig for each strain
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :return: longest_contig_dict: dictionary of strain name: longest contig
    """
    # The lists arrive reverse-sorted, so the first element is always the longest contig
    return {strain: lengths[0] for strain, lengths in contig_lengths_dict.items()}
def find_genome_length(contig_lengths_dict):
    """
    Determine the total length of all the contigs for each strain
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :return: genome_length_dict: dictionary of strain name: total genome length
    """
    # The genome length is the sum of every contig length for the strain
    return {strain: sum(lengths) for strain, lengths in contig_lengths_dict.items()}
def find_num_contigs(contig_lengths_dict):
    """
    Count the total number of contigs for each strain
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :return: num_contigs_dict: dictionary of strain name: total number of contigs
    """
    # The contig count is simply the number of entries in each strain's length list
    return {strain: len(lengths) for strain, lengths in contig_lengths_dict.items()}
def find_n75(contig_lengths_dict, genome_length_dict):
    """
    Calculate the N75 for each strain. N75 is defined as the largest contig such that at least 3/4 of the total
    genome size is contained in contigs equal to or larger than this contig
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :param genome_length_dict: dictionary of strain name: total genome length
    :return: n75_dict: dictionary of strain name: N75
    """
    n75_dict = dict()
    for file_name, contig_lengths in contig_lengths_dict.items():
        # Three quarters of the genome must be contained in contigs at least this large
        target = genome_length_dict[file_name] * 0.75
        running_total = 0
        for contig_length in contig_lengths:
            running_total += contig_length
            if running_total >= target:
                # The contig that pushes the running total past the target is the N75
                n75_dict[file_name] = contig_length
                break
    return n75_dict
def find_n90(contig_lengths_dict, genome_length_dict):
    """
    Calculate the N90 for each strain. N90 is defined as the largest contig such that at least 9/10 of the total
    genome size is contained in contigs equal to or larger than this contig
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :param genome_length_dict: dictionary of strain name: total genome length
    :return: n90_dict: dictionary of strain name: N90
    """
    # Initialise the dictionary
    n90_dict = dict()
    for file_name, contig_lengths in contig_lengths_dict.items():
        currentlength = 0
        for contig_length in contig_lengths:
            currentlength += contig_length
            # Once at least 9/10 of the total genome length has been accumulated, the current
            # contig length is the N90 (the previous 0.95 factor computed an N95, not an N90)
            if currentlength >= genome_length_dict[file_name] * 0.9:
                n90_dict[file_name] = contig_length
                break
    return n90_dict
def find_l50(contig_lengths_dict, genome_length_dict):
    """
    Calculate the L50 for each strain. L50 is defined as the number of contigs required to achieve the N50
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :param genome_length_dict: dictionary of strain name: total genome length
    :return: l50_dict: dictionary of strain name: L50
    """
    l50_dict = dict()
    for file_name, contig_lengths in contig_lengths_dict.items():
        # Half of the genome must be covered by the counted contigs
        target = genome_length_dict[file_name] * 0.5
        running_total = 0
        # Enumerate from 1 so the index is the number of contigs consumed so far
        for contig_count, contig_length in enumerate(contig_lengths, 1):
            running_total += contig_length
            if running_total >= target:
                l50_dict[file_name] = contig_count
                break
    return l50_dict
def find_l75(contig_lengths_dict, genome_length_dict):
    """
    Calculate the L75 for each strain. L75 is defined as the number of contigs required to achieve the N75
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :param genome_length_dict: dictionary of strain name: total genome length
    :return: l75_dict: dictionary of strain name: L75
    """
    l75_dict = dict()
    for file_name, contig_lengths in contig_lengths_dict.items():
        # Three quarters of the genome must be covered by the counted contigs
        target = genome_length_dict[file_name] * 0.75
        running_total = 0
        # Enumerate from 1 so the index is the number of contigs consumed so far
        for contig_count, contig_length in enumerate(contig_lengths, 1):
            running_total += contig_length
            if running_total >= target:
                l75_dict[file_name] = contig_count
                break
    return l75_dict
def find_l90(contig_lengths_dict, genome_length_dict):
    """
    Calculate the L90 for each strain. L90 is defined as the number of contigs required to achieve the N90
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :param genome_length_dict: dictionary of strain name: total genome length
    :return: l90_dict: dictionary of strain name: L90
    """
    l90_dict = dict()
    for file_name, contig_lengths in contig_lengths_dict.items():
        # Nine tenths of the genome must be covered by the counted contigs
        target = genome_length_dict[file_name] * 0.9
        running_total = 0
        # Enumerate from 1 so the index is the number of contigs consumed so far
        for contig_count, contig_length in enumerate(contig_lengths, 1):
            running_total += contig_length
            if running_total >= target:
                l90_dict[file_name] = contig_count
                break
    return l90_dict
def predict_orfs(file_dict, num_threads=1):
    """
    Use prodigal to predict the number of open reading frames (ORFs) in each strain
    :param file_dict: dictionary of strain name: /sequencepath/strain_name.extension
    :param num_threads: number of threads to use in the pool of prodigal processes
    :return: orf_file_dict: dictionary of strain name: /sequencepath/prodigal results.sco
    """
    # Initialise the dictionary
    orf_file_dict = dict()
    # Commands to execute; only populated for samples without existing results
    prodigallist = list()
    for file_name, file_path in file_dict.items():
        # Set the name of the output .sco results file
        results = os.path.splitext(file_path)[0] + '.sco'
        # Create the command for prodigal to execute - use sco output format
        prodigal = ['prodigal', '-i', file_path, '-o', results, '-f', 'sco']
        # Only run prodigal if the output file doesn't already exist
        if not os.path.isfile(results):
            prodigallist.append(prodigal)
        # Populate the dictionary with the name of the results file, whether or not the
        # command is (re)run for this sample
        orf_file_dict[file_name] = results
    # Setup the multiprocessing pool; each worker executes one prodigal command via run_prodigal
    pool = multiprocessing.Pool(processes=num_threads)
    pool.map(run_prodigal, prodigallist)
    pool.close()
    pool.join()
    return orf_file_dict
def run_prodigal(prodigal_command):
    """
    Execute a single prodigal command, discarding its console output
    :param prodigal_command: list of command line arguments to pass to subprocess
    """
    # No need to make the user see prodigal output; subprocess.DEVNULL discards it without
    # manually opening and closing os.devnull
    subprocess.call(prodigal_command, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
def find_orf_distribution(orf_file_dict):
    """
    Parse the prodigal outputs to determine the frequency of ORF size ranges for each strain
    :param orf_file_dict: dictionary of strain name: /sequencepath/prodigal results.sco
    :return: orf_dist_dict: dictionary of strain name: tuple of ORF size range distribution frequencies
    """
    orf_dist_dict = dict()
    for file_name, orf_report in orf_file_dict.items():
        # Counters for the total number of ORFs and for each size range
        total_orfs = 0
        over_3000 = 0
        over_1000 = 0
        over_500 = 0
        other = 0
        with open(orf_report, 'r') as report_handle:
            for line in report_handle:
                # Skip the header section; ORF entries look like '>1_345_920_-'
                if not line.startswith('>'):
                    continue
                # Fields are contig, start, stop and strand, separated by underscores
                contig, start, stop, direction = line.split('_')
                # ORF size is the distance between the stop and start coordinates
                orf_size = int(stop) - int(start)
                total_orfs += 1
                # Bin the ORF by size
                if orf_size > 3000:
                    over_3000 += 1
                elif orf_size > 1000:
                    over_1000 += 1
                elif orf_size > 500:
                    over_500 += 1
                else:
                    other += 1
        # Store the size range frequencies as a tuple
        orf_dist_dict[file_name] = (total_orfs,
                                    over_3000,
                                    over_1000,
                                    over_500,
                                    other)
        # Clean-up the prodigal report once it has been parsed
        try:
            os.remove(orf_report)
        except IOError:
            pass
    return orf_dist_dict
def reporter(gc_dict, contig_dist_dict, longest_contig_dict, genome_length_dict, num_contigs_dict, n50_dict, n75_dict,
             n90_dict, l50_dict, l75_dict, l90_dict, orf_dist_dict, genus_dict, sequencepath):
    """
    Create a report of all the extracted features
    :param gc_dict: dictionary of strain name: GC%
    :param contig_dist_dict: dictionary of strain: tuple of contig distribution frequencies
    :param longest_contig_dict: dictionary of strain name: longest contig
    :param genome_length_dict: dictionary of strain name: total genome length
    :param num_contigs_dict: dictionary of strain name: total number of contigs
    :param n50_dict: dictionary of strain name: N50
    :param n75_dict: dictionary of strain name: N75
    :param n90_dict: dictionary of strain name: N90
    :param l50_dict: dictionary of strain name: L50
    :param l75_dict: dictionary of strain name: L75
    :param l90_dict: dictionary of strain name: L90
    :param orf_dist_dict: dictionary of strain name: tuple of ORF length frequencies
    :param genus_dict: dictionary of strain name: genus
    :param sequencepath: path of folder containing FASTA genomes
    """
    # Initialise string with header information
    data = 'SampleName,TotalLength,NumContigs,LongestContig,Contigs>1000000,Contigs>500000,Contigs>100000,' \
           'Contigs>50000,Contigs>10000,Contigs>5000,Contigs<5000,TotalORFs,ORFs>3000,ORFs>1000,ORFs>500,' \
           'ORFs<500,N50,N75,N90,L50,L75,L90,GC%,Genus\n'
    # Create and open the report for writing
    with open(os.path.join(sequencepath, 'extracted_features.csv'), 'w') as feature_report:
        for file_name in sorted(longest_contig_dict):
            # Populate the data string with the appropriate values. Note: the stray space that
            # used to precede the ORFs<500 field has been removed so every CSV field is clean
            data += '{name},{totlen},{numcontigs},{longestcontig},{over_106},{over_56},{over_105},{over_55},' \
                    '{over_104},{over_54},{under_54},{tORFS},{ORF33},{ORF13},{ORF52},{ORF11},{n50},{n75},{n90},' \
                    '{l50},{l75},{l90},{gc},{genus}\n'\
                .format(name=file_name,
                        totlen=genome_length_dict[file_name],
                        numcontigs=num_contigs_dict[file_name],
                        longestcontig=longest_contig_dict[file_name],
                        over_106=contig_dist_dict[file_name][0],
                        over_56=contig_dist_dict[file_name][1],
                        over_105=contig_dist_dict[file_name][2],
                        over_55=contig_dist_dict[file_name][3],
                        over_104=contig_dist_dict[file_name][4],
                        over_54=contig_dist_dict[file_name][5],
                        under_54=contig_dist_dict[file_name][6],
                        tORFS=orf_dist_dict[file_name][0],
                        ORF33=orf_dist_dict[file_name][1],
                        ORF13=orf_dist_dict[file_name][2],
                        ORF52=orf_dist_dict[file_name][3],
                        ORF11=orf_dist_dict[file_name][4],
                        n50=n50_dict[file_name],
                        n75=n75_dict[file_name],
                        n90=n90_dict[file_name],
                        l50=l50_dict[file_name],
                        l75=l75_dict[file_name],
                        l90=l90_dict[file_name],
                        gc=gc_dict[file_name],
                        genus=genus_dict[file_name])
        # Write the accumulated header and per-sample rows to file
        feature_report.write(data)
# Initialise the click decorator
@click.command()
@click.option('-s', '--sequencepath',
              type=click.Path(exists=True),
              required=True,
              help='Path of folder containing multi-FASTA files')
@click.option('-d', '--refseq_database',
              type=click.Path(exists=True),
              required=True,
              help='Path to reduced mash sketch of RefSeq.')
# NOTE(review): with is_flag=True and default=True, click sets the flag's value to
# 'not default', so passing -r/--report disables report creation as the help text
# describes - confirm against the installed click version
@click.option('-r', '--report',
              is_flag=True,
              default=True,
              help='By default, a report of the extracted features is created. Include this flag if you do not want '
                   'a report created')
def cli(sequencepath, report, refseq_database):
    """
    Pass command line arguments to, and run the feature extraction functions
    """
    # Use one worker thread per available CPU core
    main(sequencepath, report, refseq_database, num_threads=multiprocessing.cpu_count())
# Run the command-line interface when executed as a script
if __name__ == '__main__':
    cli()
|
OLC-LOC-Bioinformatics/GenomeQAML | genomeqaml/extract_features.py | find_n75 | python | def find_n75(contig_lengths_dict, genome_length_dict):
# Initialise the dictionary
n75_dict = dict()
for file_name, contig_lengths in contig_lengths_dict.items():
currentlength = 0
for contig_length in contig_lengths:
currentlength += contig_length
# If the current length is now greater than the 3/4 of the total genome length, the current contig length
# is the N75
if currentlength >= genome_length_dict[file_name] * 0.75:
n75_dict[file_name] = contig_length
break
return n75_dict | Calculate the N75 for each strain. N75 is defined as the largest contig such that at least 3/4 of the total
genome size is contained in contigs equal to or larger than this contig
:param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
:param genome_length_dict: dictionary of strain name: total genome length
:return: n75_dict: dictionary of strain name: N75 | train | https://github.com/OLC-LOC-Bioinformatics/GenomeQAML/blob/2953e574c185afab23075641da4ce5392bc003e9/genomeqaml/extract_features.py#L269-L288 | null | #!/usr/bin/env python 3
from accessoryFunctions.accessoryFunctions import printtime
from Bio.SeqUtils import GC
from biotools import mash
import multiprocessing
from Bio import SeqIO
from glob import glob
import subprocess
import shutil
import click
import time
import os
__author__ = 'adamkoziol', 'andrewlow'
def main(sequencepath, report, refseq_database, num_threads=12, start=None):
    """
    Run the appropriate functions in order
    :param sequencepath: path of folder containing FASTA genomes
    :param report: boolean to determine whether a report is to be created
    :param refseq_database: Path to reduced refseq database sketch
    :param num_threads: Number of threads to run mash/other stuff on
    :param start: reference time for elapsed-time messages; defaults to the time of the call
    :return: gc_dict, contig_dist_dict, longest_contig_dict, genome_length_dict, num_contigs_dict, n50_dict, n75_dict, \
    n90_dict, l50_dict, l75_dict, l90_dict, orf_dist_dict
    """
    # Evaluate the default at call time - the previous 'start=time.time()' default was evaluated
    # once at import, making every elapsed-time message relative to module load instead
    if start is None:
        start = time.time()
    files = find_files(sequencepath)
    file_dict = filer(files)
    printtime('Using MASH to determine genera of samples', start)
    genus_dict = find_genus(file_dict, refseq_database, threads=num_threads)
    file_records = fasta_records(file_dict)
    printtime('Collecting basic quality metrics', start)
    # Per-strain contig lengths and GC% feed all of the downstream metrics
    contig_len_dict, gc_dict = fasta_stats(file_dict, file_records)
    contig_dist_dict = find_contig_distribution(contig_len_dict)
    longest_contig_dict = find_largest_contig(contig_len_dict)
    genome_length_dict = find_genome_length(contig_len_dict)
    num_contigs_dict = find_num_contigs(contig_len_dict)
    n50_dict = find_n50(contig_len_dict, genome_length_dict)
    n75_dict = find_n75(contig_len_dict, genome_length_dict)
    n90_dict = find_n90(contig_len_dict, genome_length_dict)
    l50_dict = find_l50(contig_len_dict, genome_length_dict)
    l75_dict = find_l75(contig_len_dict, genome_length_dict)
    l90_dict = find_l90(contig_len_dict, genome_length_dict)
    printtime('Using prodigal to calculate number of ORFs in each sample', start)
    orf_file_dict = predict_orfs(file_dict, num_threads=num_threads)
    orf_dist_dict = find_orf_distribution(orf_file_dict)
    if report:
        reporter(gc_dict, contig_dist_dict, longest_contig_dict, genome_length_dict, num_contigs_dict, n50_dict,
                 n75_dict, n90_dict, l50_dict, l75_dict, l90_dict, orf_dist_dict, genus_dict, sequencepath)
    printtime('Features extracted!', start)
    return gc_dict, contig_dist_dict, longest_contig_dict, genome_length_dict, num_contigs_dict, n50_dict, n75_dict, \
        n90_dict, l50_dict, l75_dict, l90_dict, orf_dist_dict
def find_files(sequencepath):
    """
    Locate all FASTA files in the supplied folder with glob. NOTE: FASTA files must have an extension
    such as .fasta, .fa, or .fas. Extensions of .fsa, .tfa, etc. are not currently supported
    :param sequencepath: path of folder containing FASTA genomes
    :return: sorted list of FASTA files
    """
    # Match every file whose extension begins with '.fa' and return the hits in sorted order
    pattern = os.path.join(sequencepath, '*.fa*')
    return sorted(glob(pattern))
def filer(filelist):
    """
    Helper script that creates a dictionary of the stain name: /sequencepath/strain_name.extension)
    :param filelist: list of files to parse
    :return filedict: dictionary of stain name: /sequencepath/strain_name.extension
    """
    # Map each file path to its strain name: the base name with the extension stripped
    return {os.path.splitext(os.path.basename(seqfile))[0]: seqfile
            for seqfile in filelist}
def fasta_records(files):
    """
    Use SeqIO to create dictionaries of all records for each FASTA file
    :param files: dictionary of stain name: /sequencepath/strain_name.extension
    :return: file_records: dictionary of all contig records for all strains
    """
    file_records = dict()
    for strain, fasta_path in files.items():
        # SeqIO.to_dict keys each contig record by its record id
        file_records[strain] = SeqIO.to_dict(SeqIO.parse(fasta_path, "fasta"))
    return file_records
def find_genus(files, database, threads=12):
    """
    Uses MASH to find the genus of fasta files.
    :param files: File dictionary returned by filer method.
    :param database: Path to reduced refseq database sketch.
    :param threads: Number of threads to run mash with.
    :return: genus_dict: Dictionary of genus for each sample. Will return NA if genus could not be found.
    """
    genus_dict = dict()
    # Use the fractional part of the current time as a (probably unique) scratch directory name
    tmpdir = str(time.time()).split('.')[-1]
    if not os.path.isdir(tmpdir):
        os.makedirs(tmpdir)
    for file_name, fasta in files.items():
        # Screen the assembly against the reduced RefSeq sketch with an identity cutoff of 0.95
        mash.screen(database, fasta,
                    threads=threads,
                    w='',
                    i=0.95,
                    output_file=os.path.join(tmpdir, 'screen.tab'))
        screen_output = mash.read_mash_screen(os.path.join(tmpdir, 'screen.tab'))
        # Remove the per-sample screen output so results cannot leak into the next iteration
        try:
            os.remove(os.path.join(tmpdir, 'screen.tab'))
        except IOError:
            pass
        try:
            # NOTE(review): assumes the hit's query_id is a path whose third-from-last component
            # is the genus - confirm against the layout of the RefSeq sketch being used
            genus = screen_output[0].query_id.split('/')[-3]
            # Treat Shigella as Escherichia for classification purposes
            if genus == 'Shigella':
                genus = 'Escherichia'
            genus_dict[file_name] = genus
        except IndexError:
            # No hits in the screen output: genus could not be determined
            genus_dict[file_name] = 'NA'
    shutil.rmtree(tmpdir)
    return genus_dict
def fasta_stats(files, records):
    """
    Parse the lengths of all contigs for each sample, as well as the total GC%
    :param files: dictionary of stain name: /sequencepath/strain_name.extension
    :param records: Dictionary of strain name: SeqIO records
    :return: contig_len_dict, gc_dict: dictionaries of list of all contig length, and total GC% for all strains
    """
    # Initialise dictionaries
    contig_len_dict = dict()
    gc_dict = dict()
    for file_name in files:
        contig_lengths = list()
        # Collect the contig sequences as strings; a single join below avoids the quadratic
        # cost of repeatedly concatenating Seq objects with +=
        sequences = list()
        for contig, record in records[file_name].items():
            # Record the length of every contig
            contig_lengths.append(len(record.seq))
            sequences.append(str(record.seq))
        # Set the reverse sorted (e.g. largest to smallest) list of contig sizes as the value
        contig_len_dict[file_name] = sorted(contig_lengths, reverse=True)
        # Calculate the GC% of the total genome sequence using GC - format to have two decimal places
        gc_dict[file_name] = float('{:0.2f}'.format(GC(''.join(sequences))))
    return contig_len_dict, gc_dict
def find_contig_distribution(contig_lengths_dict):
    """
    Determine the frequency of different contig size ranges for each strain
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :return: contig_len_dist_dict: dictionary of strain name: tuple of contig size range frequencies
    """
    # Bin edges, largest first; a contig is counted in the first bin whose edge it exceeds
    thresholds = (1000000, 500000, 100000, 50000, 10000, 5000)
    contig_len_dist_dict = dict()
    for file_name, contig_lengths in contig_lengths_dict.items():
        # One counter per threshold, plus a final catch-all for contigs of 5000 bp and under
        counts = [0] * (len(thresholds) + 1)
        for contig_length in contig_lengths:
            for index, threshold in enumerate(thresholds):
                if contig_length > threshold:
                    counts[index] += 1
                    break
            else:
                # No threshold exceeded: the contig falls in the smallest size class
                counts[-1] += 1
        # Store the frequencies as a tuple in the same order as the thresholds
        contig_len_dist_dict[file_name] = tuple(counts)
    return contig_len_dist_dict
def find_largest_contig(contig_lengths_dict):
    """
    Determine the largest contig for each strain
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :return: longest_contig_dict: dictionary of strain name: longest contig
    """
    # The lists arrive reverse-sorted, so the first element is always the longest contig
    return {strain: lengths[0] for strain, lengths in contig_lengths_dict.items()}
def find_genome_length(contig_lengths_dict):
    """
    Determine the total length of all the contigs for each strain
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :return: genome_length_dict: dictionary of strain name: total genome length
    """
    # The genome length is the sum of every contig length for the strain
    return {strain: sum(lengths) for strain, lengths in contig_lengths_dict.items()}
def find_num_contigs(contig_lengths_dict):
    """
    Count the total number of contigs for each strain
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :return: num_contigs_dict: dictionary of strain name: total number of contigs
    """
    # The contig count is simply the number of entries in each strain's length list
    return {strain: len(lengths) for strain, lengths in contig_lengths_dict.items()}
def find_n50(contig_lengths_dict, genome_length_dict):
    """
    Calculate the N50 for each strain. N50 is defined as the largest contig such that at least half of the total
    genome size is contained in contigs equal to or larger than this contig
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :param genome_length_dict: dictionary of strain name: total genome length
    :return: n50_dict: dictionary of strain name: N50
    """
    n50_dict = dict()
    for file_name, contig_lengths in contig_lengths_dict.items():
        # Half of the genome must be contained in contigs at least this large
        target = genome_length_dict[file_name] * 0.5
        running_total = 0
        for contig_length in contig_lengths:
            running_total += contig_length
            if running_total >= target:
                # The contig that pushes the running total past the target is the N50
                n50_dict[file_name] = contig_length
                break
    return n50_dict
def find_n90(contig_lengths_dict, genome_length_dict):
    """
    Calculate the N90 for each strain. N90 is defined as the largest contig such that at least 9/10 of the total
    genome size is contained in contigs equal to or larger than this contig
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :param genome_length_dict: dictionary of strain name: total genome length
    :return: n90_dict: dictionary of strain name: N90
    """
    # Initialise the dictionary
    n90_dict = dict()
    for file_name, contig_lengths in contig_lengths_dict.items():
        currentlength = 0
        for contig_length in contig_lengths:
            currentlength += contig_length
            # Once at least 9/10 of the total genome length has been accumulated, the current
            # contig length is the N90 (the previous 0.95 factor computed an N95, not an N90)
            if currentlength >= genome_length_dict[file_name] * 0.9:
                n90_dict[file_name] = contig_length
                break
    return n90_dict
def find_l50(contig_lengths_dict, genome_length_dict):
    """
    Calculate the L50 for each strain. L50 is defined as the number of contigs required to achieve the N50
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :param genome_length_dict: dictionary of strain name: total genome length
    :return: l50_dict: dictionary of strain name: L50
    """
    l50_dict = dict()
    for file_name, contig_lengths in contig_lengths_dict.items():
        # Half of the genome must be covered by the counted contigs
        target = genome_length_dict[file_name] * 0.5
        running_total = 0
        # Enumerate from 1 so the index is the number of contigs consumed so far
        for contig_count, contig_length in enumerate(contig_lengths, 1):
            running_total += contig_length
            if running_total >= target:
                l50_dict[file_name] = contig_count
                break
    return l50_dict
def find_l75(contig_lengths_dict, genome_length_dict):
    """
    Calculate the L75 for each strain. L75 is defined as the number of contigs required to achieve the N75
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :param genome_length_dict: dictionary of strain name: total genome length
    :return: l75_dict: dictionary of strain name: L75
    """
    l75_dict = dict()
    for file_name, contig_lengths in contig_lengths_dict.items():
        # Three quarters of the genome must be covered by the counted contigs
        target = genome_length_dict[file_name] * 0.75
        running_total = 0
        # Enumerate from 1 so the index is the number of contigs consumed so far
        for contig_count, contig_length in enumerate(contig_lengths, 1):
            running_total += contig_length
            if running_total >= target:
                l75_dict[file_name] = contig_count
                break
    return l75_dict
def find_l90(contig_lengths_dict, genome_length_dict):
    """
    Calculate the L90 for each strain. L90 is defined as the number of contigs required to achieve the N90
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :param genome_length_dict: dictionary of strain name: total genome length
    :return: l90_dict: dictionary of strain name: L90
    """
    l90_dict = dict()
    for file_name, contig_lengths in contig_lengths_dict.items():
        # Nine tenths of the genome must be covered by the counted contigs
        target = genome_length_dict[file_name] * 0.9
        running_total = 0
        # Enumerate from 1 so the index is the number of contigs consumed so far
        for contig_count, contig_length in enumerate(contig_lengths, 1):
            running_total += contig_length
            if running_total >= target:
                l90_dict[file_name] = contig_count
                break
    return l90_dict
def predict_orfs(file_dict, num_threads=1):
    """
    Use prodigal to predict the number of open reading frames (ORFs) in each strain
    :param file_dict: dictionary of strain name: /sequencepath/strain_name.extension
    :param num_threads: number of threads to use in the pool of prodigal processes
    :return: orf_file_dict: dictionary of strain name: /sequencepath/prodigal results.sco
    """
    # Initialise the dictionary
    orf_file_dict = dict()
    # Commands to execute; only populated for samples without existing results
    prodigallist = list()
    for file_name, file_path in file_dict.items():
        # Set the name of the output .sco results file
        results = os.path.splitext(file_path)[0] + '.sco'
        # Create the command for prodigal to execute - use sco output format
        prodigal = ['prodigal', '-i', file_path, '-o', results, '-f', 'sco']
        # Only run prodigal if the output file doesn't already exist
        if not os.path.isfile(results):
            prodigallist.append(prodigal)
        # Populate the dictionary with the name of the results file, whether or not the
        # command is (re)run for this sample
        orf_file_dict[file_name] = results
    # Setup the multiprocessing pool; each worker executes one prodigal command via run_prodigal
    pool = multiprocessing.Pool(processes=num_threads)
    pool.map(run_prodigal, prodigallist)
    pool.close()
    pool.join()
    return orf_file_dict
def run_prodigal(prodigal_command):
    """
    Execute a single prodigal command, discarding its console output
    :param prodigal_command: list of command line arguments to pass to subprocess
    """
    # No need to make the user see prodigal output; subprocess.DEVNULL discards it without
    # manually opening and closing os.devnull
    subprocess.call(prodigal_command, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
def find_orf_distribution(orf_file_dict):
    """
    Parse the prodigal outputs to determine the frequency of ORF size ranges for each strain
    :param orf_file_dict: dictionary of strain name: /sequencepath/prodigal results.sco
    :return: orf_dist_dict: dictionary of strain name: tuple of ORF size range distribution frequencies
    """
    orf_dist_dict = dict()
    for strain, report_path in orf_file_dict.items():
        # Counters: total ORFs plus one bin per size range
        total = 0
        bins = {'3000': 0, '1000': 0, '500': 0, 'other': 0}
        with open(report_path, 'r') as report:
            for line in report:
                # Coordinate lines start with '>'; everything else is header text
                if not line.startswith('>'):
                    continue
                # e.g. '>1_345_920_-' -> contig '>1', start 345, stop 920, strand '-'
                _, start, stop, _ = line.split('_')
                # ORF size is the end position minus the start position
                orf_size = int(stop) - int(start)
                total += 1
                # Bin by size, largest range first
                if orf_size > 3000:
                    bins['3000'] += 1
                elif orf_size > 1000:
                    bins['1000'] += 1
                elif orf_size > 500:
                    bins['500'] += 1
                else:
                    bins['other'] += 1
        orf_dist_dict[strain] = (total, bins['3000'], bins['1000'], bins['500'], bins['other'])
        # Remove the prodigal report now that it has been summarised
        try:
            os.remove(report_path)
        except IOError:
            pass
    return orf_dist_dict
def reporter(gc_dict, contig_dist_dict, longest_contig_dict, genome_length_dict, num_contigs_dict, n50_dict, n75_dict,
             n90_dict, l50_dict, l75_dict, l90_dict, orf_dist_dict, genus_dict, sequencepath):
    """
    Create a CSV report (extracted_features.csv) of all the extracted features
    :param gc_dict: dictionary of strain name: GC%
    :param contig_dist_dict: dictionary of strain: tuple of contig distribution frequencies
    :param longest_contig_dict: dictionary of strain name: longest contig
    :param genome_length_dict: dictionary of strain name: total genome length
    :param num_contigs_dict: dictionary of strain name: total number of contigs
    :param n50_dict: dictionary of strain name: N50
    :param n75_dict: dictionary of strain name: N75
    :param n90_dict: dictionary of strain name: N90
    :param l50_dict: dictionary of strain name: L50
    :param l75_dict: dictionary of strain name: L75
    :param l90_dict: dictionary of strain name: L90
    :param orf_dist_dict: dictionary of strain name: tuple of ORF length frequencies
    :param genus_dict: dictionary of strain name: genus
    :param sequencepath: path of folder containing FASTA genomes
    """
    # Initialise string with header information
    data = 'SampleName,TotalLength,NumContigs,LongestContig,Contigs>1000000,Contigs>500000,Contigs>100000,' \
           'Contigs>50000,Contigs>10000,Contigs>5000,Contigs<5000,TotalORFs,ORFs>3000,ORFs>1000,ORFs>500,' \
           'ORFs<500,N50,N75,N90,L50,L75,L90,GC%,Genus\n'
    for file_name in sorted(longest_contig_dict):
        # Append one comma-separated row per strain.
        # BUG FIX: removed a stray space that preceded the ORFs<500 field
        data += '{name},{totlen},{numcontigs},{longestcontig},{over_106},{over_56},{over_105},{over_55},' \
                '{over_104},{over_54},{under_54},{tORFS},{ORF33},{ORF13},{ORF52},{ORF11},{n50},{n75},{n90},' \
                '{l50},{l75},{l90},{gc},{genus}\n'\
            .format(name=file_name,
                    totlen=genome_length_dict[file_name],
                    numcontigs=num_contigs_dict[file_name],
                    longestcontig=longest_contig_dict[file_name],
                    over_106=contig_dist_dict[file_name][0],
                    over_56=contig_dist_dict[file_name][1],
                    over_105=contig_dist_dict[file_name][2],
                    over_55=contig_dist_dict[file_name][3],
                    over_104=contig_dist_dict[file_name][4],
                    over_54=contig_dist_dict[file_name][5],
                    under_54=contig_dist_dict[file_name][6],
                    tORFS=orf_dist_dict[file_name][0],
                    ORF33=orf_dist_dict[file_name][1],
                    ORF13=orf_dist_dict[file_name][2],
                    ORF52=orf_dist_dict[file_name][3],
                    ORF11=orf_dist_dict[file_name][4],
                    n50=n50_dict[file_name],
                    n75=n75_dict[file_name],
                    n90=n90_dict[file_name],
                    l50=l50_dict[file_name],
                    l75=l75_dict[file_name],
                    l90=l90_dict[file_name],
                    gc=gc_dict[file_name],
                    genus=genus_dict[file_name])
    # Create the report and write it once, after every row has been added.
    # BUG FIX: previously the growing 'data' string was written inside the
    # loop, so the header and earlier rows were duplicated once per strain
    with open(os.path.join(sequencepath, 'extracted_features.csv'), 'w') as feature_report:
        feature_report.write(data)
# Initialise the click decorator
@click.command()
@click.option('-s', '--sequencepath',
              type=click.Path(exists=True),
              required=True,
              help='Path of folder containing multi-FASTA files')
@click.option('-d', '--refseq_database',
              type=click.Path(exists=True),
              required=True,
              help='Path to reduced mash sketch of RefSeq.')
# BUG FIX: this option was previously declared with is_flag=True and
# default=True, so its value was always True and the flag could never disable
# the report. With flag_value=False, supplying -r/--report now turns the
# report off, matching the documented behaviour.
@click.option('-r', '--report',
              flag_value=False,
              default=True,
              help='By default, a report of the extracted features is created. Include this flag if you do not want '
                   'a report created')
def cli(sequencepath, report, refseq_database):
    """
    Pass command line arguments to, and run the feature extraction functions
    """
    main(sequencepath, report, refseq_database, num_threads=multiprocessing.cpu_count())


if __name__ == '__main__':
    cli()
|
def find_n90(contig_lengths_dict, genome_length_dict):
    """
    Calculate the N90 for each strain. N90 is defined as the largest contig such that at least 9/10 of the total
    genome size is contained in contigs equal to or larger than this contig
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :param genome_length_dict: dictionary of strain name: total genome length
    :return: n90_dict: dictionary of strain name: N90
    """
    # Initialise the dictionary
    n90_dict = dict()
    for file_name, contig_lengths in contig_lengths_dict.items():
        currentlength = 0
        for contig_length in contig_lengths:
            currentlength += contig_length
            # BUG FIX: the threshold was 0.95 (and the comment referenced the
            # N75); N90 requires 9/10 of the genome, i.e. a factor of 0.9
            if currentlength >= genome_length_dict[file_name] * 0.9:
                n90_dict[file_name] = contig_length
                break
    return n90_dict
genome size is contained in contigs equal to or larger than this contig
:param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
:param genome_length_dict: dictionary of strain name: total genome length
:return: n90_dict: dictionary of strain name: N90 | train | https://github.com/OLC-LOC-Bioinformatics/GenomeQAML/blob/2953e574c185afab23075641da4ce5392bc003e9/genomeqaml/extract_features.py#L291-L310 | null | #!/usr/bin/env python 3
from accessoryFunctions.accessoryFunctions import printtime
from Bio.SeqUtils import GC
from biotools import mash
import multiprocessing
from Bio import SeqIO
from glob import glob
import subprocess
import shutil
import click
import time
import os
__author__ = 'adamkoziol', 'andrewlow'
def main(sequencepath, report, refseq_database, num_threads=12, start=None):
    """
    Run the appropriate functions in order
    :param sequencepath: path of folder containing FASTA genomes
    :param report: boolean to determine whether a report is to be created
    :param refseq_database: Path to reduced refseq database sketch
    :param num_threads: Number of threads to run mash/other stuff on
    :param start: analysis start time (epoch seconds); defaults to the time of the call
    :return: gc_dict, contig_dist_dict, longest_contig_dict, genome_length_dict, num_contigs_dict, n50_dict, n75_dict, \
    n90_dict, l50_dict, l75_dict, l90_dict, orf_dist_dict
    """
    # BUG FIX: the previous default of start=time.time() was evaluated once at
    # import time, so elapsed times reported by printtime were wrong for any
    # call made after import
    if start is None:
        start = time.time()
    files = find_files(sequencepath)
    file_dict = filer(files)
    printtime('Using MASH to determine genera of samples', start)
    genus_dict = find_genus(file_dict, refseq_database, threads=num_threads)
    file_records = fasta_records(file_dict)
    printtime('Collecting basic quality metrics', start)
    contig_len_dict, gc_dict = fasta_stats(file_dict, file_records)
    contig_dist_dict = find_contig_distribution(contig_len_dict)
    longest_contig_dict = find_largest_contig(contig_len_dict)
    genome_length_dict = find_genome_length(contig_len_dict)
    num_contigs_dict = find_num_contigs(contig_len_dict)
    n50_dict = find_n50(contig_len_dict, genome_length_dict)
    n75_dict = find_n75(contig_len_dict, genome_length_dict)
    n90_dict = find_n90(contig_len_dict, genome_length_dict)
    l50_dict = find_l50(contig_len_dict, genome_length_dict)
    l75_dict = find_l75(contig_len_dict, genome_length_dict)
    l90_dict = find_l90(contig_len_dict, genome_length_dict)
    printtime('Using prodigal to calculate number of ORFs in each sample', start)
    orf_file_dict = predict_orfs(file_dict, num_threads=num_threads)
    orf_dist_dict = find_orf_distribution(orf_file_dict)
    if report:
        reporter(gc_dict, contig_dist_dict, longest_contig_dict, genome_length_dict, num_contigs_dict, n50_dict,
                 n75_dict, n90_dict, l50_dict, l75_dict, l90_dict, orf_dist_dict, genus_dict, sequencepath)
    printtime('Features extracted!', start)
    return gc_dict, contig_dist_dict, longest_contig_dict, genome_length_dict, num_contigs_dict, n50_dict, n75_dict, \
        n90_dict, l50_dict, l75_dict, l90_dict, orf_dist_dict
def find_files(sequencepath):
    """
    Locate all FASTA-formatted files in the supplied directory. NOTE: only extensions beginning with
    .fa (e.g. .fasta, .fa, .fas) are matched; .fsa, .tfa, etc. are not currently supported
    :param sequencepath: path of folder containing FASTA genomes
    :return: sorted list of FASTA files
    """
    return sorted(glob(os.path.join(sequencepath, '*.fa*')))
def filer(filelist):
    """
    Helper that maps each strain name to its sequence file path
    :param filelist: list of files to parse
    :return filedict: dictionary of strain name: /sequencepath/strain_name.extension
    """
    # The strain name is the file's base name with the extension stripped
    return {os.path.splitext(os.path.basename(seqfile))[0]: seqfile
            for seqfile in filelist}
def fasta_records(files):
    """
    Use SeqIO to create dictionaries of all records for each FASTA file
    :param files: dictionary of strain name: /sequencepath/strain_name.extension
    :return: file_records: dictionary of strain name: dictionary of contig records
    """
    # Parse each FASTA into a {contig id: SeqRecord} mapping, keyed by strain
    return {file_name: SeqIO.to_dict(SeqIO.parse(fasta, "fasta"))
            for file_name, fasta in files.items()}
def find_genus(files, database, threads=12):
    """
    Uses MASH to find the genus of fasta files.
    :param files: File dictionary returned by filer method.
    :param database: Path to reduced refseq database sketch.
    :param threads: Number of threads to run mash with.
    :return: genus_dict: Dictionary of genus for each sample. Will return NA if genus could not be found.
    """
    genus_dict = dict()
    # Use the fractional part of the current time as a (weakly unique) name
    # for a throwaway working directory to hold mash screen output
    tmpdir = str(time.time()).split('.')[-1]
    if not os.path.isdir(tmpdir):
        os.makedirs(tmpdir)
    for file_name, fasta in files.items():
        # Screen the assembly against the RefSeq sketch with a 0.95 identity
        # threshold; w and i are passed straight through to mash screen
        mash.screen(database, fasta,
                    threads=threads,
                    w='',
                    i=0.95,
                    output_file=os.path.join(tmpdir, 'screen.tab'))
        screen_output = mash.read_mash_screen(os.path.join(tmpdir, 'screen.tab'))
        # Remove the screen output so the next sample starts from a clean slate
        try:
            os.remove(os.path.join(tmpdir, 'screen.tab'))
        except IOError:
            pass
        try:
            # The genus is taken as the third-from-last component of the hit's
            # query path - assumes RefSeq paths of the form
            # .../Genus/species/accession; TODO confirm against the sketch used
            genus = screen_output[0].query_id.split('/')[-3]
            # Shigella hits are reported as Escherichia
            if genus == 'Shigella':
                genus = 'Escherichia'
            genus_dict[file_name] = genus
        except IndexError:
            # screen_output was empty: no hit passed the threshold
            genus_dict[file_name] = 'NA'
    shutil.rmtree(tmpdir)
    return genus_dict
def fasta_stats(files, records):
    """
    Parse the lengths of all contigs for each sample, as well as the total GC%
    :param files: dictionary of strain name: /sequencepath/strain_name.extension
    :param records: Dictionary of strain name: SeqIO records
    :return: contig_len_dict, gc_dict: dictionaries of list of all contig length, and total GC% for all strains
    """
    # Initialise dictionaries
    contig_len_dict = dict()
    gc_dict = dict()
    for file_name in files:
        # Initialise variables to store appropriate values parsed from contig records
        contig_lengths = list()
        # NOTE(review): starts as str but becomes a Bio Seq object after the
        # first concatenation below; GC() appears to accept either - confirm
        fasta_sequence = str()
        for contig, record in records[file_name].items():
            # Append the length of the contig to the list
            contig_lengths.append(len(record.seq))
            # Add the contig sequence to the string
            fasta_sequence += record.seq
        # Set the reverse sorted (e.g. largest to smallest) list of contig sizes as the value
        contig_len_dict[file_name] = sorted(contig_lengths, reverse=True)
        # Calculate the GC% of the total genome sequence using GC - format to have two decimal places
        gc_dict[file_name] = float('{:0.2f}'.format(GC(fasta_sequence)))
    return contig_len_dict, gc_dict
def find_contig_distribution(contig_lengths_dict):
    """
    Tally how many contigs of each strain fall into a set of fixed size ranges
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :return: contig_len_dist_dict: dictionary of strain name: tuple of contig size range frequencies
    """
    # Lower bounds (exclusive) of the size bins, largest first; a final
    # catch-all bin collects contigs of 5000 bp or shorter
    thresholds = (1000000, 500000, 100000, 50000, 10000, 5000)
    contig_len_dist_dict = dict()
    for file_name, contig_lengths in contig_lengths_dict.items():
        counts = [0] * (len(thresholds) + 1)
        for contig_length in contig_lengths:
            # Find the first bin whose lower bound the contig exceeds
            for index, threshold in enumerate(thresholds):
                if contig_length > threshold:
                    counts[index] += 1
                    break
            else:
                # Contig is 5000 bp or shorter
                counts[-1] += 1
        contig_len_dist_dict[file_name] = tuple(counts)
    return contig_len_dist_dict
def find_largest_contig(contig_lengths_dict):
    """
    Report the size of the biggest contig for every strain
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :return: longest_contig_dict: dictionary of strain name: longest contig
    """
    # The length lists arrive sorted largest-to-smallest, so the head of each
    # list is the longest contig
    return {file_name: contig_lengths[0]
            for file_name, contig_lengths in contig_lengths_dict.items()}
def find_genome_length(contig_lengths_dict):
    """
    Sum the contig lengths to obtain the total assembly size for each strain
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :return: genome_length_dict: dictionary of strain name: total genome length
    """
    # The genome length is simply the sum of every contig length
    return {file_name: sum(contig_lengths)
            for file_name, contig_lengths in contig_lengths_dict.items()}
def find_num_contigs(contig_lengths_dict):
    """
    Count the total number of contigs for each strain
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :return: num_contigs_dict: dictionary of strain name: total number of contigs
    """
    # One entry per contig, so the list length is the contig count
    return {file_name: len(contig_lengths)
            for file_name, contig_lengths in contig_lengths_dict.items()}
def find_n50(contig_lengths_dict, genome_length_dict):
    """
    Calculate the N50 for each strain. N50 is defined as the largest contig such that at least half of the total
    genome size is contained in contigs equal to or larger than this contig
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :param genome_length_dict: dictionary of strain name: total genome length
    :return: n50_dict: dictionary of strain name: N50
    """
    n50_dict = dict()
    for file_name, contig_lengths in contig_lengths_dict.items():
        # Half of the total assembly length for this strain
        target = genome_length_dict[file_name] * 0.5
        running_total = 0
        # Walk the contigs from largest to smallest, accumulating length
        for contig_length in contig_lengths:
            running_total += contig_length
            if running_total >= target:
                # The contig that pushes the total past the halfway point is,
                # by definition, the N50
                n50_dict[file_name] = contig_length
                break
    return n50_dict
def find_n75(contig_lengths_dict, genome_length_dict):
    """
    Calculate the N75 for each strain. N75 is defined as the largest contig such that at least 3/4 of the total
    genome size is contained in contigs equal to or larger than this contig
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :param genome_length_dict: dictionary of strain name: total genome length
    :return: n75_dict: dictionary of strain name: N75
    """
    n75_dict = dict()
    for file_name, contig_lengths in contig_lengths_dict.items():
        # Three quarters of the total assembly length for this strain
        target = genome_length_dict[file_name] * 0.75
        running_total = 0
        for contig_length in contig_lengths:
            running_total += contig_length
            if running_total >= target:
                # The contig that pushes the total past 75% of the genome
                n75_dict[file_name] = contig_length
                break
    return n75_dict
def find_l50(contig_lengths_dict, genome_length_dict):
    """
    Calculate the L50 for each strain. L50 is defined as the number of contigs required to achieve the N50
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :param genome_length_dict: dictionary of strain name: total genome length
    :return: l50_dict: dictionary of strain name: L50
    """
    l50_dict = dict()
    for file_name, contig_lengths in contig_lengths_dict.items():
        # Half of the total assembly length for this strain
        target = genome_length_dict[file_name] * 0.5
        running_total = 0
        # enumerate provides the 1-based count of contigs consumed so far
        for contig_count, contig_length in enumerate(contig_lengths, start=1):
            running_total += contig_length
            if running_total >= target:
                # Record how many contigs were needed to reach half the genome
                l50_dict[file_name] = contig_count
                break
    return l50_dict
def find_l75(contig_lengths_dict, genome_length_dict):
    """
    Calculate the L75 for each strain. L75 is defined as the number of contigs required to achieve the N75
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :param genome_length_dict: dictionary of strain name: total genome length
    :return: l75_dict: dictionary of strain name: L75
    """
    l75_dict = dict()
    for file_name, contig_lengths in contig_lengths_dict.items():
        # Three quarters of the total assembly length for this strain
        target = genome_length_dict[file_name] * 0.75
        running_total = 0
        # enumerate provides the 1-based count of contigs consumed so far
        for contig_count, contig_length in enumerate(contig_lengths, start=1):
            running_total += contig_length
            if running_total >= target:
                # Record how many contigs were needed to reach 75% of the genome
                l75_dict[file_name] = contig_count
                break
    return l75_dict
def find_l90(contig_lengths_dict, genome_length_dict):
    """
    Calculate the L90 for each strain. L90 is defined as the number of contigs required to achieve the N90
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :param genome_length_dict: dictionary of strain name: total genome length
    :return: l90_dict: dictionary of strain name: L90
    """
    l90_dict = dict()
    for file_name, contig_lengths in contig_lengths_dict.items():
        # Nine tenths of the total assembly length for this strain
        target = genome_length_dict[file_name] * 0.9
        running_total = 0
        # enumerate provides the 1-based count of contigs consumed so far
        for contig_count, contig_length in enumerate(contig_lengths, start=1):
            running_total += contig_length
            if running_total >= target:
                # Record how many contigs were needed to reach 90% of the genome
                l90_dict[file_name] = contig_count
                break
    return l90_dict
def predict_orfs(file_dict, num_threads=1):
    """
    Use prodigal to predict the number of open reading frames (ORFs) in each strain
    :param file_dict: dictionary of strain name: /sequencepath/strain_name.extension
    :param num_threads: number of threads to use in the pool of prodigal processes
    :return: orf_file_dict: dictionary of strain name: /sequencepath/prodigal results.sco
    """
    orf_file_dict = dict()
    commands = list()
    for strain, fasta_path in file_dict.items():
        # Prodigal writes its simple coordinate (sco) output alongside the input file
        sco_path = os.path.splitext(fasta_path)[0] + '.sco'
        # Record the expected results file for every strain
        orf_file_dict[strain] = sco_path
        # Only queue a prodigal job when the results do not already exist on disk
        if not os.path.isfile(sco_path):
            commands.append(['prodigal', '-i', fasta_path, '-o', sco_path, '-f', 'sco'])
    # Fan the prodigal jobs out over a pool of worker processes
    pool = multiprocessing.Pool(processes=num_threads)
    pool.map(run_prodigal, commands)
    pool.close()
    pool.join()
    return orf_file_dict
def run_prodigal(prodigal_command):
    """
    Execute a single prodigal command, discarding its console output
    :param prodigal_command: list of command-line arguments for subprocess
    """
    # The user doesn't need to see prodigal chatter; route both streams to devnull
    with open(os.devnull, 'w') as devnull:
        subprocess.call(prodigal_command, stdout=devnull, stderr=devnull)
def find_orf_distribution(orf_file_dict):
    """
    Parse the prodigal outputs to determine the frequency of ORF size ranges for each strain
    :param orf_file_dict: dictionary of strain name: /sequencepath/prodigal results.sco
    :return: orf_dist_dict: dictionary of strain name: tuple of ORF size range distribution frequencies
    """
    orf_dist_dict = dict()
    for strain, report_path in orf_file_dict.items():
        # Counters: total ORFs plus one bin per size range
        total = 0
        bins = {'3000': 0, '1000': 0, '500': 0, 'other': 0}
        with open(report_path, 'r') as report:
            for line in report:
                # Coordinate lines start with '>'; everything else is header text
                if not line.startswith('>'):
                    continue
                # e.g. '>1_345_920_-' -> contig '>1', start 345, stop 920, strand '-'
                _, start, stop, _ = line.split('_')
                # ORF size is the end position minus the start position
                orf_size = int(stop) - int(start)
                total += 1
                # Bin by size, largest range first
                if orf_size > 3000:
                    bins['3000'] += 1
                elif orf_size > 1000:
                    bins['1000'] += 1
                elif orf_size > 500:
                    bins['500'] += 1
                else:
                    bins['other'] += 1
        orf_dist_dict[strain] = (total, bins['3000'], bins['1000'], bins['500'], bins['other'])
        # Remove the prodigal report now that it has been summarised
        try:
            os.remove(report_path)
        except IOError:
            pass
    return orf_dist_dict
def reporter(gc_dict, contig_dist_dict, longest_contig_dict, genome_length_dict, num_contigs_dict, n50_dict, n75_dict,
             n90_dict, l50_dict, l75_dict, l90_dict, orf_dist_dict, genus_dict, sequencepath):
    """
    Create a CSV report (extracted_features.csv) of all the extracted features
    :param gc_dict: dictionary of strain name: GC%
    :param contig_dist_dict: dictionary of strain: tuple of contig distribution frequencies
    :param longest_contig_dict: dictionary of strain name: longest contig
    :param genome_length_dict: dictionary of strain name: total genome length
    :param num_contigs_dict: dictionary of strain name: total number of contigs
    :param n50_dict: dictionary of strain name: N50
    :param n75_dict: dictionary of strain name: N75
    :param n90_dict: dictionary of strain name: N90
    :param l50_dict: dictionary of strain name: L50
    :param l75_dict: dictionary of strain name: L75
    :param l90_dict: dictionary of strain name: L90
    :param orf_dist_dict: dictionary of strain name: tuple of ORF length frequencies
    :param genus_dict: dictionary of strain name: genus
    :param sequencepath: path of folder containing FASTA genomes
    """
    # Initialise string with header information
    data = 'SampleName,TotalLength,NumContigs,LongestContig,Contigs>1000000,Contigs>500000,Contigs>100000,' \
           'Contigs>50000,Contigs>10000,Contigs>5000,Contigs<5000,TotalORFs,ORFs>3000,ORFs>1000,ORFs>500,' \
           'ORFs<500,N50,N75,N90,L50,L75,L90,GC%,Genus\n'
    for file_name in sorted(longest_contig_dict):
        # Append one comma-separated row per strain.
        # BUG FIX: removed a stray space that preceded the ORFs<500 field
        data += '{name},{totlen},{numcontigs},{longestcontig},{over_106},{over_56},{over_105},{over_55},' \
                '{over_104},{over_54},{under_54},{tORFS},{ORF33},{ORF13},{ORF52},{ORF11},{n50},{n75},{n90},' \
                '{l50},{l75},{l90},{gc},{genus}\n'\
            .format(name=file_name,
                    totlen=genome_length_dict[file_name],
                    numcontigs=num_contigs_dict[file_name],
                    longestcontig=longest_contig_dict[file_name],
                    over_106=contig_dist_dict[file_name][0],
                    over_56=contig_dist_dict[file_name][1],
                    over_105=contig_dist_dict[file_name][2],
                    over_55=contig_dist_dict[file_name][3],
                    over_104=contig_dist_dict[file_name][4],
                    over_54=contig_dist_dict[file_name][5],
                    under_54=contig_dist_dict[file_name][6],
                    tORFS=orf_dist_dict[file_name][0],
                    ORF33=orf_dist_dict[file_name][1],
                    ORF13=orf_dist_dict[file_name][2],
                    ORF52=orf_dist_dict[file_name][3],
                    ORF11=orf_dist_dict[file_name][4],
                    n50=n50_dict[file_name],
                    n75=n75_dict[file_name],
                    n90=n90_dict[file_name],
                    l50=l50_dict[file_name],
                    l75=l75_dict[file_name],
                    l90=l90_dict[file_name],
                    gc=gc_dict[file_name],
                    genus=genus_dict[file_name])
    # Create the report and write it once, after every row has been added.
    # BUG FIX: previously the growing 'data' string was written inside the
    # loop, so the header and earlier rows were duplicated once per strain
    with open(os.path.join(sequencepath, 'extracted_features.csv'), 'w') as feature_report:
        feature_report.write(data)
@click.command()
@click.option('-s', '--sequencepath',
type=click.Path(exists=True),
required=True,
help='Path of folder containing multi-FASTA files')
@click.option('-d', '--refseq_database',
type=click.Path(exists=True),
required=True,
help='Path to reduced mash sketch of RefSeq.')
@click.option('-r', '--report',
is_flag=True,
default=True,
help='By default, a report of the extracted features is created. Include this flag if you do not want '
'a report created')
def cli(sequencepath, report, refseq_database):
"""
Pass command line arguments to, and run the feature extraction functions
"""
main(sequencepath, report, refseq_database, num_threads=multiprocessing.cpu_count())
if __name__ == '__main__':
cli()
|
def find_l50(contig_lengths_dict, genome_length_dict):
    """
    Calculate the L50 for each strain. L50 is defined as the number of contigs required to achieve the N50
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :param genome_length_dict: dictionary of strain name: total genome length
    :return: l50_dict: dictionary of strain name: L50
    """
    # Initialise the dictionary
    l50_dict = dict()
    for file_name, contig_lengths in contig_lengths_dict.items():
        currentlength = 0
        # Initialise a variable to count how many contigs have been added to the currentlength variable
        currentcontig = 0
        for contig_length in contig_lengths:
            currentlength += contig_length
            # Increment :currentcontig each time a contig is added to the current length
            currentcontig += 1
            # Same logic as with the N50, but the contig number is added instead of the length of the contig
            if currentlength >= genome_length_dict[file_name] * 0.5:
                l50_dict[file_name] = currentcontig
                break
    return l50_dict
:param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
:param genome_length_dict: dictionary of strain name: total genome length
:return: l50_dict: dictionary of strain name: L50 | train | https://github.com/OLC-LOC-Bioinformatics/GenomeQAML/blob/2953e574c185afab23075641da4ce5392bc003e9/genomeqaml/extract_features.py#L313-L334 | null | #!/usr/bin/env python 3
from accessoryFunctions.accessoryFunctions import printtime
from Bio.SeqUtils import GC
from biotools import mash
import multiprocessing
from Bio import SeqIO
from glob import glob
import subprocess
import shutil
import click
import time
import os
__author__ = 'adamkoziol', 'andrewlow'
def main(sequencepath, report, refseq_database, num_threads=12, start=None):
    """
    Run the appropriate functions in order
    :param sequencepath: path of folder containing FASTA genomes
    :param report: boolean to determine whether a report is to be created
    :param refseq_database: Path to reduced refseq database sketch
    :param num_threads: Number of threads to run mash/other stuff on
    :param start: analysis start time (epoch seconds); defaults to the time of the call
    :return: gc_dict, contig_dist_dict, longest_contig_dict, genome_length_dict, num_contigs_dict, n50_dict, n75_dict, \
    n90_dict, l50_dict, l75_dict, l90_dict, orf_dist_dict
    """
    # BUG FIX: the previous default of start=time.time() was evaluated once at
    # import time, so elapsed times reported by printtime were wrong for any
    # call made after import
    if start is None:
        start = time.time()
    files = find_files(sequencepath)
    file_dict = filer(files)
    printtime('Using MASH to determine genera of samples', start)
    genus_dict = find_genus(file_dict, refseq_database, threads=num_threads)
    file_records = fasta_records(file_dict)
    printtime('Collecting basic quality metrics', start)
    contig_len_dict, gc_dict = fasta_stats(file_dict, file_records)
    contig_dist_dict = find_contig_distribution(contig_len_dict)
    longest_contig_dict = find_largest_contig(contig_len_dict)
    genome_length_dict = find_genome_length(contig_len_dict)
    num_contigs_dict = find_num_contigs(contig_len_dict)
    n50_dict = find_n50(contig_len_dict, genome_length_dict)
    n75_dict = find_n75(contig_len_dict, genome_length_dict)
    n90_dict = find_n90(contig_len_dict, genome_length_dict)
    l50_dict = find_l50(contig_len_dict, genome_length_dict)
    l75_dict = find_l75(contig_len_dict, genome_length_dict)
    l90_dict = find_l90(contig_len_dict, genome_length_dict)
    printtime('Using prodigal to calculate number of ORFs in each sample', start)
    orf_file_dict = predict_orfs(file_dict, num_threads=num_threads)
    orf_dist_dict = find_orf_distribution(orf_file_dict)
    if report:
        reporter(gc_dict, contig_dist_dict, longest_contig_dict, genome_length_dict, num_contigs_dict, n50_dict,
                 n75_dict, n90_dict, l50_dict, l75_dict, l90_dict, orf_dist_dict, genus_dict, sequencepath)
    printtime('Features extracted!', start)
    return gc_dict, contig_dist_dict, longest_contig_dict, genome_length_dict, num_contigs_dict, n50_dict, n75_dict, \
        n90_dict, l50_dict, l75_dict, l90_dict, orf_dist_dict
def find_files(sequencepath):
    """
    Locate all FASTA-formatted files in the supplied directory. NOTE: only extensions beginning with
    .fa (e.g. .fasta, .fa, .fas) are matched; .fsa, .tfa, etc. are not currently supported
    :param sequencepath: path of folder containing FASTA genomes
    :return: sorted list of FASTA files
    """
    return sorted(glob(os.path.join(sequencepath, '*.fa*')))
def filer(filelist):
    """
    Helper that maps each strain name to its sequence file path
    :param filelist: list of files to parse
    :return filedict: dictionary of strain name: /sequencepath/strain_name.extension
    """
    # The strain name is the file's base name with the extension stripped
    return {os.path.splitext(os.path.basename(seqfile))[0]: seqfile
            for seqfile in filelist}
def fasta_records(files):
    """
    Use SeqIO to create dictionaries of all records for each FASTA file
    :param files: dictionary of strain name: /sequencepath/strain_name.extension
    :return: file_records: dictionary of strain name: dictionary of contig records
    """
    # Parse each FASTA into a {contig id: SeqRecord} mapping, keyed by strain
    return {file_name: SeqIO.to_dict(SeqIO.parse(fasta, "fasta"))
            for file_name, fasta in files.items()}
def find_genus(files, database, threads=12):
    """
    Uses MASH to find the genus of fasta files.
    :param files: File dictionary returned by filer method.
    :param database: Path to reduced refseq database sketch.
    :param threads: Number of threads to run mash with.
    :return: genus_dict: Dictionary of genus for each sample. Will return NA if genus could not be found.
    """
    genus_dict = dict()
    # Use the fractional part of the current time as a (weakly unique) name
    # for a throwaway working directory to hold mash screen output
    tmpdir = str(time.time()).split('.')[-1]
    if not os.path.isdir(tmpdir):
        os.makedirs(tmpdir)
    for file_name, fasta in files.items():
        # Screen the assembly against the RefSeq sketch with a 0.95 identity
        # threshold; w and i are passed straight through to mash screen
        mash.screen(database, fasta,
                    threads=threads,
                    w='',
                    i=0.95,
                    output_file=os.path.join(tmpdir, 'screen.tab'))
        screen_output = mash.read_mash_screen(os.path.join(tmpdir, 'screen.tab'))
        # Remove the screen output so the next sample starts from a clean slate
        try:
            os.remove(os.path.join(tmpdir, 'screen.tab'))
        except IOError:
            pass
        try:
            # The genus is taken as the third-from-last component of the hit's
            # query path - assumes RefSeq paths of the form
            # .../Genus/species/accession; TODO confirm against the sketch used
            genus = screen_output[0].query_id.split('/')[-3]
            # Shigella hits are reported as Escherichia
            if genus == 'Shigella':
                genus = 'Escherichia'
            genus_dict[file_name] = genus
        except IndexError:
            # screen_output was empty: no hit passed the threshold
            genus_dict[file_name] = 'NA'
    shutil.rmtree(tmpdir)
    return genus_dict
def fasta_stats(files, records):
    """
    Parse the lengths of all contigs for each sample, as well as the total GC%
    :param files: dictionary of stain name: /sequencepath/strain_name.extension
    :param records: Dictionary of strain name: SeqIO records
    :return: contig_len_dict, gc_dict: dictionaries of list of all contig length, and total GC% for all strains
    """
    # Initialise dictionaries
    contig_len_dict = dict()
    gc_dict = dict()
    for file_name in files:
        # Initialise variables to store appropriate values parsed from contig records
        contig_lengths = list()
        # Collect the contig sequences in a list and join them once at the end:
        # repeated Seq/str concatenation inside the loop is quadratic in the
        # total genome size
        sequence_parts = list()
        for contig, record in records[file_name].items():
            # Append the length of the contig to the list
            contig_lengths.append(len(record.seq))
            # Store the contig sequence as a plain string for later joining
            sequence_parts.append(str(record.seq))
        # Set the reverse sorted (e.g. largest to smallest) list of contig sizes as the value
        contig_len_dict[file_name] = sorted(contig_lengths, reverse=True)
        # Calculate the GC% of the total genome sequence using GC - format to have two decimal places
        gc_dict[file_name] = float('{:0.2f}'.format(GC(''.join(sequence_parts))))
    return contig_len_dict, gc_dict
def find_contig_distribution(contig_lengths_dict):
"""
Determine the frequency of different contig size ranges for each strain
:param contig_lengths_dict:
:return: contig_len_dist_dict: dictionary of strain name: tuple of contig size range frequencies
"""
# Initialise the dictionary
contig_len_dist_dict = dict()
for file_name, contig_lengths in contig_lengths_dict.items():
# Initialise integers to store the number of contigs that fall into the different bin sizes
over_1000000 = 0
over_500000 = 0
over_100000 = 0
over_50000 = 0
over_10000 = 0
over_5000 = 0
other = 0
for contig_length in contig_lengths:
# Depending on the size of the contig, increment the appropriate integer
if contig_length > 1000000:
over_1000000 += 1
elif contig_length > 500000:
over_500000 += 1
elif contig_length > 100000:
over_100000 += 1
elif contig_length > 50000:
over_50000 += 1
elif contig_length > 10000:
over_10000 += 1
elif contig_length > 5000:
over_5000 += 1
else:
other += 1
# Populate the dictionary with a tuple of each of the size range frequencies
contig_len_dist_dict[file_name] = (over_1000000,
over_500000,
over_100000,
over_50000,
over_10000,
over_5000,
other)
return contig_len_dist_dict
def find_largest_contig(contig_lengths_dict):
    """
    Determine the largest contig for each strain
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :return: longest_contig_dict: dictionary of strain name: longest contig
    """
    # The length lists arrive sorted largest-to-smallest, so the first entry of
    # each list is the longest contig
    return {strain: lengths[0] for strain, lengths in contig_lengths_dict.items()}
def find_genome_length(contig_lengths_dict):
    """
    Determine the total length of all the contigs for each strain
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :return: genome_length_dict: dictionary of strain name: total genome length
    """
    # The genome length is simply the sum of the strain's contig lengths
    return {strain: sum(lengths) for strain, lengths in contig_lengths_dict.items()}
def find_num_contigs(contig_lengths_dict):
    """
    Count the total number of contigs for each strain
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :return: num_contigs_dict: dictionary of strain name: total number of contigs
    """
    # Each entry in a strain's length list corresponds to one contig
    return {strain: len(lengths) for strain, lengths in contig_lengths_dict.items()}
def find_n50(contig_lengths_dict, genome_length_dict):
    """
    Calculate the N50 for each strain. N50 is defined as the largest contig such that at least half of the total
    genome size is contained in contigs equal to or larger than this contig
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :param genome_length_dict: dictionary of strain name: total genome length
    :return: n50_dict: dictionary of strain name: N50
    """
    n50_dict = dict()
    for strain, lengths in contig_lengths_dict.items():
        # Half the total assembly size; lengths are sorted largest to smallest
        half_genome = genome_length_dict[strain] * 0.5
        running_total = 0
        for length in lengths:
            running_total += length
            if running_total >= half_genome:
                # The contig that pushes the running total past the halfway
                # point is, by definition, the N50
                n50_dict[strain] = length
                break
    return n50_dict
def find_n75(contig_lengths_dict, genome_length_dict):
    """
    Calculate the N75 for each strain. N75 is defined as the largest contig such that at least 3/4 of the total
    genome size is contained in contigs equal to or larger than this contig
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :param genome_length_dict: dictionary of strain name: total genome length
    :return: n75_dict: dictionary of strain name: N75
    """
    n75_dict = dict()
    for strain, lengths in contig_lengths_dict.items():
        # Three quarters of the total assembly size
        three_quarters = genome_length_dict[strain] * 0.75
        running_total = 0
        for length in lengths:
            running_total += length
            if running_total >= three_quarters:
                # First contig to carry the cumulative total past 3/4 of the
                # genome is the N75
                n75_dict[strain] = length
                break
    return n75_dict
def find_n90(contig_lengths_dict, genome_length_dict):
    """
    Calculate the N90 for each strain. N90 is defined as the largest contig such that at least 9/10 of the total
    genome size is contained in contigs equal to or larger than this contig
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :param genome_length_dict: dictionary of strain name: total genome length
    :return: n90_dict: dictionary of strain name: N90
    """
    # Initialise the dictionary
    n90_dict = dict()
    for file_name, contig_lengths in contig_lengths_dict.items():
        # Running total of contig lengths consumed so far
        currentlength = 0
        for contig_length in contig_lengths:
            currentlength += contig_length
            # If the current length is now at least 9/10 of the total genome length, the current contig
            # length is the N90. The previous implementation compared against 0.95 (i.e. an N95),
            # contradicting the function's name and documentation
            if currentlength >= genome_length_dict[file_name] * 0.9:
                n90_dict[file_name] = contig_length
                break
    return n90_dict
def find_l75(contig_lengths_dict, genome_length_dict):
    """
    Calculate the L75 for each strain. L75 is defined as the number of contigs required to achieve the N75
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :param genome_length_dict: dictionary of strain name: total genome length
    :return: l75_dict: dictionary of strain name: L75
    """
    # Initialise the dictionary
    l75_dict = dict()
    for file_name, contig_lengths in contig_lengths_dict.items():
        # Running total of contig lengths consumed so far
        currentlength = 0
        # Number of contigs added to the running total
        currentcontig = 0
        for contig_length in contig_lengths:
            currentlength += contig_length
            currentcontig += 1
            # Same logic as the N75 calculation, but the number of contigs is recorded
            # instead of the length of the contig
            if currentlength >= genome_length_dict[file_name] * 0.75:
                l75_dict[file_name] = currentcontig
                break
    return l75_dict
def find_l90(contig_lengths_dict, genome_length_dict):
    """
    Calculate the L90 for each strain. L90 is defined as the number of contigs required to achieve the N90
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :param genome_length_dict: dictionary of strain name: total genome length
    :return: l90_dict: dictionary of strain name: L90
    """
    l90_dict = dict()
    for strain, lengths in contig_lengths_dict.items():
        # 9/10 of the total assembly size
        threshold = genome_length_dict[strain] * 0.9
        running_total = 0
        # enumerate from 1 so the counter reflects how many contigs were consumed
        for contig_count, length in enumerate(lengths, start=1):
            running_total += length
            if running_total >= threshold:
                l90_dict[strain] = contig_count
                break
    return l90_dict
def predict_orfs(file_dict, num_threads=1):
    """
    Use prodigal to predict the number of open reading frames (ORFs) in each strain
    :param file_dict: dictionary of strain name: /sequencepath/strain_name.extension
    :param num_threads: number of threads to use in the pool of prodigal processes
    :return: orf_file_dict: dictionary of strain name: /sequencepath/prodigal results.sco
    """
    # Initialise the dictionary
    orf_file_dict = dict()
    # Commands to execute for samples whose results do not yet exist
    prodigallist = list()
    for file_name, file_path in file_dict.items():
        # Set the name of the output .sco results file
        results = os.path.splitext(file_path)[0] + '.sco'
        # Create the command for prodigal to execute - use sco output format
        prodigal = ['prodigal', '-i', file_path, '-o', results, '-f', 'sco']
        # Only run prodigal if the output file doesn't already exist
        if not os.path.isfile(results):
            prodigallist.append(prodigal)
        # Populate the dictionary with the name of the results file
        orf_file_dict[file_name] = results
    # Setup the multiprocessing pool. Each worker runs one prodigal subprocess;
    # prodigal itself is single-threaded, so parallelism comes from the pool
    pool = multiprocessing.Pool(processes=num_threads)
    pool.map(run_prodigal, prodigallist)
    pool.close()
    pool.join()
    return orf_file_dict
def run_prodigal(prodigal_command):
    """
    Execute a single prodigal command, discarding its console output
    :param prodigal_command: list of command-line arguments for subprocess
    """
    # Use subprocess.DEVNULL instead of manually opening os.devnull - same
    # effect (the user never sees prodigal chatter), less boilerplate
    subprocess.call(prodigal_command, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
def find_orf_distribution(orf_file_dict):
    """
    Parse the prodigal outputs to determine the frequency of ORF size ranges for each strain
    :param orf_file_dict: dictionary of strain name: /sequencepath/prodigal results.sco
    :return: orf_dist_dict: dictionary of strain name: tuple of ORF size range distribution frequencies
    """
    orf_dist_dict = dict()
    for strain, report_path in orf_file_dict.items():
        # Counters: total ORFs, then bins for >3000, >1000, >500, and everything else
        total = 0
        bins = [0, 0, 0, 0]
        with open(report_path, 'r') as report:
            for line in report:
                # Only coordinate lines (beginning with '>') matter; the rest of
                # the .sco file is header material
                if not line.startswith('>'):
                    continue
                # Coordinate lines look like >1_345_920_-: ordinal, start, stop, strand
                contig, start, stop, direction = line.split('_')
                # ORF size is simply stop minus start
                orf_size = int(stop) - int(start)
                total += 1
                if orf_size > 3000:
                    bins[0] += 1
                elif orf_size > 1000:
                    bins[1] += 1
                elif orf_size > 500:
                    bins[2] += 1
                else:
                    bins[3] += 1
        orf_dist_dict[strain] = (total, bins[0], bins[1], bins[2], bins[3])
        # Remove the prodigal report now that it has been parsed
        try:
            os.remove(report_path)
        except IOError:
            pass
    return orf_dist_dict
def reporter(gc_dict, contig_dist_dict, longest_contig_dict, genome_length_dict, num_contigs_dict, n50_dict, n75_dict,
             n90_dict, l50_dict, l75_dict, l90_dict, orf_dist_dict, genus_dict, sequencepath):
    """
    Create a report of all the extracted features
    :param gc_dict: dictionary of strain name: GC%
    :param contig_dist_dict: dictionary of strain: tuple of contig distribution frequencies
    :param longest_contig_dict: dictionary of strain name: longest contig
    :param genome_length_dict: dictionary of strain name: total genome length
    :param num_contigs_dict: dictionary of strain name: total number of contigs
    :param n50_dict: dictionary of strain name: N50
    :param n75_dict: dictionary of strain name: N75
    :param n90_dict: dictionary of strain name: N90
    :param l50_dict: dictionary of strain name: L50
    :param l75_dict: dictionary of strain name: L75
    :param l90_dict: dictionary of strain name: L90
    :param orf_dist_dict: dictionary of strain name: tuple of ORF length frequencies
    :param genus_dict: dictionary of strain name: genus
    :param sequencepath: path of folder containing FASTA genomes
    """
    # Initialise string with header information
    data = 'SampleName,TotalLength,NumContigs,LongestContig,Contigs>1000000,Contigs>500000,Contigs>100000,' \
           'Contigs>50000,Contigs>10000,Contigs>5000,Contigs<5000,TotalORFs,ORFs>3000,ORFs>1000,ORFs>500,' \
           'ORFs<500,N50,N75,N90,L50,L75,L90,GC%,Genus\n'
    # Create and open the report for writing
    with open(os.path.join(sequencepath, 'extracted_features.csv'), 'w') as feature_report:
        for file_name in sorted(longest_contig_dict):
            # Populate the data string with the appropriate values. The template must be
            # strictly comma-separated: a stray space previously sat between {ORF52} and
            # {ORF11}, corrupting the ORFs<500 column with a leading blank
            data += '{name},{totlen},{numcontigs},{longestcontig},{over_106},{over_56},{over_105},{over_55},' \
                    '{over_104},{over_54},{under_54},{tORFS},{ORF33},{ORF13},{ORF52},{ORF11},{n50},{n75},{n90},' \
                    '{l50},{l75},{l90},{gc},{genus}\n'\
                .format(name=file_name,
                        totlen=genome_length_dict[file_name],
                        numcontigs=num_contigs_dict[file_name],
                        longestcontig=longest_contig_dict[file_name],
                        over_106=contig_dist_dict[file_name][0],
                        over_56=contig_dist_dict[file_name][1],
                        over_105=contig_dist_dict[file_name][2],
                        over_55=contig_dist_dict[file_name][3],
                        over_104=contig_dist_dict[file_name][4],
                        over_54=contig_dist_dict[file_name][5],
                        under_54=contig_dist_dict[file_name][6],
                        tORFS=orf_dist_dict[file_name][0],
                        ORF33=orf_dist_dict[file_name][1],
                        ORF13=orf_dist_dict[file_name][2],
                        ORF52=orf_dist_dict[file_name][3],
                        ORF11=orf_dist_dict[file_name][4],
                        n50=n50_dict[file_name],
                        n75=n75_dict[file_name],
                        n90=n90_dict[file_name],
                        l50=l50_dict[file_name],
                        l75=l75_dict[file_name],
                        l90=l90_dict[file_name],
                        gc=gc_dict[file_name],
                        genus=genus_dict[file_name])
        # Write the accumulated header + all rows to file once, after the loop;
        # writing inside the loop would duplicate the growing string
        feature_report.write(data)
# Initialise the click decorator
@click.command()
@click.option('-s', '--sequencepath',
              type=click.Path(exists=True),
              required=True,
              help='Path of folder containing multi-FASTA files')
@click.option('-d', '--refseq_database',
              type=click.Path(exists=True),
              required=True,
              help='Path to reduced mash sketch of RefSeq.')
# NOTE(review): with is_flag=True and default=True, supplying -r sets the value to
# True, which it already is - the flag can never disable the report, contradicting
# the help text. A '--report/--no-report' switch would match the intent; confirm.
@click.option('-r', '--report',
              is_flag=True,
              default=True,
              help='By default, a report of the extracted features is created. Include this flag if you do not want '
                   'a report created')
def cli(sequencepath, report, refseq_database):
    """
    Pass command line arguments to, and run the feature extraction functions
    """
    # Use every available core for mash/prodigal work
    main(sequencepath, report, refseq_database, num_threads=multiprocessing.cpu_count())


if __name__ == '__main__':
    cli()
|
OLC-LOC-Bioinformatics/GenomeQAML | genomeqaml/extract_features.py | find_l75 | python | def find_l75(contig_lengths_dict, genome_length_dict):
# Initialise the dictionary
l75_dict = dict()
for file_name, contig_lengths in contig_lengths_dict.items():
currentlength = 0
currentcontig = 0
for contig_length in contig_lengths:
currentlength += contig_length
currentcontig += 1
# Same logic as with the L75, but the contig number is added instead of the length of the contig
if currentlength >= genome_length_dict[file_name] * 0.75:
l75_dict[file_name] = currentcontig
break
return l75_dict | Calculate the L50 for each strain. L75 is defined as the number of contigs required to achieve the N75
:param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
:param genome_length_dict: dictionary of strain name: total genome length
:return: l50_dict: dictionary of strain name: L75 | train | https://github.com/OLC-LOC-Bioinformatics/GenomeQAML/blob/2953e574c185afab23075641da4ce5392bc003e9/genomeqaml/extract_features.py#L337-L356 | null | #!/usr/bin/env python 3
from accessoryFunctions.accessoryFunctions import printtime
from Bio.SeqUtils import GC
from biotools import mash
import multiprocessing
from Bio import SeqIO
from glob import glob
import subprocess
import shutil
import click
import time
import os
__author__ = 'adamkoziol', 'andrewlow'
def main(sequencepath, report, refseq_database, num_threads=12, start=time.time()):
    """
    Run the appropriate functions in order
    :param sequencepath: path of folder containing FASTA genomes
    :param report: boolean to determine whether a report is to be created
    :param refseq_database: Path to reduced refseq database sketch
    :param num_threads: Number of threads to run mash/other stuff on
    :param start: reference time used for elapsed-time log messages.
        NOTE(review): this default is evaluated once at import time, not per call -
        pass an explicit start time if accurate elapsed times matter
    :return: gc_dict, contig_dist_dict, longest_contig_dict, genome_length_dict, num_contigs_dict, n50_dict, n75_dict, \
        n90_dict, l50_dict, l75_dict, l90_dict, orf_dist_dict
    """
    # Locate the FASTA files and map strain names to their file paths
    files = find_files(sequencepath)
    file_dict = filer(files)
    printtime('Using MASH to determine genera of samples', start)
    genus_dict = find_genus(file_dict, refseq_database, threads=num_threads)
    file_records = fasta_records(file_dict)
    printtime('Collecting basic quality metrics', start)
    # Contig length/GC metrics, then the assembly statistics derived from them
    contig_len_dict, gc_dict = fasta_stats(file_dict, file_records)
    contig_dist_dict = find_contig_distribution(contig_len_dict)
    longest_contig_dict = find_largest_contig(contig_len_dict)
    genome_length_dict = find_genome_length(contig_len_dict)
    num_contigs_dict = find_num_contigs(contig_len_dict)
    n50_dict = find_n50(contig_len_dict, genome_length_dict)
    n75_dict = find_n75(contig_len_dict, genome_length_dict)
    n90_dict = find_n90(contig_len_dict, genome_length_dict)
    l50_dict = find_l50(contig_len_dict, genome_length_dict)
    l75_dict = find_l75(contig_len_dict, genome_length_dict)
    l90_dict = find_l90(contig_len_dict, genome_length_dict)
    printtime('Using prodigal to calculate number of ORFs in each sample', start)
    # ORF prediction is the slow step; the .sco reports are parsed then removed
    orf_file_dict = predict_orfs(file_dict, num_threads=num_threads)
    orf_dist_dict = find_orf_distribution(orf_file_dict)
    if report:
        reporter(gc_dict, contig_dist_dict, longest_contig_dict, genome_length_dict, num_contigs_dict, n50_dict,
                 n75_dict, n90_dict, l50_dict, l75_dict, l90_dict, orf_dist_dict, genus_dict, sequencepath)
    printtime('Features extracted!', start)
    return gc_dict, contig_dist_dict, longest_contig_dict, genome_length_dict, num_contigs_dict, n50_dict, n75_dict, \
        n90_dict, l50_dict, l75_dict, l90_dict, orf_dist_dict
def find_files(sequencepath):
    """
    Use glob to find all FASTA files in the provided sequence path. NOTE: FASTA files must have an extension such as
    .fasta, .fa, or .fas. Extensions of .fsa, .tfa, ect. are not currently supported
    :param sequencepath: path of folder containing FASTA genomes
    :return: list of FASTA files
    """
    # Match anything whose extension starts with '.fa' and return it sorted
    return sorted(glob(os.path.join(sequencepath, '*.fa*')))
def filer(filelist):
    """
    Helper script that creates a dictionary of the stain name: /sequencepath/strain_name.extension)
    :param filelist: list of files to parse
    :return filedict: dictionary of stain name: /sequencepath/strain_name.extension
    """
    # The strain name is the file's base name with its extension stripped off
    return {os.path.splitext(os.path.basename(seq_file))[0]: seq_file
            for seq_file in filelist}
def fasta_records(files):
    """
    Load every FASTA file into a dictionary of SeqIO contig records
    :param files: dictionary of strain name: /sequencepath/strain_name.extension
    :return: file_records: dictionary of all contig records for all strains
    """
    # Build the per-strain record dictionaries in a single comprehension;
    # SeqIO.to_dict keys each record by its contig identifier
    return {strain: SeqIO.to_dict(SeqIO.parse(fasta_file, "fasta"))
            for strain, fasta_file in files.items()}
def find_genus(files, database, threads=12):
    """
    Uses MASH to find the genus of fasta files.
    :param files: File dictionary returned by filer method.
    :param database: Path to reduced refseq database sketch.
    :param threads: Number of threads to run mash with.
    :return: genus_dict: Dictionary of genus for each sample. Will return NA if genus could not be found.
    """
    genus_dict = dict()
    # Use the fractional part of the current time as a (probably) unique name for
    # a scratch directory to hold the mash screen output
    tmpdir = str(time.time()).split('.')[-1]
    if not os.path.isdir(tmpdir):
        os.makedirs(tmpdir)
    for file_name, fasta in files.items():
        # Screen the assembly against the RefSeq sketch. w='' enables the
        # winner-takes-all option and i=0.95 sets the minimum identity threshold
        mash.screen(database, fasta,
                    threads=threads,
                    w='',
                    i=0.95,
                    output_file=os.path.join(tmpdir, 'screen.tab'))
        screen_output = mash.read_mash_screen(os.path.join(tmpdir, 'screen.tab'))
        # Remove the per-sample screen output so the next iteration starts clean
        try:
            os.remove(os.path.join(tmpdir, 'screen.tab'))
        except IOError:
            pass
        try:
            # assumes query_id is a RefSeq-style path .../Genus/species/assembly,
            # so the third-from-last path component is the genus - TODO confirm
            genus = screen_output[0].query_id.split('/')[-3]
            # Shigella is essentially indistinguishable from Escherichia by this
            # method, so report it as Escherichia
            if genus == 'Shigella':
                genus = 'Escherichia'
            genus_dict[file_name] = genus
        except IndexError:
            # The screen produced no hits - genus could not be determined
            genus_dict[file_name] = 'NA'
    shutil.rmtree(tmpdir)
    return genus_dict
def fasta_stats(files, records):
    """
    Parse the lengths of all contigs for each sample, as well as the total GC%
    :param files: dictionary of stain name: /sequencepath/strain_name.extension
    :param records: Dictionary of strain name: SeqIO records
    :return: contig_len_dict, gc_dict: dictionaries of list of all contig length, and total GC% for all strains
    """
    # Initialise dictionaries
    contig_len_dict = dict()
    gc_dict = dict()
    for file_name in files:
        # Initialise variables to store appropriate values parsed from contig records
        contig_lengths = list()
        # Collect the contig sequences in a list and join them once at the end:
        # repeated Seq/str concatenation inside the loop is quadratic in the
        # total genome size
        sequence_parts = list()
        for contig, record in records[file_name].items():
            # Append the length of the contig to the list
            contig_lengths.append(len(record.seq))
            # Store the contig sequence as a plain string for later joining
            sequence_parts.append(str(record.seq))
        # Set the reverse sorted (e.g. largest to smallest) list of contig sizes as the value
        contig_len_dict[file_name] = sorted(contig_lengths, reverse=True)
        # Calculate the GC% of the total genome sequence using GC - format to have two decimal places
        gc_dict[file_name] = float('{:0.2f}'.format(GC(''.join(sequence_parts))))
    return contig_len_dict, gc_dict
def find_contig_distribution(contig_lengths_dict):
"""
Determine the frequency of different contig size ranges for each strain
:param contig_lengths_dict:
:return: contig_len_dist_dict: dictionary of strain name: tuple of contig size range frequencies
"""
# Initialise the dictionary
contig_len_dist_dict = dict()
for file_name, contig_lengths in contig_lengths_dict.items():
# Initialise integers to store the number of contigs that fall into the different bin sizes
over_1000000 = 0
over_500000 = 0
over_100000 = 0
over_50000 = 0
over_10000 = 0
over_5000 = 0
other = 0
for contig_length in contig_lengths:
# Depending on the size of the contig, increment the appropriate integer
if contig_length > 1000000:
over_1000000 += 1
elif contig_length > 500000:
over_500000 += 1
elif contig_length > 100000:
over_100000 += 1
elif contig_length > 50000:
over_50000 += 1
elif contig_length > 10000:
over_10000 += 1
elif contig_length > 5000:
over_5000 += 1
else:
other += 1
# Populate the dictionary with a tuple of each of the size range frequencies
contig_len_dist_dict[file_name] = (over_1000000,
over_500000,
over_100000,
over_50000,
over_10000,
over_5000,
other)
return contig_len_dist_dict
def find_largest_contig(contig_lengths_dict):
    """
    Determine the largest contig for each strain
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :return: longest_contig_dict: dictionary of strain name: longest contig
    """
    # The length lists arrive sorted largest-to-smallest, so the first entry of
    # each list is the longest contig
    return {strain: lengths[0] for strain, lengths in contig_lengths_dict.items()}
def find_genome_length(contig_lengths_dict):
    """
    Determine the total length of all the contigs for each strain
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :return: genome_length_dict: dictionary of strain name: total genome length
    """
    # The genome length is simply the sum of the strain's contig lengths
    return {strain: sum(lengths) for strain, lengths in contig_lengths_dict.items()}
def find_num_contigs(contig_lengths_dict):
    """
    Count the total number of contigs for each strain
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :return: num_contigs_dict: dictionary of strain name: total number of contigs
    """
    # Each entry in a strain's length list corresponds to one contig
    return {strain: len(lengths) for strain, lengths in contig_lengths_dict.items()}
def find_n50(contig_lengths_dict, genome_length_dict):
    """
    Calculate the N50 for each strain. N50 is defined as the largest contig such that at least half of the total
    genome size is contained in contigs equal to or larger than this contig
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :param genome_length_dict: dictionary of strain name: total genome length
    :return: n50_dict: dictionary of strain name: N50
    """
    n50_dict = dict()
    for strain, lengths in contig_lengths_dict.items():
        # Half the total assembly size; lengths are sorted largest to smallest
        half_genome = genome_length_dict[strain] * 0.5
        running_total = 0
        for length in lengths:
            running_total += length
            if running_total >= half_genome:
                # The contig that pushes the running total past the halfway
                # point is, by definition, the N50
                n50_dict[strain] = length
                break
    return n50_dict
def find_n75(contig_lengths_dict, genome_length_dict):
    """
    Calculate the N75 for each strain. N75 is defined as the largest contig such that at least 3/4 of the total
    genome size is contained in contigs equal to or larger than this contig
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :param genome_length_dict: dictionary of strain name: total genome length
    :return: n75_dict: dictionary of strain name: N75
    """
    n75_dict = dict()
    for strain, lengths in contig_lengths_dict.items():
        # Three quarters of the total assembly size
        three_quarters = genome_length_dict[strain] * 0.75
        running_total = 0
        for length in lengths:
            running_total += length
            if running_total >= three_quarters:
                # First contig to carry the cumulative total past 3/4 of the
                # genome is the N75
                n75_dict[strain] = length
                break
    return n75_dict
def find_n90(contig_lengths_dict, genome_length_dict):
    """
    Calculate the N90 for each strain. N90 is defined as the largest contig such that at least 9/10 of the total
    genome size is contained in contigs equal to or larger than this contig
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :param genome_length_dict: dictionary of strain name: total genome length
    :return: n90_dict: dictionary of strain name: N90
    """
    # Initialise the dictionary
    n90_dict = dict()
    for file_name, contig_lengths in contig_lengths_dict.items():
        # Running total of contig lengths consumed so far
        currentlength = 0
        for contig_length in contig_lengths:
            currentlength += contig_length
            # If the current length is now at least 9/10 of the total genome length, the current contig
            # length is the N90. The previous implementation compared against 0.95 (i.e. an N95),
            # contradicting the function's name and documentation
            if currentlength >= genome_length_dict[file_name] * 0.9:
                n90_dict[file_name] = contig_length
                break
    return n90_dict
def find_l50(contig_lengths_dict, genome_length_dict):
    """
    Calculate the L50 for each strain. L50 is defined as the number of contigs required to achieve the N50
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :param genome_length_dict: dictionary of strain name: total genome length
    :return: l50_dict: dictionary of strain name: L50
    """
    l50_dict = dict()
    for strain, lengths in contig_lengths_dict.items():
        # Half the total assembly size
        halfway = genome_length_dict[strain] * 0.5
        running_total = 0
        # enumerate from 1 so the counter reflects how many contigs were consumed
        for contig_count, length in enumerate(lengths, start=1):
            running_total += length
            if running_total >= halfway:
                l50_dict[strain] = contig_count
                break
    return l50_dict
def find_l90(contig_lengths_dict, genome_length_dict):
    """
    Calculate the L90 for each strain. L90 is defined as the number of contigs required to achieve the N90
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :param genome_length_dict: dictionary of strain name: total genome length
    :return: l90_dict: dictionary of strain name: L90
    """
    l90_dict = dict()
    for strain, lengths in contig_lengths_dict.items():
        # 9/10 of the total assembly size
        threshold = genome_length_dict[strain] * 0.9
        running_total = 0
        # enumerate from 1 so the counter reflects how many contigs were consumed
        for contig_count, length in enumerate(lengths, start=1):
            running_total += length
            if running_total >= threshold:
                l90_dict[strain] = contig_count
                break
    return l90_dict
def predict_orfs(file_dict, num_threads=1):
    """
    Use prodigal to predict the number of open reading frames (ORFs) in each strain
    :param file_dict: dictionary of strain name: /sequencepath/strain_name.extension
    :param num_threads: number of threads to use in the pool of prodigal processes
    :return: orf_file_dict: dictionary of strain name: /sequencepath/prodigal results.sco
    """
    # Initialise the dictionary
    orf_file_dict = dict()
    # Commands to execute for samples whose results do not yet exist
    prodigallist = list()
    for file_name, file_path in file_dict.items():
        # Set the name of the output .sco results file
        results = os.path.splitext(file_path)[0] + '.sco'
        # Create the command for prodigal to execute - use sco output format
        prodigal = ['prodigal', '-i', file_path, '-o', results, '-f', 'sco']
        # Only run prodigal if the output file doesn't already exist
        if not os.path.isfile(results):
            prodigallist.append(prodigal)
        # Populate the dictionary with the name of the results file
        orf_file_dict[file_name] = results
    # Setup the multiprocessing pool. Each worker runs one prodigal subprocess;
    # prodigal itself is single-threaded, so parallelism comes from the pool
    pool = multiprocessing.Pool(processes=num_threads)
    pool.map(run_prodigal, prodigallist)
    pool.close()
    pool.join()
    return orf_file_dict
def run_prodigal(prodigal_command):
    """
    Execute a single prodigal command, discarding its console output
    :param prodigal_command: list of command-line arguments for subprocess
    """
    # Use subprocess.DEVNULL instead of manually opening os.devnull - same
    # effect (the user never sees prodigal chatter), less boilerplate
    subprocess.call(prodigal_command, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
def find_orf_distribution(orf_file_dict):
    """
    Parse the prodigal outputs to determine the frequency of ORF size ranges for each strain
    :param orf_file_dict: dictionary of strain name: /sequencepath/prodigal results.sco
    :return: orf_dist_dict: dictionary of strain name: tuple of ORF size range distribution frequencies
    """
    # Initialise the dictionary
    orf_dist_dict = dict()
    for file_name, orf_report in orf_file_dict.items():
        # Counters for the total number of ORFs and each size bin
        total_orfs = 0
        over_3000 = 0
        over_1000 = 0
        over_500 = 0
        other = 0
        with open(orf_report, 'r') as report_handle:
            # Only ORF records start with '>'; the prodigal header section is skipped
            for entry in report_handle:
                if not entry.startswith('>'):
                    continue
                # Split on '_' e.g. >1_345_920_- yields contig: >1, start: 345,
                # stop: 920, direction: -
                _, start, stop, _ = entry.split('_')
                # ORF size is the end position minus the start position
                orf_size = int(stop) - int(start)
                total_orfs += 1
                # Bin the ORF by size
                if orf_size > 3000:
                    over_3000 += 1
                elif orf_size > 1000:
                    over_1000 += 1
                elif orf_size > 500:
                    over_500 += 1
                else:
                    other += 1
        # Store a tuple of the ORF size range frequencies
        orf_dist_dict[file_name] = (total_orfs,
                                    over_3000,
                                    over_1000,
                                    over_500,
                                    other)
        # Clean-up the prodigal report; ignore a report that is already gone
        try:
            os.remove(orf_report)
        except IOError:
            pass
    return orf_dist_dict
def reporter(gc_dict, contig_dist_dict, longest_contig_dict, genome_length_dict, num_contigs_dict, n50_dict, n75_dict,
             n90_dict, l50_dict, l75_dict, l90_dict, orf_dist_dict, genus_dict, sequencepath):
    """
    Create a report of all the extracted features
    :param gc_dict: dictionary of strain name: GC%
    :param contig_dist_dict: dictionary of strain: tuple of contig distribution frequencies
    :param longest_contig_dict: dictionary of strain name: longest contig
    :param genome_length_dict: dictionary of strain name: total genome length
    :param num_contigs_dict: dictionary of strain name: total number of contigs
    :param n50_dict: dictionary of strain name: N50
    :param n75_dict: dictionary of strain name: N75
    :param n90_dict: dictionary of strain name: N90
    :param l50_dict: dictionary of strain name: L50
    :param l75_dict: dictionary of strain name: L75
    :param l90_dict: dictionary of strain name: L90
    :param orf_dist_dict: dictionary of strain name: tuple of ORF length frequencies
    :param genus_dict: dictionary of strain name: genus
    :param sequencepath: path of folder containing FASTA genomes
    """
    # Initialise string with header information
    data = 'SampleName,TotalLength,NumContigs,LongestContig,Contigs>1000000,Contigs>500000,Contigs>100000,' \
           'Contigs>50000,Contigs>10000,Contigs>5000,Contigs<5000,TotalORFs,ORFs>3000,ORFs>1000,ORFs>500,' \
           'ORFs<500,N50,N75,N90,L50,L75,L90,GC%,Genus\n'
    # Create and open the report for writing
    with open(os.path.join(sequencepath, 'extracted_features.csv'), 'w') as feature_report:
        for file_name in sorted(longest_contig_dict):
            # Populate the data string with the appropriate values. Note: the stray space that
            # previously preceded the ORFs<500 field has been removed so every value is unpadded
            data += '{name},{totlen},{numcontigs},{longestcontig},{over_106},{over_56},{over_105},{over_55},' \
                    '{over_104},{over_54},{under_54},{tORFS},{ORF33},{ORF13},{ORF52},{ORF11},{n50},{n75},{n90},' \
                    '{l50},{l75},{l90},{gc},{genus}\n'\
                .format(name=file_name,
                        totlen=genome_length_dict[file_name],
                        numcontigs=num_contigs_dict[file_name],
                        longestcontig=longest_contig_dict[file_name],
                        over_106=contig_dist_dict[file_name][0],
                        over_56=contig_dist_dict[file_name][1],
                        over_105=contig_dist_dict[file_name][2],
                        over_55=contig_dist_dict[file_name][3],
                        over_104=contig_dist_dict[file_name][4],
                        over_54=contig_dist_dict[file_name][5],
                        under_54=contig_dist_dict[file_name][6],
                        tORFS=orf_dist_dict[file_name][0],
                        ORF33=orf_dist_dict[file_name][1],
                        ORF13=orf_dist_dict[file_name][2],
                        ORF52=orf_dist_dict[file_name][3],
                        ORF11=orf_dist_dict[file_name][4],
                        n50=n50_dict[file_name],
                        n75=n75_dict[file_name],
                        n90=n90_dict[file_name],
                        l50=l50_dict[file_name],
                        l75=l75_dict[file_name],
                        l90=l90_dict[file_name],
                        gc=gc_dict[file_name],
                        genus=genus_dict[file_name])
        # Write the accumulated string exactly once, after the loop. Writing inside the loop
        # (as before) re-wrote the ever-growing string on every iteration, so the header and
        # all earlier rows were duplicated in the report whenever more than one strain was run
        feature_report.write(data)
# Initialise the click decorator
@click.command()
@click.option('-s', '--sequencepath',
              type=click.Path(exists=True),
              required=True,
              help='Path of folder containing multi-FASTA files')
@click.option('-d', '--refseq_database',
              type=click.Path(exists=True),
              required=True,
              help='Path to reduced mash sketch of RefSeq.')
# flag_value=False makes passing -r turn the report OFF, matching the help text.
# The previous declaration (is_flag=True with default=True) meant the flag could
# only ever re-set report to True, so the report could never be disabled
@click.option('-r', '--report',
              is_flag=True,
              flag_value=False,
              default=True,
              help='By default, a report of the extracted features is created. Include this flag if you do not want '
                   'a report created')
def cli(sequencepath, report, refseq_database):
    """
    Pass command line arguments to, and run the feature extraction functions
    """
    main(sequencepath, report, refseq_database, num_threads=multiprocessing.cpu_count())


if __name__ == '__main__':
    cli()
|
OLC-LOC-Bioinformatics/GenomeQAML | genomeqaml/extract_features.py | find_l90 | python | def find_l90(contig_lengths_dict, genome_length_dict):
# Initialise the dictionary
l90_dict = dict()
for file_name, contig_lengths in contig_lengths_dict.items():
currentlength = 0
# Initialise a variable to count how many contigs have been added to the currentlength variable
currentcontig = 0
for contig_length in contig_lengths:
currentlength += contig_length
# Increment :currentcontig each time a contig is added to the current length
currentcontig += 1
# Same logic as with the N50, but the contig number is added instead of the length of the contig
if currentlength >= genome_length_dict[file_name] * 0.9:
l90_dict[file_name] = currentcontig
break
return l90_dict | Calculate the L90 for each strain. L90 is defined as the number of contigs required to achieve the N90
:param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
:param genome_length_dict: dictionary of strain name: total genome length
:return: l90_dict: dictionary of strain name: L90 | train | https://github.com/OLC-LOC-Bioinformatics/GenomeQAML/blob/2953e574c185afab23075641da4ce5392bc003e9/genomeqaml/extract_features.py#L359-L380 | null | #!/usr/bin/env python 3
from accessoryFunctions.accessoryFunctions import printtime
from Bio.SeqUtils import GC
from biotools import mash
import multiprocessing
from Bio import SeqIO
from glob import glob
import subprocess
import shutil
import click
import time
import os
__author__ = 'adamkoziol', 'andrewlow'
def main(sequencepath, report, refseq_database, num_threads=12, start=None):
    """
    Run the appropriate functions in order
    :param sequencepath: path of folder containing FASTA genomes
    :param report: boolean to determine whether a report is to be created
    :param refseq_database: Path to reduced refseq database sketch
    :param num_threads: Number of threads to run mash/other stuff on
    :param start: analysis start time; defaults to the time of the call
    :return: gc_dict, contig_dist_dict, longest_contig_dict, genome_length_dict, num_contigs_dict, n50_dict, n75_dict, \
    n90_dict, l50_dict, l75_dict, l90_dict, orf_dist_dict
    """
    # The previous default of start=time.time() was evaluated once at module import,
    # so elapsed times were measured from module load rather than from this call
    if start is None:
        start = time.time()
    files = find_files(sequencepath)
    file_dict = filer(files)
    printtime('Using MASH to determine genera of samples', start)
    genus_dict = find_genus(file_dict, refseq_database, threads=num_threads)
    file_records = fasta_records(file_dict)
    printtime('Collecting basic quality metrics', start)
    contig_len_dict, gc_dict = fasta_stats(file_dict, file_records)
    contig_dist_dict = find_contig_distribution(contig_len_dict)
    longest_contig_dict = find_largest_contig(contig_len_dict)
    genome_length_dict = find_genome_length(contig_len_dict)
    num_contigs_dict = find_num_contigs(contig_len_dict)
    n50_dict = find_n50(contig_len_dict, genome_length_dict)
    n75_dict = find_n75(contig_len_dict, genome_length_dict)
    n90_dict = find_n90(contig_len_dict, genome_length_dict)
    l50_dict = find_l50(contig_len_dict, genome_length_dict)
    l75_dict = find_l75(contig_len_dict, genome_length_dict)
    l90_dict = find_l90(contig_len_dict, genome_length_dict)
    printtime('Using prodigal to calculate number of ORFs in each sample', start)
    orf_file_dict = predict_orfs(file_dict, num_threads=num_threads)
    orf_dist_dict = find_orf_distribution(orf_file_dict)
    if report:
        reporter(gc_dict, contig_dist_dict, longest_contig_dict, genome_length_dict, num_contigs_dict, n50_dict,
                 n75_dict, n90_dict, l50_dict, l75_dict, l90_dict, orf_dist_dict, genus_dict, sequencepath)
    printtime('Features extracted!', start)
    return gc_dict, contig_dist_dict, longest_contig_dict, genome_length_dict, num_contigs_dict, n50_dict, n75_dict, \
        n90_dict, l50_dict, l75_dict, l90_dict, orf_dist_dict
def find_files(sequencepath):
    """
    Use glob to find all FASTA files in the provided sequence path. NOTE: FASTA files must have an extension such as
    .fasta, .fa, or .fas. Extensions of .fsa, .tfa, ect. are not currently supported
    :param sequencepath: path of folder containing FASTA genomes
    :return: list of FASTA files
    """
    # Match every file whose extension begins with .fa, and sort the matches
    fasta_pattern = os.path.join(sequencepath, '*.fa*')
    return sorted(glob(fasta_pattern))
def filer(filelist):
    """
    Helper script that creates a dictionary of the stain name: /sequencepath/strain_name.extension)
    :param filelist: list of files to parse
    :return filedict: dictionary of stain name: /sequencepath/strain_name.extension
    """
    # Strip the path and the extension from each file to recover the strain name
    return {os.path.splitext(os.path.basename(seqfile))[0]: seqfile
            for seqfile in filelist}
def fasta_records(files):
    """
    Use SeqIO to create dictionaries of all records for each FASTA file
    :param files: dictionary of stain name: /sequencepath/strain_name.extension
    :return: file_records: dictionary of all contig records for all strains
    """
    # Map each strain name to its own {contig name: SeqRecord} dictionary
    return {file_name: SeqIO.to_dict(SeqIO.parse(fasta, "fasta"))
            for file_name, fasta in files.items()}
def find_genus(files, database, threads=12):
    """
    Uses MASH to find the genus of fasta files.
    :param files: File dictionary returned by filer method.
    :param database: Path to reduced refseq database sketch.
    :param threads: Number of threads to run mash with.
    :return: genus_dict: Dictionary of genus for each sample. Will return NA if genus could not be found.
    """
    genus_dict = dict()
    # Name the scratch directory after the fractional part of the current time
    tmpdir = str(time.time()).split('.')[-1]
    if not os.path.isdir(tmpdir):
        os.makedirs(tmpdir)
    try:
        for file_name, fasta in files.items():
            mash.screen(database, fasta,
                        threads=threads,
                        w='',
                        i=0.95,
                        output_file=os.path.join(tmpdir, 'screen.tab'))
            screen_output = mash.read_mash_screen(os.path.join(tmpdir, 'screen.tab'))
            try:
                os.remove(os.path.join(tmpdir, 'screen.tab'))
            except IOError:
                pass
            try:
                # The genus is the third-from-last component of the RefSeq hit path
                genus = screen_output[0].query_id.split('/')[-3]
                # Shigella hits are reported as Escherichia
                if genus == 'Shigella':
                    genus = 'Escherichia'
                genus_dict[file_name] = genus
            except IndexError:
                # No hit above the identity threshold
                genus_dict[file_name] = 'NA'
    finally:
        # Always remove the scratch directory, even if mash raises part-way
        # through (previously an exception here leaked the directory)
        shutil.rmtree(tmpdir)
    return genus_dict
def fasta_stats(files, records):
    """
    Parse the lengths of all contigs for each sample, as well as the total GC%
    :param files: dictionary of stain name: /sequencepath/strain_name.extension
    :param records: Dictionary of strain name: SeqIO records
    :return: contig_len_dict, gc_dict: dictionaries of list of all contig length, and total GC% for all strains
    """
    # Initialise dictionaries
    contig_len_dict = dict()
    gc_dict = dict()
    for file_name in files:
        contig_lengths = list()
        sequence_parts = list()
        for contig, record in records[file_name].items():
            # Record the length of the contig
            contig_lengths.append(len(record.seq))
            # Collect the contig sequences and join them once below; the previous
            # repeated `+=` concatenation was quadratic in total genome size
            sequence_parts.append(str(record.seq))
        fasta_sequence = ''.join(sequence_parts)
        # Reverse-sorted (largest to smallest) list of contig sizes
        contig_len_dict[file_name] = sorted(contig_lengths, reverse=True)
        # GC% of the total genome sequence, formatted to two decimal places
        gc_dict[file_name] = float('{:0.2f}'.format(GC(fasta_sequence)))
    return contig_len_dict, gc_dict
def find_contig_distribution(contig_lengths_dict):
    """
    Determine the frequency of different contig size ranges for each strain
    :param contig_lengths_dict:
    :return: contig_len_dist_dict: dictionary of strain name: tuple of contig size range frequencies
    """
    contig_len_dist_dict = dict()
    # Bin boundaries, largest first; a contig counts towards the first boundary it exceeds
    size_cutoffs = (1000000, 500000, 100000, 50000, 10000, 5000)
    for file_name, contig_lengths in contig_lengths_dict.items():
        # One tally per cutoff, plus a final slot for contigs of 5000 bp or less
        tallies = [0] * (len(size_cutoffs) + 1)
        for contig_length in contig_lengths:
            for index, cutoff in enumerate(size_cutoffs):
                if contig_length > cutoff:
                    tallies[index] += 1
                    break
            else:
                # Contig is smaller than every cutoff
                tallies[-1] += 1
        # Store a tuple of the size range frequencies
        contig_len_dist_dict[file_name] = tuple(tallies)
    return contig_len_dist_dict
def find_largest_contig(contig_lengths_dict):
    """
    Determine the largest contig for each strain
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :return: longest_contig_dict: dictionary of strain name: longest contig
    """
    # The lists are sorted in descending order, so the first entry is the longest contig
    return {file_name: contig_lengths[0]
            for file_name, contig_lengths in contig_lengths_dict.items()}
def find_genome_length(contig_lengths_dict):
    """
    Determine the total length of all the contigs for each strain
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :return: genome_length_dict: dictionary of strain name: total genome length
    """
    # The total genome length is simply the sum of all contig lengths
    return {file_name: sum(contig_lengths)
            for file_name, contig_lengths in contig_lengths_dict.items()}
def find_num_contigs(contig_lengths_dict):
    """
    Count the total number of contigs for each strain
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :return: num_contigs_dict: dictionary of strain name: total number of contigs
    """
    # The number of contigs is simply the number of entries in each length list
    return {file_name: len(contig_lengths)
            for file_name, contig_lengths in contig_lengths_dict.items()}
def find_n50(contig_lengths_dict, genome_length_dict):
    """
    Calculate the N50 for each strain. N50 is defined as the largest contig such that at least half of the total
    genome size is contained in contigs equal to or larger than this contig
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :param genome_length_dict: dictionary of strain name: total genome length
    :return: n50_dict: dictionary of strain name: N50
    """
    n50_dict = dict()
    for file_name, contig_lengths in contig_lengths_dict.items():
        # Half of the total assembly length is the target to reach
        target = genome_length_dict[file_name] * 0.5
        running_total = 0
        for contig_length in contig_lengths:
            running_total += contig_length
            # The contig that pushes the running total past the target is the N50
            if running_total >= target:
                n50_dict[file_name] = contig_length
                break
    return n50_dict
def find_n75(contig_lengths_dict, genome_length_dict):
    """
    Calculate the N75 for each strain. N75 is defined as the largest contig such that at least 3/4 of the total
    genome size is contained in contigs equal to or larger than this contig
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :param genome_length_dict: dictionary of strain name: total genome length
    :return: n75_dict: dictionary of strain name: N75
    """
    n75_dict = dict()
    for file_name, contig_lengths in contig_lengths_dict.items():
        # Three quarters of the total assembly length is the target to reach
        target = genome_length_dict[file_name] * 0.75
        running_total = 0
        for contig_length in contig_lengths:
            running_total += contig_length
            # The contig that pushes the running total past the target is the N75
            if running_total >= target:
                n75_dict[file_name] = contig_length
                break
    return n75_dict
def find_n90(contig_lengths_dict, genome_length_dict):
    """
    Calculate the N90 for each strain. N90 is defined as the largest contig such that at least 9/10 of the total
    genome size is contained in contigs equal to or larger than this contig
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :param genome_length_dict: dictionary of strain name: total genome length
    :return: n90_dict: dictionary of strain name: N90
    """
    # Initialise the dictionary
    n90_dict = dict()
    for file_name, contig_lengths in contig_lengths_dict.items():
        currentlength = 0
        for contig_length in contig_lengths:
            currentlength += contig_length
            # If the current length is now at least 9/10 of the total genome length, the current contig length
            # is the N90. The previous threshold of 0.95 contradicted the function name, the docstring,
            # and the companion find_l90 function (which uses 0.9)
            if currentlength >= genome_length_dict[file_name] * 0.9:
                n90_dict[file_name] = contig_length
                break
    return n90_dict
def find_l50(contig_lengths_dict, genome_length_dict):
    """
    Calculate the L50 for each strain. L50 is defined as the number of contigs required to achieve the N50
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :param genome_length_dict: dictionary of strain name: total genome length
    :return: l50_dict: dictionary of strain name: L50
    """
    l50_dict = dict()
    for file_name, contig_lengths in contig_lengths_dict.items():
        # Half of the total assembly length is the target to reach
        target = genome_length_dict[file_name] * 0.5
        running_total = 0
        # Count the contigs consumed until the running total reaches the target
        for contig_count, contig_length in enumerate(contig_lengths, start=1):
            running_total += contig_length
            if running_total >= target:
                l50_dict[file_name] = contig_count
                break
    return l50_dict
def find_l75(contig_lengths_dict, genome_length_dict):
    """
    Calculate the L50 for each strain. L75 is defined as the number of contigs required to achieve the N75
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :param genome_length_dict: dictionary of strain name: total genome length
    :return: l50_dict: dictionary of strain name: L75
    """
    l75_dict = dict()
    for file_name, contig_lengths in contig_lengths_dict.items():
        # Three quarters of the total assembly length is the target to reach
        target = genome_length_dict[file_name] * 0.75
        running_total = 0
        # Count the contigs consumed until the running total reaches the target
        for contig_count, contig_length in enumerate(contig_lengths, start=1):
            running_total += contig_length
            if running_total >= target:
                l75_dict[file_name] = contig_count
                break
    return l75_dict
def predict_orfs(file_dict, num_threads=1):
"""
Use prodigal to predict the number of open reading frames (ORFs) in each strain
:param file_dict: dictionary of strain name: /sequencepath/strain_name.extension
:param num_threads: number of threads to use in the pool of prodigal processes
:return: orf_file_dict: dictionary of strain name: /sequencepath/prodigal results.sco
"""
# Initialise the dictionary
orf_file_dict = dict()
prodigallist = list()
for file_name, file_path in file_dict.items():
# Set the name of the output .sco results file
results = os.path.splitext(file_path)[0] + '.sco'
# Create the command for prodigal to execute - use sco output format
prodigal = ['prodigal', '-i', file_path, '-o', results, '-f', 'sco']
# Only run prodigal if the output file doesn't already exist
if not os.path.isfile(results):
prodigallist.append(prodigal)
# Populate the dictionary with the name of the results file
orf_file_dict[file_name] = results
# Setup the multiprocessing pool.
pool = multiprocessing.Pool(processes=num_threads)
pool.map(run_prodigal, prodigallist)
pool.close()
pool.join()
return orf_file_dict
def run_prodigal(prodigal_command):
with open(os.devnull, 'w') as f: # No need to make the use see prodigal output, send it to devnull
subprocess.call(prodigal_command, stdout=f, stderr=f)
def find_orf_distribution(orf_file_dict):
"""
Parse the prodigal outputs to determine the frequency of ORF size ranges for each strain
:param orf_file_dict: dictionary of strain name: /sequencepath/prodigal results.sco
:return: orf_dist_dict: dictionary of strain name: tuple of ORF size range distribution frequencies
"""
# Initialise the dictionary
orf_dist_dict = dict()
for file_name, orf_report in orf_file_dict.items():
# Initialise variable to store the frequency of the different ORF size ranges
total_orfs = 0
over_3000 = 0
over_1000 = 0
over_500 = 0
other = 0
# Open the strain-specific report
with open(orf_report, 'r') as orfreport:
for line in orfreport:
# The report has a header section that can be ignored - only parse lines beginning with '>'
if line.startswith('>'):
# Split the line on '_' characters e.g. >1_345_920_- yields contig: >1, start: 345, stop: 920,
# direction: -
contig, start, stop, direction = line.split('_')
# The size of the ORF is the end position minus the start position e.g. 920 - 345 = 575
size = int(stop) - int(start)
# Increment the total number of ORFs before binning based on ORF size
total_orfs += 1
# Increment the appropriate integer based on ORF size
if size > 3000:
over_3000 += 1
elif size > 1000:
over_1000 += 1
elif size > 500:
over_500 += 1
else:
other += 1
# Populate the dictionary with a tuple of the ORF size range frequencies
orf_dist_dict[file_name] = (total_orfs,
over_3000,
over_1000,
over_500,
other)
# Clean-up the prodigal reports
try:
os.remove(orf_report)
except IOError:
pass
return orf_dist_dict
def reporter(gc_dict, contig_dist_dict, longest_contig_dict, genome_length_dict, num_contigs_dict, n50_dict, n75_dict,
             n90_dict, l50_dict, l75_dict, l90_dict, orf_dist_dict, genus_dict, sequencepath):
    """
    Create a report of all the extracted features
    :param gc_dict: dictionary of strain name: GC%
    :param contig_dist_dict: dictionary of strain: tuple of contig distribution frequencies
    :param longest_contig_dict: dictionary of strain name: longest contig
    :param genome_length_dict: dictionary of strain name: total genome length
    :param num_contigs_dict: dictionary of strain name: total number of contigs
    :param n50_dict: dictionary of strain name: N50
    :param n75_dict: dictionary of strain name: N75
    :param n90_dict: dictionary of strain name: N90
    :param l50_dict: dictionary of strain name: L50
    :param l75_dict: dictionary of strain name: L75
    :param l90_dict: dictionary of strain name: L90
    :param orf_dist_dict: dictionary of strain name: tuple of ORF length frequencies
    :param genus_dict: dictionary of strain name: genus
    :param sequencepath: path of folder containing FASTA genomes
    """
    # Initialise string with header information
    data = 'SampleName,TotalLength,NumContigs,LongestContig,Contigs>1000000,Contigs>500000,Contigs>100000,' \
           'Contigs>50000,Contigs>10000,Contigs>5000,Contigs<5000,TotalORFs,ORFs>3000,ORFs>1000,ORFs>500,' \
           'ORFs<500,N50,N75,N90,L50,L75,L90,GC%,Genus\n'
    # Create and open the report for writing
    with open(os.path.join(sequencepath, 'extracted_features.csv'), 'w') as feature_report:
        for file_name in sorted(longest_contig_dict):
            # Populate the data string with the appropriate values. Note: the stray space that
            # previously preceded the ORFs<500 field has been removed so every value is unpadded
            data += '{name},{totlen},{numcontigs},{longestcontig},{over_106},{over_56},{over_105},{over_55},' \
                    '{over_104},{over_54},{under_54},{tORFS},{ORF33},{ORF13},{ORF52},{ORF11},{n50},{n75},{n90},' \
                    '{l50},{l75},{l90},{gc},{genus}\n'\
                .format(name=file_name,
                        totlen=genome_length_dict[file_name],
                        numcontigs=num_contigs_dict[file_name],
                        longestcontig=longest_contig_dict[file_name],
                        over_106=contig_dist_dict[file_name][0],
                        over_56=contig_dist_dict[file_name][1],
                        over_105=contig_dist_dict[file_name][2],
                        over_55=contig_dist_dict[file_name][3],
                        over_104=contig_dist_dict[file_name][4],
                        over_54=contig_dist_dict[file_name][5],
                        under_54=contig_dist_dict[file_name][6],
                        tORFS=orf_dist_dict[file_name][0],
                        ORF33=orf_dist_dict[file_name][1],
                        ORF13=orf_dist_dict[file_name][2],
                        ORF52=orf_dist_dict[file_name][3],
                        ORF11=orf_dist_dict[file_name][4],
                        n50=n50_dict[file_name],
                        n75=n75_dict[file_name],
                        n90=n90_dict[file_name],
                        l50=l50_dict[file_name],
                        l75=l75_dict[file_name],
                        l90=l90_dict[file_name],
                        gc=gc_dict[file_name],
                        genus=genus_dict[file_name])
        # Write the accumulated string exactly once, after the loop. Writing inside the loop
        # (as before) re-wrote the ever-growing string on every iteration, so the header and
        # all earlier rows were duplicated in the report whenever more than one strain was run
        feature_report.write(data)
# Initialise the click decorator
@click.command()
@click.option('-s', '--sequencepath',
type=click.Path(exists=True),
required=True,
help='Path of folder containing multi-FASTA files')
@click.option('-d', '--refseq_database',
type=click.Path(exists=True),
required=True,
help='Path to reduced mash sketch of RefSeq.')
@click.option('-r', '--report',
is_flag=True,
default=True,
help='By default, a report of the extracted features is created. Include this flag if you do not want '
'a report created')
def cli(sequencepath, report, refseq_database):
"""
Pass command line arguments to, and run the feature extraction functions
"""
main(sequencepath, report, refseq_database, num_threads=multiprocessing.cpu_count())
if __name__ == '__main__':
cli()
|
OLC-LOC-Bioinformatics/GenomeQAML | genomeqaml/extract_features.py | predict_orfs | python | def predict_orfs(file_dict, num_threads=1):
# Initialise the dictionary
orf_file_dict = dict()
prodigallist = list()
for file_name, file_path in file_dict.items():
# Set the name of the output .sco results file
results = os.path.splitext(file_path)[0] + '.sco'
# Create the command for prodigal to execute - use sco output format
prodigal = ['prodigal', '-i', file_path, '-o', results, '-f', 'sco']
# Only run prodigal if the output file doesn't already exist
if not os.path.isfile(results):
prodigallist.append(prodigal)
# Populate the dictionary with the name of the results file
orf_file_dict[file_name] = results
# Setup the multiprocessing pool.
pool = multiprocessing.Pool(processes=num_threads)
pool.map(run_prodigal, prodigallist)
pool.close()
pool.join()
return orf_file_dict | Use prodigal to predict the number of open reading frames (ORFs) in each strain
:param file_dict: dictionary of strain name: /sequencepath/strain_name.extension
:param num_threads: number of threads to use in the pool of prodigal processes
:return: orf_file_dict: dictionary of strain name: /sequencepath/prodigal results.sco | train | https://github.com/OLC-LOC-Bioinformatics/GenomeQAML/blob/2953e574c185afab23075641da4ce5392bc003e9/genomeqaml/extract_features.py#L383-L408 | null | #!/usr/bin/env python 3
from accessoryFunctions.accessoryFunctions import printtime
from Bio.SeqUtils import GC
from biotools import mash
import multiprocessing
from Bio import SeqIO
from glob import glob
import subprocess
import shutil
import click
import time
import os
__author__ = 'adamkoziol', 'andrewlow'
def main(sequencepath, report, refseq_database, num_threads=12, start=time.time()):
"""
Run the appropriate functions in order
:param sequencepath: path of folder containing FASTA genomes
:param report: boolean to determine whether a report is to be created
:param refseq_database: Path to reduced refseq database sketch
:param num_threads: Number of threads to run mash/other stuff on
:return: gc_dict, contig_dist_dict, longest_contig_dict, genome_length_dict, num_contigs_dict, n50_dict, n75_dict, \
n90_dict, l50_dict, l75_dict, l90_dict, orf_dist_dict
"""
files = find_files(sequencepath)
file_dict = filer(files)
printtime('Using MASH to determine genera of samples', start)
genus_dict = find_genus(file_dict, refseq_database, threads=num_threads)
file_records = fasta_records(file_dict)
printtime('Collecting basic quality metrics', start)
contig_len_dict, gc_dict = fasta_stats(file_dict, file_records)
contig_dist_dict = find_contig_distribution(contig_len_dict)
longest_contig_dict = find_largest_contig(contig_len_dict)
genome_length_dict = find_genome_length(contig_len_dict)
num_contigs_dict = find_num_contigs(contig_len_dict)
n50_dict = find_n50(contig_len_dict, genome_length_dict)
n75_dict = find_n75(contig_len_dict, genome_length_dict)
n90_dict = find_n90(contig_len_dict, genome_length_dict)
l50_dict = find_l50(contig_len_dict, genome_length_dict)
l75_dict = find_l75(contig_len_dict, genome_length_dict)
l90_dict = find_l90(contig_len_dict, genome_length_dict)
printtime('Using prodigal to calculate number of ORFs in each sample', start)
orf_file_dict = predict_orfs(file_dict, num_threads=num_threads)
orf_dist_dict = find_orf_distribution(orf_file_dict)
if report:
reporter(gc_dict, contig_dist_dict, longest_contig_dict, genome_length_dict, num_contigs_dict, n50_dict,
n75_dict, n90_dict, l50_dict, l75_dict, l90_dict, orf_dist_dict, genus_dict, sequencepath)
printtime('Features extracted!', start)
return gc_dict, contig_dist_dict, longest_contig_dict, genome_length_dict, num_contigs_dict, n50_dict, n75_dict, \
n90_dict, l50_dict, l75_dict, l90_dict, orf_dist_dict
def find_files(sequencepath):
"""
Use glob to find all FASTA files in the provided sequence path. NOTE: FASTA files must have an extension such as
.fasta, .fa, or .fas. Extensions of .fsa, .tfa, ect. are not currently supported
:param sequencepath: path of folder containing FASTA genomes
:return: list of FASTA files
"""
# Create a sorted list of all the FASTA files in the sequence path
files = sorted(glob(os.path.join(sequencepath, '*.fa*')))
return files
def filer(filelist):
"""
Helper script that creates a dictionary of the stain name: /sequencepath/strain_name.extension)
:param filelist: list of files to parse
:return filedict: dictionary of stain name: /sequencepath/strain_name.extension
"""
# Initialise the dictionary
filedict = dict()
for seqfile in filelist:
# Split off the file extension and remove the path from the name
strainname = os.path.splitext(os.path.basename(seqfile))[0]
# Populate the dictionary
filedict[strainname] = seqfile
return filedict
def fasta_records(files):
"""
Use SeqIO to create dictionaries of all records for each FASTA file
:param files: dictionary of stain name: /sequencepath/strain_name.extension
:return: file_records: dictionary of all contig records for all strains
"""
# Initialise the dictionary
file_records = dict()
for file_name, fasta in files.items():
# Create a dictionary of records for each file
record_dict = SeqIO.to_dict(SeqIO.parse(fasta, "fasta"))
# Set the records dictionary as the value for file_records
file_records[file_name] = record_dict
return file_records
def find_genus(files, database, threads=12):
    """
    Uses MASH to find the genus of fasta files.
    :param files: File dictionary returned by filer method.
    :param database: Path to reduced refseq database sketch.
    :param threads: Number of threads to run mash with.
    :return: genus_dict: Dictionary of genus for each sample. Will return NA if genus could not be found.
    """
    import tempfile
    genus_dict = dict()
    # Use a guaranteed-unique temporary directory; the previous name, derived from the fractional part of
    # time.time(), could collide between concurrent runs, and was not removed if mash raised
    tmpdir = tempfile.mkdtemp()
    try:
        for file_name, fasta in files.items():
            screen_tab = os.path.join(tmpdir, 'screen.tab')
            mash.screen(database, fasta,
                        threads=threads,
                        w='',
                        i=0.95,
                        output_file=screen_tab)
            screen_output = mash.read_mash_screen(screen_tab)
            try:
                os.remove(screen_tab)
            except OSError:
                pass
            try:
                # assumes the query_id is a path whose third-from-last component is the genus - TODO confirm
                # against the sketch layout
                genus = screen_output[0].query_id.split('/')[-3]
                # Shigella is phylogenetically Escherichia; report it as such
                if genus == 'Shigella':
                    genus = 'Escherichia'
                genus_dict[file_name] = genus
            except IndexError:
                # No screen hits: the genus could not be determined
                genus_dict[file_name] = 'NA'
    finally:
        # Always clean up the scratch directory, even on error
        shutil.rmtree(tmpdir)
    return genus_dict
def fasta_stats(files, records):
    """
    Extract per-strain contig lengths and the overall GC percentage
    :param files: dictionary of strain name: /sequencepath/strain_name.extension
    :param records: dictionary of strain name: SeqIO records
    :return: contig_len_dict, gc_dict: dictionaries of descending contig lengths, and total GC% per strain
    """
    contig_len_dict = dict()
    gc_dict = dict()
    for file_name in files:
        lengths = list()
        sequence_parts = list()
        # Walk every contig record, noting its length and collecting its sequence text
        for record in records[file_name].values():
            lengths.append(len(record.seq))
            sequence_parts.append(str(record.seq))
        # Largest-to-smallest ordering is relied upon by the N50/L50-style calculations downstream
        contig_len_dict[file_name] = sorted(lengths, reverse=True)
        # GC% of the concatenated genome sequence, rounded to two decimal places
        gc_dict[file_name] = float('{:0.2f}'.format(GC(''.join(sequence_parts))))
    return contig_len_dict, gc_dict
def find_contig_distribution(contig_lengths_dict):
    """
    Bin each strain's contigs by size range and count how many land in each bin
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :return: contig_len_dist_dict: dictionary of strain name: tuple of contig size range frequencies
    """
    # Bin boundaries, largest first; a final catch-all bin collects contigs of 5000 bp or less
    thresholds = (1000000, 500000, 100000, 50000, 10000, 5000)
    contig_len_dist_dict = dict()
    for file_name, contig_lengths in contig_lengths_dict.items():
        counts = [0] * (len(thresholds) + 1)
        for contig_length in contig_lengths:
            # Place the contig in the first bin whose threshold it exceeds
            for index, threshold in enumerate(thresholds):
                if contig_length > threshold:
                    counts[index] += 1
                    break
            else:
                # Did not exceed any threshold: goes in the catch-all bin
                counts[-1] += 1
        contig_len_dist_dict[file_name] = tuple(counts)
    return contig_len_dist_dict
def find_largest_contig(contig_lengths_dict):
    """
    Extract the longest contig of each strain
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :return: longest_contig_dict: dictionary of strain name: longest contig
    """
    # Lists arrive sorted in descending order, so the head element is the longest contig
    return {file_name: lengths[0] for file_name, lengths in contig_lengths_dict.items()}
def find_genome_length(contig_lengths_dict):
    """
    Compute the total assembly length (sum of all contig lengths) per strain
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :return: genome_length_dict: dictionary of strain name: total genome length
    """
    return {file_name: sum(lengths) for file_name, lengths in contig_lengths_dict.items()}
def find_num_contigs(contig_lengths_dict):
    """
    Count how many contigs each strain's assembly contains
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :return: num_contigs_dict: dictionary of strain name: total number of contigs
    """
    return {file_name: len(lengths) for file_name, lengths in contig_lengths_dict.items()}
def find_n50(contig_lengths_dict, genome_length_dict):
    """
    Compute the N50 of each strain: the length of the contig at which the running total of contig lengths
    (largest first) first reaches half of the genome size
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :param genome_length_dict: dictionary of strain name: total genome length
    :return: n50_dict: dictionary of strain name: N50
    """
    n50_dict = dict()
    for file_name, contig_lengths in contig_lengths_dict.items():
        # Half the genome size is the target the running total must reach
        target = genome_length_dict[file_name] * 0.5
        running_total = 0
        for contig_length in contig_lengths:
            running_total += contig_length
            if running_total >= target:
                # This contig tips the running total past the target: it is the N50
                n50_dict[file_name] = contig_length
                break
    return n50_dict
def find_n75(contig_lengths_dict, genome_length_dict):
    """
    Compute the N75 of each strain: the length of the contig at which the running total of contig lengths
    (largest first) first reaches three quarters of the genome size
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :param genome_length_dict: dictionary of strain name: total genome length
    :return: n75_dict: dictionary of strain name: N75
    """
    n75_dict = dict()
    for file_name, contig_lengths in contig_lengths_dict.items():
        target = genome_length_dict[file_name] * 0.75
        running_total = 0
        for contig_length in contig_lengths:
            running_total += contig_length
            if running_total >= target:
                n75_dict[file_name] = contig_length
                break
    return n75_dict
def find_n90(contig_lengths_dict, genome_length_dict):
    """
    Calculate the N90 for each strain. N90 is defined as the largest contig such that at least 9/10 of the total
    genome size is contained in contigs equal to or larger than this contig
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :param genome_length_dict: dictionary of strain name: total genome length
    :return: n90_dict: dictionary of strain name: N90
    """
    # Initialise the dictionary
    n90_dict = dict()
    for file_name, contig_lengths in contig_lengths_dict.items():
        currentlength = 0
        for contig_length in contig_lengths:
            currentlength += contig_length
            # If the running total now covers at least 90% of the genome length, the current contig is the N90.
            # The threshold is 0.9 (the previous 0.95 computed an N95, contradicting both the function name and
            # the companion find_l90, which uses 0.9)
            if currentlength >= genome_length_dict[file_name] * 0.9:
                n90_dict[file_name] = contig_length
                break
    return n90_dict
def find_l50(contig_lengths_dict, genome_length_dict):
    """
    Compute the L50 of each strain: the number of contigs (largest first) needed to cover half the genome size
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :param genome_length_dict: dictionary of strain name: total genome length
    :return: l50_dict: dictionary of strain name: L50
    """
    l50_dict = dict()
    for file_name, contig_lengths in contig_lengths_dict.items():
        target = genome_length_dict[file_name] * 0.5
        running_total = 0
        # enumerate with start=1 yields the 1-based count of contigs consumed so far
        for position, contig_length in enumerate(contig_lengths, start=1):
            running_total += contig_length
            if running_total >= target:
                l50_dict[file_name] = position
                break
    return l50_dict
def find_l75(contig_lengths_dict, genome_length_dict):
    """
    Compute the L75 of each strain: the number of contigs (largest first) needed to cover 3/4 of the genome size
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :param genome_length_dict: dictionary of strain name: total genome length
    :return: l75_dict: dictionary of strain name: L75
    """
    l75_dict = dict()
    for file_name, contig_lengths in contig_lengths_dict.items():
        target = genome_length_dict[file_name] * 0.75
        running_total = 0
        for position, contig_length in enumerate(contig_lengths, start=1):
            running_total += contig_length
            if running_total >= target:
                l75_dict[file_name] = position
                break
    return l75_dict
def find_l90(contig_lengths_dict, genome_length_dict):
    """
    Compute the L90 of each strain: the number of contigs (largest first) needed to cover 90% of the genome size
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :param genome_length_dict: dictionary of strain name: total genome length
    :return: l90_dict: dictionary of strain name: L90
    """
    l90_dict = dict()
    for file_name, contig_lengths in contig_lengths_dict.items():
        target = genome_length_dict[file_name] * 0.9
        running_total = 0
        for position, contig_length in enumerate(contig_lengths, start=1):
            running_total += contig_length
            if running_total >= target:
                l90_dict[file_name] = position
                break
    return l90_dict
def run_prodigal(prodigal_command):
    """
    Execute a single prodigal command, discarding its console output
    :param prodigal_command: list of command-line arguments to pass to subprocess
    """
    # The user doesn't need to see prodigal's console chatter; subprocess.DEVNULL discards it without the
    # manual open(os.devnull)/close dance
    subprocess.call(prodigal_command, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
def find_orf_distribution(orf_file_dict):
    """
    Parse the prodigal outputs to bin predicted ORFs by size range for each strain
    :param orf_file_dict: dictionary of strain name: /sequencepath/prodigal results.sco
    :return: orf_dist_dict: dictionary of strain name: tuple of ORF size range distribution frequencies
    """
    orf_dist_dict = dict()
    for file_name, orf_report in orf_file_dict.items():
        # Counters for the total number of ORFs and for each size bin
        total_orfs = 0
        over_3000 = 0
        over_1000 = 0
        over_500 = 0
        other = 0
        with open(orf_report, 'r') as report_handle:
            for line in report_handle:
                # Skip the header section; ORF entries start with '>'
                if not line.startswith('>'):
                    continue
                # Entries look like >1_345_920_-: contig index, start, stop, strand, joined by '_'
                _, start, stop, _ = line.split('_')
                # ORF size is the span between the start and stop coordinates
                size = int(stop) - int(start)
                total_orfs += 1
                if size > 3000:
                    over_3000 += 1
                elif size > 1000:
                    over_1000 += 1
                elif size > 500:
                    over_500 += 1
                else:
                    other += 1
        orf_dist_dict[file_name] = (total_orfs,
                                    over_3000,
                                    over_1000,
                                    over_500,
                                    other)
        # Clean-up the prodigal report now that it has been consumed
        try:
            os.remove(orf_report)
        except IOError:
            pass
    return orf_dist_dict
def reporter(gc_dict, contig_dist_dict, longest_contig_dict, genome_length_dict, num_contigs_dict, n50_dict, n75_dict,
             n90_dict, l50_dict, l75_dict, l90_dict, orf_dist_dict, genus_dict, sequencepath):
    """
    Create a CSV report (extracted_features.csv) of all the extracted features in :sequencepath
    :param gc_dict: dictionary of strain name: GC%
    :param contig_dist_dict: dictionary of strain: tuple of contig distribution frequencies
    :param longest_contig_dict: dictionary of strain name: longest contig
    :param genome_length_dict: dictionary of strain name: total genome length
    :param num_contigs_dict: dictionary of strain name: total number of contigs
    :param n50_dict: dictionary of strain name: N50
    :param n75_dict: dictionary of strain name: N75
    :param n90_dict: dictionary of strain name: N90
    :param l50_dict: dictionary of strain name: L50
    :param l75_dict: dictionary of strain name: L75
    :param l90_dict: dictionary of strain name: L90
    :param orf_dist_dict: dictionary of strain name: tuple of ORF length frequencies
    :param genus_dict: dictionary of strain name: genus
    :param sequencepath: path of folder containing FASTA genomes
    """
    # Initialise the data string with the header row
    data = 'SampleName,TotalLength,NumContigs,LongestContig,Contigs>1000000,Contigs>500000,Contigs>100000,' \
           'Contigs>50000,Contigs>10000,Contigs>5000,Contigs<5000,TotalORFs,ORFs>3000,ORFs>1000,ORFs>500,' \
           'ORFs<500,N50,N75,N90,L50,L75,L90,GC%,Genus\n'
    for file_name in sorted(longest_contig_dict):
        # Assemble one CSV row per strain. Joining with ',' avoids the stray space the previous format string
        # ('{ORF52}, {ORF11}') wrote into the ORFs<500 column
        row = (file_name,
               genome_length_dict[file_name],
               num_contigs_dict[file_name],
               longest_contig_dict[file_name]) \
            + tuple(contig_dist_dict[file_name]) \
            + tuple(orf_dist_dict[file_name]) \
            + (n50_dict[file_name],
               n75_dict[file_name],
               n90_dict[file_name],
               l50_dict[file_name],
               l75_dict[file_name],
               l90_dict[file_name],
               gc_dict[file_name],
               genus_dict[file_name])
        data += ','.join(str(value) for value in row) + '\n'
    # Create the report and write the assembled string exactly once; the previous implementation wrote the
    # cumulative string inside the loop, duplicating earlier rows in the output
    with open(os.path.join(sequencepath, 'extracted_features.csv'), 'w') as feature_report:
        feature_report.write(data)
# Initialise the click decorator
@click.command()
@click.option('-s', '--sequencepath',
              type=click.Path(exists=True),
              required=True,
              help='Path of folder containing multi-FASTA files')
@click.option('-d', '--refseq_database',
              type=click.Path(exists=True),
              required=True,
              help='Path to reduced mash sketch of RefSeq.')
# flag_value=False with default=True makes passing -r/--report actually disable the report, as the help text
# promises. The previous is_flag=True/default=True combination set the value to True whether or not the flag
# was supplied, so the report could never be turned off.
@click.option('-r', '--report',
              default=True,
              flag_value=False,
              help='By default, a report of the extracted features is created. Include this flag if you do not want '
                   'a report created')
def cli(sequencepath, report, refseq_database):
    """
    Pass command line arguments to, and run the feature extraction functions
    """
    main(sequencepath, report, refseq_database, num_threads=multiprocessing.cpu_count())
# Run the command-line interface only when this module is executed as a script
if __name__ == '__main__':
    cli()
|
OLC-LOC-Bioinformatics/GenomeQAML | genomeqaml/extract_features.py | find_orf_distribution | python | def find_orf_distribution(orf_file_dict):
# Initialise the dictionary
orf_dist_dict = dict()
for file_name, orf_report in orf_file_dict.items():
# Initialise variable to store the frequency of the different ORF size ranges
total_orfs = 0
over_3000 = 0
over_1000 = 0
over_500 = 0
other = 0
# Open the strain-specific report
with open(orf_report, 'r') as orfreport:
for line in orfreport:
# The report has a header section that can be ignored - only parse lines beginning with '>'
if line.startswith('>'):
# Split the line on '_' characters e.g. >1_345_920_- yields contig: >1, start: 345, stop: 920,
# direction: -
contig, start, stop, direction = line.split('_')
# The size of the ORF is the end position minus the start position e.g. 920 - 345 = 575
size = int(stop) - int(start)
# Increment the total number of ORFs before binning based on ORF size
total_orfs += 1
# Increment the appropriate integer based on ORF size
if size > 3000:
over_3000 += 1
elif size > 1000:
over_1000 += 1
elif size > 500:
over_500 += 1
else:
other += 1
# Populate the dictionary with a tuple of the ORF size range frequencies
orf_dist_dict[file_name] = (total_orfs,
over_3000,
over_1000,
over_500,
other)
# Clean-up the prodigal reports
try:
os.remove(orf_report)
except IOError:
pass
return orf_dist_dict | Parse the prodigal outputs to determine the frequency of ORF size ranges for each strain
:param orf_file_dict: dictionary of strain name: /sequencepath/prodigal results.sco
:return: orf_dist_dict: dictionary of strain name: tuple of ORF size range distribution frequencies | train | https://github.com/OLC-LOC-Bioinformatics/GenomeQAML/blob/2953e574c185afab23075641da4ce5392bc003e9/genomeqaml/extract_features.py#L416-L463 | null | #!/usr/bin/env python 3
from accessoryFunctions.accessoryFunctions import printtime
from Bio.SeqUtils import GC
from biotools import mash
import multiprocessing
from Bio import SeqIO
from glob import glob
import subprocess
import shutil
import click
import time
import os
__author__ = 'adamkoziol', 'andrewlow'
def main(sequencepath, report, refseq_database, num_threads=12, start=None):
    """
    Run the appropriate functions in order
    :param sequencepath: path of folder containing FASTA genomes
    :param report: boolean to determine whether a report is to be created
    :param refseq_database: Path to reduced refseq database sketch
    :param num_threads: Number of threads to run mash/other stuff on
    :param start: epoch time used as the baseline for elapsed-time logging; defaults to the time of the call
    :return: gc_dict, contig_dist_dict, longest_contig_dict, genome_length_dict, num_contigs_dict, n50_dict, n75_dict, \
    n90_dict, l50_dict, l75_dict, l90_dict, orf_dist_dict
    """
    # The previous signature used start=time.time() as the default, which is evaluated once at module import
    # rather than per call - establish the baseline here instead
    if start is None:
        start = time.time()
    files = find_files(sequencepath)
    file_dict = filer(files)
    printtime('Using MASH to determine genera of samples', start)
    genus_dict = find_genus(file_dict, refseq_database, threads=num_threads)
    file_records = fasta_records(file_dict)
    printtime('Collecting basic quality metrics', start)
    contig_len_dict, gc_dict = fasta_stats(file_dict, file_records)
    contig_dist_dict = find_contig_distribution(contig_len_dict)
    longest_contig_dict = find_largest_contig(contig_len_dict)
    genome_length_dict = find_genome_length(contig_len_dict)
    num_contigs_dict = find_num_contigs(contig_len_dict)
    n50_dict = find_n50(contig_len_dict, genome_length_dict)
    n75_dict = find_n75(contig_len_dict, genome_length_dict)
    n90_dict = find_n90(contig_len_dict, genome_length_dict)
    l50_dict = find_l50(contig_len_dict, genome_length_dict)
    l75_dict = find_l75(contig_len_dict, genome_length_dict)
    l90_dict = find_l90(contig_len_dict, genome_length_dict)
    printtime('Using prodigal to calculate number of ORFs in each sample', start)
    orf_file_dict = predict_orfs(file_dict, num_threads=num_threads)
    orf_dist_dict = find_orf_distribution(orf_file_dict)
    if report:
        reporter(gc_dict, contig_dist_dict, longest_contig_dict, genome_length_dict, num_contigs_dict, n50_dict,
                 n75_dict, n90_dict, l50_dict, l75_dict, l90_dict, orf_dist_dict, genus_dict, sequencepath)
    printtime('Features extracted!', start)
    return gc_dict, contig_dist_dict, longest_contig_dict, genome_length_dict, num_contigs_dict, n50_dict, n75_dict, \
        n90_dict, l50_dict, l75_dict, l90_dict, orf_dist_dict
def find_files(sequencepath):
    """
    Collect every FASTA file in the supplied directory. NOTE: files must carry an extension beginning with
    '.fa' (e.g. .fasta, .fa, .fas); extensions such as .fsa or .tfa are not picked up
    :param sequencepath: path of folder containing FASTA genomes
    :return: sorted list of FASTA file paths
    """
    # Build the glob pattern once, then return the alphabetically-sorted matches
    pattern = os.path.join(sequencepath, '*.fa*')
    return sorted(glob(pattern))
def filer(filelist):
    """
    Map each file's strain name (file name stripped of path and extension) to its full path
    :param filelist: list of files to parse
    :return filedict: dictionary of strain name: /sequencepath/strain_name.extension
    """
    # Strain name = base name without its extension; build the mapping in one comprehension
    return {os.path.splitext(os.path.basename(seqfile))[0]: seqfile
            for seqfile in filelist}
def fasta_records(files):
    """
    Parse every FASTA file with SeqIO, producing one record dictionary per strain
    :param files: dictionary of strain name: /sequencepath/strain_name.extension
    :return: file_records: dictionary of strain name: {contig id: SeqRecord}
    """
    # SeqIO.to_dict keys each contig record by its id; nest those per-strain
    return {file_name: SeqIO.to_dict(SeqIO.parse(fasta, "fasta"))
            for file_name, fasta in files.items()}
def find_genus(files, database, threads=12):
    """
    Uses MASH to find the genus of fasta files.
    :param files: File dictionary returned by filer method.
    :param database: Path to reduced refseq database sketch.
    :param threads: Number of threads to run mash with.
    :return: genus_dict: Dictionary of genus for each sample. Will return NA if genus could not be found.
    """
    import tempfile
    genus_dict = dict()
    # Use a guaranteed-unique temporary directory; the previous name, derived from the fractional part of
    # time.time(), could collide between concurrent runs, and was not removed if mash raised
    tmpdir = tempfile.mkdtemp()
    try:
        for file_name, fasta in files.items():
            screen_tab = os.path.join(tmpdir, 'screen.tab')
            mash.screen(database, fasta,
                        threads=threads,
                        w='',
                        i=0.95,
                        output_file=screen_tab)
            screen_output = mash.read_mash_screen(screen_tab)
            try:
                os.remove(screen_tab)
            except OSError:
                pass
            try:
                # assumes the query_id is a path whose third-from-last component is the genus - TODO confirm
                # against the sketch layout
                genus = screen_output[0].query_id.split('/')[-3]
                # Shigella is phylogenetically Escherichia; report it as such
                if genus == 'Shigella':
                    genus = 'Escherichia'
                genus_dict[file_name] = genus
            except IndexError:
                # No screen hits: the genus could not be determined
                genus_dict[file_name] = 'NA'
    finally:
        # Always clean up the scratch directory, even on error
        shutil.rmtree(tmpdir)
    return genus_dict
def fasta_stats(files, records):
    """
    Extract per-strain contig lengths and the overall GC percentage
    :param files: dictionary of strain name: /sequencepath/strain_name.extension
    :param records: dictionary of strain name: SeqIO records
    :return: contig_len_dict, gc_dict: dictionaries of descending contig lengths, and total GC% per strain
    """
    contig_len_dict = dict()
    gc_dict = dict()
    for file_name in files:
        lengths = list()
        sequence_parts = list()
        # Walk every contig record, noting its length and collecting its sequence text
        for record in records[file_name].values():
            lengths.append(len(record.seq))
            sequence_parts.append(str(record.seq))
        # Largest-to-smallest ordering is relied upon by the N50/L50-style calculations downstream
        contig_len_dict[file_name] = sorted(lengths, reverse=True)
        # GC% of the concatenated genome sequence, rounded to two decimal places
        gc_dict[file_name] = float('{:0.2f}'.format(GC(''.join(sequence_parts))))
    return contig_len_dict, gc_dict
def find_contig_distribution(contig_lengths_dict):
    """
    Bin each strain's contigs by size range and count how many land in each bin
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :return: contig_len_dist_dict: dictionary of strain name: tuple of contig size range frequencies
    """
    # Bin boundaries, largest first; a final catch-all bin collects contigs of 5000 bp or less
    thresholds = (1000000, 500000, 100000, 50000, 10000, 5000)
    contig_len_dist_dict = dict()
    for file_name, contig_lengths in contig_lengths_dict.items():
        counts = [0] * (len(thresholds) + 1)
        for contig_length in contig_lengths:
            # Place the contig in the first bin whose threshold it exceeds
            for index, threshold in enumerate(thresholds):
                if contig_length > threshold:
                    counts[index] += 1
                    break
            else:
                # Did not exceed any threshold: goes in the catch-all bin
                counts[-1] += 1
        contig_len_dist_dict[file_name] = tuple(counts)
    return contig_len_dist_dict
def find_largest_contig(contig_lengths_dict):
    """
    Extract the longest contig of each strain
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :return: longest_contig_dict: dictionary of strain name: longest contig
    """
    # Lists arrive sorted in descending order, so the head element is the longest contig
    return {file_name: lengths[0] for file_name, lengths in contig_lengths_dict.items()}
def find_genome_length(contig_lengths_dict):
    """
    Compute the total assembly length (sum of all contig lengths) per strain
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :return: genome_length_dict: dictionary of strain name: total genome length
    """
    return {file_name: sum(lengths) for file_name, lengths in contig_lengths_dict.items()}
def find_num_contigs(contig_lengths_dict):
    """
    Count how many contigs each strain's assembly contains
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :return: num_contigs_dict: dictionary of strain name: total number of contigs
    """
    return {file_name: len(lengths) for file_name, lengths in contig_lengths_dict.items()}
def find_n50(contig_lengths_dict, genome_length_dict):
    """
    Compute the N50 of each strain: the length of the contig at which the running total of contig lengths
    (largest first) first reaches half of the genome size
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :param genome_length_dict: dictionary of strain name: total genome length
    :return: n50_dict: dictionary of strain name: N50
    """
    n50_dict = dict()
    for file_name, contig_lengths in contig_lengths_dict.items():
        # Half the genome size is the target the running total must reach
        target = genome_length_dict[file_name] * 0.5
        running_total = 0
        for contig_length in contig_lengths:
            running_total += contig_length
            if running_total >= target:
                # This contig tips the running total past the target: it is the N50
                n50_dict[file_name] = contig_length
                break
    return n50_dict
def find_n75(contig_lengths_dict, genome_length_dict):
    """
    Compute the N75 of each strain: the length of the contig at which the running total of contig lengths
    (largest first) first reaches three quarters of the genome size
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :param genome_length_dict: dictionary of strain name: total genome length
    :return: n75_dict: dictionary of strain name: N75
    """
    n75_dict = dict()
    for file_name, contig_lengths in contig_lengths_dict.items():
        target = genome_length_dict[file_name] * 0.75
        running_total = 0
        for contig_length in contig_lengths:
            running_total += contig_length
            if running_total >= target:
                n75_dict[file_name] = contig_length
                break
    return n75_dict
def find_n90(contig_lengths_dict, genome_length_dict):
    """
    Calculate the N90 for each strain. N90 is defined as the largest contig such that at least 9/10 of the total
    genome size is contained in contigs equal to or larger than this contig
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :param genome_length_dict: dictionary of strain name: total genome length
    :return: n90_dict: dictionary of strain name: N90
    """
    # Initialise the dictionary
    n90_dict = dict()
    for file_name, contig_lengths in contig_lengths_dict.items():
        currentlength = 0
        for contig_length in contig_lengths:
            currentlength += contig_length
            # If the running total now covers at least 90% of the genome length, the current contig is the N90.
            # The threshold is 0.9 (the previous 0.95 computed an N95, contradicting both the function name and
            # the companion find_l90, which uses 0.9)
            if currentlength >= genome_length_dict[file_name] * 0.9:
                n90_dict[file_name] = contig_length
                break
    return n90_dict
def find_l50(contig_lengths_dict, genome_length_dict):
    """
    Compute the L50 of each strain: the number of contigs (largest first) needed to cover half the genome size
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :param genome_length_dict: dictionary of strain name: total genome length
    :return: l50_dict: dictionary of strain name: L50
    """
    l50_dict = dict()
    for file_name, contig_lengths in contig_lengths_dict.items():
        target = genome_length_dict[file_name] * 0.5
        running_total = 0
        # enumerate with start=1 yields the 1-based count of contigs consumed so far
        for position, contig_length in enumerate(contig_lengths, start=1):
            running_total += contig_length
            if running_total >= target:
                l50_dict[file_name] = position
                break
    return l50_dict
def find_l75(contig_lengths_dict, genome_length_dict):
    """
    Compute the L75 of each strain: the number of contigs (largest first) needed to cover 3/4 of the genome size
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :param genome_length_dict: dictionary of strain name: total genome length
    :return: l75_dict: dictionary of strain name: L75
    """
    l75_dict = dict()
    for file_name, contig_lengths in contig_lengths_dict.items():
        target = genome_length_dict[file_name] * 0.75
        running_total = 0
        for position, contig_length in enumerate(contig_lengths, start=1):
            running_total += contig_length
            if running_total >= target:
                l75_dict[file_name] = position
                break
    return l75_dict
def find_l90(contig_lengths_dict, genome_length_dict):
    """
    Compute the L90 of each strain: the number of contigs (largest first) needed to cover 90% of the genome size
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :param genome_length_dict: dictionary of strain name: total genome length
    :return: l90_dict: dictionary of strain name: L90
    """
    l90_dict = dict()
    for file_name, contig_lengths in contig_lengths_dict.items():
        target = genome_length_dict[file_name] * 0.9
        running_total = 0
        for position, contig_length in enumerate(contig_lengths, start=1):
            running_total += contig_length
            if running_total >= target:
                l90_dict[file_name] = position
                break
    return l90_dict
def predict_orfs(file_dict, num_threads=1):
    """
    Use prodigal to predict the number of open reading frames (ORFs) in each strain
    :param file_dict: dictionary of strain name: /sequencepath/strain_name.extension
    :param num_threads: number of threads to use in the pool of prodigal processes
    :return: orf_file_dict: dictionary of strain name: /sequencepath/prodigal results.sco
    """
    # Map of strain name -> expected .sco results path; populated for every strain,
    # whether or not prodigal actually has to be (re-)run for it
    orf_file_dict = dict()
    # Commands still outstanding; samples with existing results are skipped below
    prodigallist = list()
    for file_name, file_path in file_dict.items():
        # Set the name of the output .sco results file
        results = os.path.splitext(file_path)[0] + '.sco'
        # Create the command for prodigal to execute - use sco output format
        prodigal = ['prodigal', '-i', file_path, '-o', results, '-f', 'sco']
        # Only run prodigal if the output file doesn't already exist
        if not os.path.isfile(results):
            prodigallist.append(prodigal)
        # Populate the dictionary with the name of the results file
        orf_file_dict[file_name] = results
    # Run the outstanding prodigal commands in parallel, one worker process per thread
    pool = multiprocessing.Pool(processes=num_threads)
    pool.map(run_prodigal, prodigallist)
    pool.close()
    pool.join()
    return orf_file_dict
def run_prodigal(prodigal_command):
    """
    Execute a single prodigal command, discarding all of its console output.
    :param prodigal_command: list of command-line arguments passed to subprocess.call
    """
    # subprocess.DEVNULL is the idiomatic way to silence a child process; it avoids
    # manually opening and closing os.devnull
    subprocess.call(prodigal_command, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
def reporter(gc_dict, contig_dist_dict, longest_contig_dict, genome_length_dict, num_contigs_dict, n50_dict, n75_dict,
             n90_dict, l50_dict, l75_dict, l90_dict, orf_dist_dict, genus_dict, sequencepath):
    """
    Create a CSV report (extracted_features.csv) of all the extracted features
    :param gc_dict: dictionary of strain name: GC%
    :param contig_dist_dict: dictionary of strain: tuple of contig distribution frequencies
    :param longest_contig_dict: dictionary of strain name: longest contig
    :param genome_length_dict: dictionary of strain name: total genome length
    :param num_contigs_dict: dictionary of strain name: total number of contigs
    :param n50_dict: dictionary of strain name: N50
    :param n75_dict: dictionary of strain name: N75
    :param n90_dict: dictionary of strain name: N90
    :param l50_dict: dictionary of strain name: L50
    :param l75_dict: dictionary of strain name: L75
    :param l90_dict: dictionary of strain name: L90
    :param orf_dist_dict: dictionary of strain name: tuple of ORF length frequencies
    :param genus_dict: dictionary of strain name: genus
    :param sequencepath: path of folder containing FASTA genomes
    """
    # Header row for the CSV output
    rows = ['SampleName,TotalLength,NumContigs,LongestContig,Contigs>1000000,Contigs>500000,Contigs>100000,'
            'Contigs>50000,Contigs>10000,Contigs>5000,Contigs<5000,TotalORFs,ORFs>3000,ORFs>1000,ORFs>500,'
            'ORFs<500,N50,N75,N90,L50,L75,L90,GC%,Genus\n']
    for file_name in sorted(longest_contig_dict):
        # Assemble one row per strain. The stray space that previously preceded the
        # ORFs<500 field has been removed, so no CSV field carries leading whitespace
        rows.append('{name},{totlen},{numcontigs},{longestcontig},{over_106},{over_56},{over_105},{over_55},'
                    '{over_104},{over_54},{under_54},{tORFS},{ORF33},{ORF13},{ORF52},{ORF11},{n50},{n75},{n90},'
                    '{l50},{l75},{l90},{gc},{genus}\n'
                    .format(name=file_name,
                            totlen=genome_length_dict[file_name],
                            numcontigs=num_contigs_dict[file_name],
                            longestcontig=longest_contig_dict[file_name],
                            over_106=contig_dist_dict[file_name][0],
                            over_56=contig_dist_dict[file_name][1],
                            over_105=contig_dist_dict[file_name][2],
                            over_55=contig_dist_dict[file_name][3],
                            over_104=contig_dist_dict[file_name][4],
                            over_54=contig_dist_dict[file_name][5],
                            under_54=contig_dist_dict[file_name][6],
                            tORFS=orf_dist_dict[file_name][0],
                            ORF33=orf_dist_dict[file_name][1],
                            ORF13=orf_dist_dict[file_name][2],
                            ORF52=orf_dist_dict[file_name][3],
                            ORF11=orf_dist_dict[file_name][4],
                            n50=n50_dict[file_name],
                            n75=n75_dict[file_name],
                            n90=n90_dict[file_name],
                            l50=l50_dict[file_name],
                            l75=l75_dict[file_name],
                            l90=l90_dict[file_name],
                            gc=gc_dict[file_name],
                            genus=genus_dict[file_name]))
    # Create and open the report for writing; emit all rows in a single call
    with open(os.path.join(sequencepath, 'extracted_features.csv'), 'w') as feature_report:
        feature_report.write(''.join(rows))
# Initialise the click decorator
@click.command()
@click.option('-s', '--sequencepath',
              type=click.Path(exists=True),
              required=True,
              help='Path of folder containing multi-FASTA files')
@click.option('-d', '--refseq_database',
              type=click.Path(exists=True),
              required=True,
              help='Path to reduced mash sketch of RefSeq.')
# NOTE(review): with is_flag=True and default=True, passing -r cannot set report
# to False, so the help text's opt-out behaviour is unreachable; a paired
# '--report/--no-report' option would match the intent — confirm before changing
@click.option('-r', '--report',
              is_flag=True,
              default=True,
              help='By default, a report of the extracted features is created. Include this flag if you do not want '
                   'a report created')
def cli(sequencepath, report, refseq_database):
    """
    Pass command line arguments to, and run the feature extraction functions
    """
    # Use every available core for the mash/prodigal steps
    main(sequencepath, report, refseq_database, num_threads=multiprocessing.cpu_count())
if __name__ == '__main__':
    # Allow the module to be executed directly as a command-line script
    cli()
|
OLC-LOC-Bioinformatics/GenomeQAML | genomeqaml/extract_features.py | reporter | python | def reporter(gc_dict, contig_dist_dict, longest_contig_dict, genome_length_dict, num_contigs_dict, n50_dict, n75_dict,
n90_dict, l50_dict, l75_dict, l90_dict, orf_dist_dict, genus_dict, sequencepath):
# Initialise string with header information
data = 'SampleName,TotalLength,NumContigs,LongestContig,Contigs>1000000,Contigs>500000,Contigs>100000,' \
'Contigs>50000,Contigs>10000,Contigs>5000,Contigs<5000,TotalORFs,ORFs>3000,ORFs>1000,ORFs>500,' \
'ORFs<500,N50,N75,N90,L50,L75,L90,GC%,Genus\n'
# Create and open the report for writign
with open(os.path.join(sequencepath, 'extracted_features.csv'), 'w') as feature_report:
for file_name in sorted(longest_contig_dict):
# Populate the data string with the appropriate values
data += '{name},{totlen},{numcontigs},{longestcontig},{over_106},{over_56},{over_105},{over_55},' \
'{over_104},{over_54},{under_54},{tORFS},{ORF33},{ORF13},{ORF52}, {ORF11},{n50},{n75},{n90},' \
'{l50},{l75},{l90},{gc},{genus}\n'\
.format(name=file_name,
totlen=genome_length_dict[file_name],
numcontigs=num_contigs_dict[file_name],
longestcontig=longest_contig_dict[file_name],
over_106=contig_dist_dict[file_name][0],
over_56=contig_dist_dict[file_name][1],
over_105=contig_dist_dict[file_name][2],
over_55=contig_dist_dict[file_name][3],
over_104=contig_dist_dict[file_name][4],
over_54=contig_dist_dict[file_name][5],
under_54=contig_dist_dict[file_name][6],
tORFS=orf_dist_dict[file_name][0],
ORF33=orf_dist_dict[file_name][1],
ORF13=orf_dist_dict[file_name][2],
ORF52=orf_dist_dict[file_name][3],
ORF11=orf_dist_dict[file_name][4],
n50=n50_dict[file_name],
n75=n75_dict[file_name],
n90=n90_dict[file_name],
l50=l50_dict[file_name],
l75=l75_dict[file_name],
l90=l90_dict[file_name],
gc=gc_dict[file_name],
genus=genus_dict[file_name])
# Write the string to file
feature_report.write(data) | Create a report of all the extracted features
:param gc_dict: dictionary of strain name: GC%
:param contig_dist_dict: dictionary of strain: tuple of contig distribution frequencies
:param longest_contig_dict: dictionary of strain name: longest contig
:param genome_length_dict: dictionary of strain name: total genome length
:param num_contigs_dict: dictionary of strain name: total number of contigs
:param n50_dict: dictionary of strain name: N50
:param n75_dict: dictionary of strain name: N75
:param n90_dict: dictionary of strain name: N90
:param l50_dict: dictionary of strain name: L50
:param l75_dict: dictionary of strain name: L75
:param l90_dict: dictionary of strain name: L90
:param orf_dist_dict: dictionary of strain name: tuple of ORF length frequencies
:param genus_dict: dictionary of strain name: genus
:param sequencepath: path of folder containing FASTA genomes | train | https://github.com/OLC-LOC-Bioinformatics/GenomeQAML/blob/2953e574c185afab23075641da4ce5392bc003e9/genomeqaml/extract_features.py#L466-L521 | null | #!/usr/bin/env python 3
from accessoryFunctions.accessoryFunctions import printtime
from Bio.SeqUtils import GC
from biotools import mash
import multiprocessing
from Bio import SeqIO
from glob import glob
import subprocess
import shutil
import click
import time
import os
__author__ = 'adamkoziol', 'andrewlow'
def main(sequencepath, report, refseq_database, num_threads=12, start=time.time()):
    """
    Run the appropriate functions in order
    :param sequencepath: path of folder containing FASTA genomes
    :param report: boolean to determine whether a report is to be created
    :param refseq_database: Path to reduced refseq database sketch
    :param num_threads: Number of threads to run mash/other stuff on
    :param start: reference timestamp used by printtime for elapsed-time messages
    :return: gc_dict, contig_dist_dict, longest_contig_dict, genome_length_dict, num_contigs_dict, n50_dict, n75_dict, \
    n90_dict, l50_dict, l75_dict, l90_dict, orf_dist_dict
    """
    # NOTE(review): the start=time.time() default is evaluated once at import time,
    # so elapsed times are relative to module import rather than this call — confirm intended
    # Discover the FASTA files and build the strain name -> path mapping
    files = find_files(sequencepath)
    file_dict = filer(files)
    printtime('Using MASH to determine genera of samples', start)
    # Genus assignment via MASH screen against the reduced RefSeq sketch
    genus_dict = find_genus(file_dict, refseq_database, threads=num_threads)
    file_records = fasta_records(file_dict)
    printtime('Collecting basic quality metrics', start)
    # Per-strain contig length lists and overall GC content
    contig_len_dict, gc_dict = fasta_stats(file_dict, file_records)
    contig_dist_dict = find_contig_distribution(contig_len_dict)
    longest_contig_dict = find_largest_contig(contig_len_dict)
    genome_length_dict = find_genome_length(contig_len_dict)
    num_contigs_dict = find_num_contigs(contig_len_dict)
    # Assembly contiguity metrics (Nxx: contig length; Lxx: contig count)
    n50_dict = find_n50(contig_len_dict, genome_length_dict)
    n75_dict = find_n75(contig_len_dict, genome_length_dict)
    n90_dict = find_n90(contig_len_dict, genome_length_dict)
    l50_dict = find_l50(contig_len_dict, genome_length_dict)
    l75_dict = find_l75(contig_len_dict, genome_length_dict)
    l90_dict = find_l90(contig_len_dict, genome_length_dict)
    printtime('Using prodigal to calculate number of ORFs in each sample', start)
    orf_file_dict = predict_orfs(file_dict, num_threads=num_threads)
    orf_dist_dict = find_orf_distribution(orf_file_dict)
    if report:
        # Write extracted_features.csv into the sequence folder
        reporter(gc_dict, contig_dist_dict, longest_contig_dict, genome_length_dict, num_contigs_dict, n50_dict,
                 n75_dict, n90_dict, l50_dict, l75_dict, l90_dict, orf_dist_dict, genus_dict, sequencepath)
    printtime('Features extracted!', start)
    return gc_dict, contig_dist_dict, longest_contig_dict, genome_length_dict, num_contigs_dict, n50_dict, n75_dict, \
        n90_dict, l50_dict, l75_dict, l90_dict, orf_dist_dict
def find_files(sequencepath):
    """
    Use glob to find all FASTA files in the provided sequence path. NOTE: FASTA files must have an extension such as
    .fasta, .fa, or .fas. Extensions of .fsa, .tfa, ect. are not currently supported
    :param sequencepath: path of folder containing FASTA genomes
    :return: sorted list of FASTA files
    """
    # Anything whose extension starts with '.fa' matches the pattern
    pattern = os.path.join(sequencepath, '*.fa*')
    return sorted(glob(pattern))
def filer(filelist):
    """
    Map each sequence file to its strain name (the basename with the extension removed)
    :param filelist: list of files to parse
    :return filedict: dictionary of strain name: /sequencepath/strain_name.extension
    """
    return {os.path.splitext(os.path.basename(seqfile))[0]: seqfile
            for seqfile in filelist}
def fasta_records(files):
    """
    Use SeqIO to build a per-strain dictionary of contig records for each FASTA file
    :param files: dictionary of strain name: /sequencepath/strain_name.extension
    :return: file_records: dictionary of strain name: {record id: SeqIO record}
    """
    # One SeqIO.to_dict per FASTA file, keyed by the strain name
    return {strain: SeqIO.to_dict(SeqIO.parse(fasta_path, "fasta"))
            for strain, fasta_path in files.items()}
def find_genus(files, database, threads=12):
    """
    Uses MASH to find the genus of fasta files.
    :param files: File dictionary returned by filer method.
    :param database: Path to reduced refseq database sketch.
    :param threads: Number of threads to run mash with.
    :return: genus_dict: Dictionary of genus for each sample. Will return NA if genus could not be found.
    """
    genus_dict = dict()
    # Use the fractional digits of the current time as a (probably unique) scratch dir name
    tmpdir = str(time.time()).split('.')[-1]
    if not os.path.isdir(tmpdir):
        os.makedirs(tmpdir)
    for file_name, fasta in files.items():
        # Screen the genome against the RefSeq sketch; i=0.95 keeps hits with identity >= 0.95
        mash.screen(database, fasta,
                    threads=threads,
                    w='',
                    i=0.95,
                    output_file=os.path.join(tmpdir, 'screen.tab'))
        screen_output = mash.read_mash_screen(os.path.join(tmpdir, 'screen.tab'))
        # Remove the screen output so the next sample starts from a clean file
        try:
            os.remove(os.path.join(tmpdir, 'screen.tab'))
        except IOError:
            pass
        try:
            # query_id looks like a path; component [-3] is presumably the genus
            # directory in the sketch layout — TODO confirm against the sketch used
            genus = screen_output[0].query_id.split('/')[-3]
            if genus == 'Shigella':
                # Fold Shigella into Escherichia (phylogenetically the same genus)
                genus = 'Escherichia'
            genus_dict[file_name] = genus
        except IndexError:
            # No hits passed the identity threshold
            genus_dict[file_name] = 'NA'
    shutil.rmtree(tmpdir)
    return genus_dict
def fasta_stats(files, records):
    """
    Parse the lengths of all contigs for each sample, as well as the total GC%
    :param files: dictionary of stain name: /sequencepath/strain_name.extension
    :param records: Dictionary of strain name: SeqIO records
    :return: contig_len_dict, gc_dict: dictionaries of list of all contig length, and total GC% for all strains
    """
    # Initialise dictionaries
    contig_len_dict = dict()
    gc_dict = dict()
    for file_name in files:
        # Initialise variables to store appropriate values parsed from contig records
        contig_lengths = list()
        # NOTE(review): initialised as str, but becomes a Bio.Seq object after the
        # first += record.seq below; GC() accepts either — confirm this is intended
        fasta_sequence = str()
        for contig, record in records[file_name].items():
            # Append the length of the contig to the list
            contig_lengths.append(len(record.seq))
            # Add the contig sequence to the string
            fasta_sequence += record.seq
        # Set the reverse sorted (e.g. largest to smallest) list of contig sizes as the value
        contig_len_dict[file_name] = sorted(contig_lengths, reverse=True)
        # Calculate the GC% of the total genome sequence using GC - format to have two decimal places
        gc_dict[file_name] = float('{:0.2f}'.format(GC(fasta_sequence)))
    return contig_len_dict, gc_dict
def find_contig_distribution(contig_lengths_dict):
    """
    Determine the frequency of different contig size ranges for each strain
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :return: contig_len_dist_dict: dictionary of strain name: tuple of contig size range frequencies
    """
    contig_len_dist_dict = dict()
    # Bin floors, largest first; lengths <= 5000 fall through to the final bin
    thresholds = (1000000, 500000, 100000, 50000, 10000, 5000)
    for strain, lengths in contig_lengths_dict.items():
        counts = [0] * (len(thresholds) + 1)
        for length in lengths:
            for index, floor in enumerate(thresholds):
                if length > floor:
                    counts[index] += 1
                    break
            else:
                # Strictly larger than no threshold: the catch-all bin
                counts[-1] += 1
        contig_len_dist_dict[strain] = tuple(counts)
    return contig_len_dist_dict
def find_largest_contig(contig_lengths_dict):
    """
    Determine the largest contig for each strain
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :return: longest_contig_dict: dictionary of strain name: longest contig
    """
    # The lists are reverse-sorted, so index 0 always holds the longest contig
    return {strain: lengths[0] for strain, lengths in contig_lengths_dict.items()}
def find_genome_length(contig_lengths_dict):
    """
    Determine the total length of all the contigs for each strain
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :return: genome_length_dict: dictionary of strain name: total genome length
    """
    # The genome length is simply the sum of all contig lengths
    return {strain: sum(lengths) for strain, lengths in contig_lengths_dict.items()}
def find_num_contigs(contig_lengths_dict):
    """
    Count the total number of contigs for each strain
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :return: num_contigs_dict: dictionary of strain name: total number of contigs
    """
    # One entry per contig, so the list length is the contig count
    return {strain: len(lengths) for strain, lengths in contig_lengths_dict.items()}
def find_n50(contig_lengths_dict, genome_length_dict):
    """
    Calculate the N50 for each strain. N50 is defined as the largest contig such that at least half of the total
    genome size is contained in contigs equal to or larger than this contig
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :param genome_length_dict: dictionary of strain name: total genome length
    :return: n50_dict: dictionary of strain name: N50
    """
    n50_dict = dict()
    for strain, lengths in contig_lengths_dict.items():
        # Half the total assembly length must be reached
        target = genome_length_dict[strain] * 0.5
        running_total = 0
        for length in lengths:
            running_total += length
            # Lengths are reverse-sorted, so the contig that crosses the target is the N50
            if running_total >= target:
                n50_dict[strain] = length
                break
    return n50_dict
def find_n75(contig_lengths_dict, genome_length_dict):
    """
    Calculate the N75 for each strain. N75 is defined as the largest contig such that at least 3/4 of the total
    genome size is contained in contigs equal to or larger than this contig
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :param genome_length_dict: dictionary of strain name: total genome length
    :return: n75_dict: dictionary of strain name: N75
    """
    n75_dict = dict()
    for strain, lengths in contig_lengths_dict.items():
        # Three quarters of the total assembly length must be reached
        target = genome_length_dict[strain] * 0.75
        running_total = 0
        for length in lengths:
            running_total += length
            # Lengths are reverse-sorted, so the contig that crosses the target is the N75
            if running_total >= target:
                n75_dict[strain] = length
                break
    return n75_dict
def find_n90(contig_lengths_dict, genome_length_dict):
    """
    Calculate the N90 for each strain. N90 is defined as the largest contig such that at least 9/10 of the total
    genome size is contained in contigs equal to or larger than this contig
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :param genome_length_dict: dictionary of strain name: total genome length
    :return: n90_dict: dictionary of strain name: N90
    """
    # Initialise the dictionary
    n90_dict = dict()
    for file_name, contig_lengths in contig_lengths_dict.items():
        currentlength = 0
        for contig_length in contig_lengths:
            currentlength += contig_length
            # If the running total is now at least 90% of the total genome length, the current
            # contig length is the N90. The previous 0.95 factor contradicted both this
            # function's definition and the companion L90 calculation, which uses 0.9
            if currentlength >= genome_length_dict[file_name] * 0.9:
                n90_dict[file_name] = contig_length
                break
    return n90_dict
def find_l50(contig_lengths_dict, genome_length_dict):
    """
    Calculate the L50 for each strain. L50 is defined as the number of contigs required to achieve the N50
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :param genome_length_dict: dictionary of strain name: total genome length
    :return: l50_dict: dictionary of strain name: L50
    """
    l50_dict = dict()
    for strain, lengths in contig_lengths_dict.items():
        # Half of the total assembly length must be reached
        target = genome_length_dict[strain] * 0.5
        running_total = 0
        # Enumerate from 1 so the contig count is available directly
        for contig_count, length in enumerate(lengths, start=1):
            running_total += length
            if running_total >= target:
                l50_dict[strain] = contig_count
                break
    return l50_dict
def find_l75(contig_lengths_dict, genome_length_dict):
    """
    Calculate the L75 for each strain. L75 is defined as the number of contigs required to achieve the N75
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :param genome_length_dict: dictionary of strain name: total genome length
    :return: l75_dict: dictionary of strain name: L75
    """
    l75_dict = dict()
    for strain, lengths in contig_lengths_dict.items():
        # Three quarters of the total assembly length must be reached
        target = genome_length_dict[strain] * 0.75
        running_total = 0
        # Enumerate from 1 so the contig count is available directly
        for contig_count, length in enumerate(lengths, start=1):
            running_total += length
            if running_total >= target:
                l75_dict[strain] = contig_count
                break
    return l75_dict
def find_l90(contig_lengths_dict, genome_length_dict):
    """
    Calculate the L90 for each strain. L90 is defined as the number of contigs required to achieve the N90
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :param genome_length_dict: dictionary of strain name: total genome length
    :return: l90_dict: dictionary of strain name: L90
    """
    l90_dict = dict()
    for strain, lengths in contig_lengths_dict.items():
        # Nine tenths of the total assembly length must be reached
        target = genome_length_dict[strain] * 0.9
        running_total = 0
        # Enumerate from 1 so the contig count is available directly
        for contig_count, length in enumerate(lengths, start=1):
            running_total += length
            if running_total >= target:
                l90_dict[strain] = contig_count
                break
    return l90_dict
def predict_orfs(file_dict, num_threads=1):
    """
    Use prodigal to predict the number of open reading frames (ORFs) in each strain
    :param file_dict: dictionary of strain name: /sequencepath/strain_name.extension
    :param num_threads: number of threads to use in the pool of prodigal processes
    :return: orf_file_dict: dictionary of strain name: /sequencepath/prodigal results.sco
    """
    # Map of strain name -> expected .sco results path; populated for every strain,
    # whether or not prodigal actually has to be (re-)run for it
    orf_file_dict = dict()
    # Commands still outstanding; samples with existing results are skipped below
    prodigallist = list()
    for file_name, file_path in file_dict.items():
        # Set the name of the output .sco results file
        results = os.path.splitext(file_path)[0] + '.sco'
        # Create the command for prodigal to execute - use sco output format
        prodigal = ['prodigal', '-i', file_path, '-o', results, '-f', 'sco']
        # Only run prodigal if the output file doesn't already exist
        if not os.path.isfile(results):
            prodigallist.append(prodigal)
        # Populate the dictionary with the name of the results file
        orf_file_dict[file_name] = results
    # Run the outstanding prodigal commands in parallel, one worker process per thread
    pool = multiprocessing.Pool(processes=num_threads)
    pool.map(run_prodigal, prodigallist)
    pool.close()
    pool.join()
    return orf_file_dict
def run_prodigal(prodigal_command):
    """
    Execute a single prodigal command, discarding all of its console output.
    :param prodigal_command: list of command-line arguments passed to subprocess.call
    """
    # subprocess.DEVNULL is the idiomatic way to silence a child process; it avoids
    # manually opening and closing os.devnull
    subprocess.call(prodigal_command, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
def find_orf_distribution(orf_file_dict):
    """
    Parse the prodigal outputs to determine the frequency of ORF size ranges for each strain
    :param orf_file_dict: dictionary of strain name: /sequencepath/prodigal results.sco
    :return: orf_dist_dict: dictionary of strain name: tuple of ORF size range distribution frequencies
    """
    orf_dist_dict = dict()
    for file_name, orf_report in orf_file_dict.items():
        # Tallies: [total ORFs, >3000 bp, >1000 bp, >500 bp, everything else]
        tallies = [0, 0, 0, 0, 0]
        with open(orf_report, 'r') as orfreport:
            for line in orfreport:
                # Header lines in the .sco output do not start with '>'
                if not line.startswith('>'):
                    continue
                # e.g. '>1_345_920_-' -> contig 1, start 345, stop 920, strand -
                _, start, stop, _ = line.split('_')
                size = int(stop) - int(start)
                tallies[0] += 1
                if size > 3000:
                    tallies[1] += 1
                elif size > 1000:
                    tallies[2] += 1
                elif size > 500:
                    tallies[3] += 1
                else:
                    tallies[4] += 1
        orf_dist_dict[file_name] = tuple(tallies)
        # Clean up the prodigal report; ignore a report that cannot be removed
        try:
            os.remove(orf_report)
        except IOError:
            pass
    return orf_dist_dict
# Initialise the click decorator
@click.command()
@click.option('-s', '--sequencepath',
              type=click.Path(exists=True),
              required=True,
              help='Path of folder containing multi-FASTA files')
@click.option('-d', '--refseq_database',
              type=click.Path(exists=True),
              required=True,
              help='Path to reduced mash sketch of RefSeq.')
# NOTE(review): with is_flag=True and default=True, passing -r cannot set report
# to False, so the help text's opt-out behaviour is unreachable; a paired
# '--report/--no-report' option would match the intent — confirm before changing
@click.option('-r', '--report',
              is_flag=True,
              default=True,
              help='By default, a report of the extracted features is created. Include this flag if you do not want '
                   'a report created')
def cli(sequencepath, report, refseq_database):
    """
    Pass command line arguments to, and run the feature extraction functions
    """
    # Use every available core for the mash/prodigal steps
    main(sequencepath, report, refseq_database, num_threads=multiprocessing.cpu_count())
if __name__ == '__main__':
    # Allow the module to be executed directly as a command-line script
    cli()
|
OLC-LOC-Bioinformatics/GenomeQAML | genomeqaml/extract_features.py | cli | python | def cli(sequencepath, report, refseq_database):
main(sequencepath, report, refseq_database, num_threads=multiprocessing.cpu_count()) | Pass command line arguments to, and run the feature extraction functions | train | https://github.com/OLC-LOC-Bioinformatics/GenomeQAML/blob/2953e574c185afab23075641da4ce5392bc003e9/genomeqaml/extract_features.py#L539-L543 | [
"def main(sequencepath, report, refseq_database, num_threads=12, start=time.time()):\n \"\"\"\n Run the appropriate functions in order\n :param sequencepath: path of folder containing FASTA genomes\n :param report: boolean to determine whether a report is to be created\n :param refseq_database: Path to reduced refseq database sketch\n :param num_threads: Number of threads to run mash/other stuff on\n :return: gc_dict, contig_dist_dict, longest_contig_dict, genome_length_dict, num_contigs_dict, n50_dict, n75_dict, \\\n n90_dict, l50_dict, l75_dict, l90_dict, orf_dist_dict\n \"\"\"\n files = find_files(sequencepath)\n file_dict = filer(files)\n printtime('Using MASH to determine genera of samples', start)\n genus_dict = find_genus(file_dict, refseq_database, threads=num_threads)\n file_records = fasta_records(file_dict)\n printtime('Collecting basic quality metrics', start)\n contig_len_dict, gc_dict = fasta_stats(file_dict, file_records)\n contig_dist_dict = find_contig_distribution(contig_len_dict)\n longest_contig_dict = find_largest_contig(contig_len_dict)\n genome_length_dict = find_genome_length(contig_len_dict)\n num_contigs_dict = find_num_contigs(contig_len_dict)\n n50_dict = find_n50(contig_len_dict, genome_length_dict)\n n75_dict = find_n75(contig_len_dict, genome_length_dict)\n n90_dict = find_n90(contig_len_dict, genome_length_dict)\n l50_dict = find_l50(contig_len_dict, genome_length_dict)\n l75_dict = find_l75(contig_len_dict, genome_length_dict)\n l90_dict = find_l90(contig_len_dict, genome_length_dict)\n printtime('Using prodigal to calculate number of ORFs in each sample', start)\n orf_file_dict = predict_orfs(file_dict, num_threads=num_threads)\n orf_dist_dict = find_orf_distribution(orf_file_dict)\n if report:\n reporter(gc_dict, contig_dist_dict, longest_contig_dict, genome_length_dict, num_contigs_dict, n50_dict,\n n75_dict, n90_dict, l50_dict, l75_dict, l90_dict, orf_dist_dict, genus_dict, sequencepath)\n printtime('Features extracted!', 
start)\n return gc_dict, contig_dist_dict, longest_contig_dict, genome_length_dict, num_contigs_dict, n50_dict, n75_dict, \\\n n90_dict, l50_dict, l75_dict, l90_dict, orf_dist_dict\n"
] | #!/usr/bin/env python 3
from accessoryFunctions.accessoryFunctions import printtime
from Bio.SeqUtils import GC
from biotools import mash
import multiprocessing
from Bio import SeqIO
from glob import glob
import subprocess
import shutil
import click
import time
import os
__author__ = 'adamkoziol', 'andrewlow'
def main(sequencepath, report, refseq_database, num_threads=12, start=time.time()):
    """
    Run the appropriate functions in order
    :param sequencepath: path of folder containing FASTA genomes
    :param report: boolean to determine whether a report is to be created
    :param refseq_database: Path to reduced refseq database sketch
    :param num_threads: Number of threads to run mash/other stuff on
    :param start: reference timestamp used by printtime for elapsed-time messages
    :return: gc_dict, contig_dist_dict, longest_contig_dict, genome_length_dict, num_contigs_dict, n50_dict, n75_dict, \
    n90_dict, l50_dict, l75_dict, l90_dict, orf_dist_dict
    """
    # NOTE(review): the start=time.time() default is evaluated once at import time,
    # so elapsed times are relative to module import rather than this call — confirm intended
    # Discover the FASTA files and build the strain name -> path mapping
    files = find_files(sequencepath)
    file_dict = filer(files)
    printtime('Using MASH to determine genera of samples', start)
    # Genus assignment via MASH screen against the reduced RefSeq sketch
    genus_dict = find_genus(file_dict, refseq_database, threads=num_threads)
    file_records = fasta_records(file_dict)
    printtime('Collecting basic quality metrics', start)
    # Per-strain contig length lists and overall GC content
    contig_len_dict, gc_dict = fasta_stats(file_dict, file_records)
    contig_dist_dict = find_contig_distribution(contig_len_dict)
    longest_contig_dict = find_largest_contig(contig_len_dict)
    genome_length_dict = find_genome_length(contig_len_dict)
    num_contigs_dict = find_num_contigs(contig_len_dict)
    # Assembly contiguity metrics (Nxx: contig length; Lxx: contig count)
    n50_dict = find_n50(contig_len_dict, genome_length_dict)
    n75_dict = find_n75(contig_len_dict, genome_length_dict)
    n90_dict = find_n90(contig_len_dict, genome_length_dict)
    l50_dict = find_l50(contig_len_dict, genome_length_dict)
    l75_dict = find_l75(contig_len_dict, genome_length_dict)
    l90_dict = find_l90(contig_len_dict, genome_length_dict)
    printtime('Using prodigal to calculate number of ORFs in each sample', start)
    orf_file_dict = predict_orfs(file_dict, num_threads=num_threads)
    orf_dist_dict = find_orf_distribution(orf_file_dict)
    if report:
        # Write extracted_features.csv into the sequence folder
        reporter(gc_dict, contig_dist_dict, longest_contig_dict, genome_length_dict, num_contigs_dict, n50_dict,
                 n75_dict, n90_dict, l50_dict, l75_dict, l90_dict, orf_dist_dict, genus_dict, sequencepath)
    printtime('Features extracted!', start)
    return gc_dict, contig_dist_dict, longest_contig_dict, genome_length_dict, num_contigs_dict, n50_dict, n75_dict, \
        n90_dict, l50_dict, l75_dict, l90_dict, orf_dist_dict
def find_files(sequencepath):
    """
    Use glob to find all FASTA files in the provided sequence path. NOTE: FASTA files must have an extension such as
    .fasta, .fa, or .fas. Extensions of .fsa, .tfa, ect. are not currently supported
    :param sequencepath: path of folder containing FASTA genomes
    :return: sorted list of FASTA files
    """
    # Anything whose extension starts with '.fa' matches the pattern
    pattern = os.path.join(sequencepath, '*.fa*')
    return sorted(glob(pattern))
def filer(filelist):
    """
    Map each sequence file to its strain name (the basename with the extension removed)
    :param filelist: list of files to parse
    :return filedict: dictionary of strain name: /sequencepath/strain_name.extension
    """
    return {os.path.splitext(os.path.basename(seqfile))[0]: seqfile
            for seqfile in filelist}
def fasta_records(files):
    """
    Use SeqIO to create dictionaries of all records for each FASTA file
    :param files: dictionary of stain name: /sequencepath/strain_name.extension
    :return: file_records: dictionary of all contig records for all strains
    """
    # For every strain, parse its FASTA file into a contig-id -> SeqRecord
    # mapping and collect those per-strain mappings under the strain name
    return {file_name: SeqIO.to_dict(SeqIO.parse(fasta, "fasta"))
            for file_name, fasta in files.items()}
def find_genus(files, database, threads=12):
    """
    Uses MASH to find the genus of fasta files.
    :param files: File dictionary returned by filer method.
    :param database: Path to reduced refseq database sketch.
    :param threads: Number of threads to run mash with.
    :return: genus_dict: Dictionary of genus for each sample. Will return NA if genus could not be found.
    """
    import tempfile  # local import: only this function needs a scratch directory
    genus_dict = dict()
    # mkdtemp yields a unique, race-free scratch directory; the previous
    # time.time()-derived name could collide between concurrent runs and was
    # never removed if mash raised part-way through
    tmpdir = tempfile.mkdtemp()
    try:
        for file_name, fasta in files.items():
            screen_tab = os.path.join(tmpdir, 'screen.tab')
            mash.screen(database, fasta,
                        threads=threads,
                        w='',
                        i=0.95,
                        output_file=screen_tab)
            screen_output = mash.read_mash_screen(screen_tab)
            # Best-effort removal of the per-sample screen result
            try:
                os.remove(screen_tab)
            except OSError:
                pass
            try:
                # The query id looks like .../<genus>/<species>/<assembly>, so
                # the genus is the third path element from the end
                genus = screen_output[0].query_id.split('/')[-3]
                # Shigella is taxonomically Escherichia; report it as such
                if genus == 'Shigella':
                    genus = 'Escherichia'
                genus_dict[file_name] = genus
            except IndexError:
                # Empty screen output: the genus could not be determined
                genus_dict[file_name] = 'NA'
    finally:
        # Always clean up the scratch directory, even when mash fails
        shutil.rmtree(tmpdir, ignore_errors=True)
    return genus_dict
def fasta_stats(files, records):
    """
    Parse the lengths of all contigs for each sample, as well as the total GC%
    :param files: dictionary of stain name: /sequencepath/strain_name.extension
    :param records: Dictionary of strain name: SeqIO records
    :return: contig_len_dict, gc_dict: dictionaries of list of all contig length, and total GC% for all strains
    """
    # Initialise dictionaries
    contig_len_dict = dict()
    gc_dict = dict()
    for file_name in files:
        contig_lengths = list()
        # Collect the per-contig sequences and join them once at the end;
        # the previous `seq += record.seq` Seq concatenation re-copied the
        # growing genome on every contig (quadratic in genome size)
        sequences = list()
        for contig, record in records[file_name].items():
            # Record the length of this contig
            contig_lengths.append(len(record.seq))
            sequences.append(str(record.seq))
        # Set the reverse sorted (e.g. largest to smallest) list of contig sizes as the value
        contig_len_dict[file_name] = sorted(contig_lengths, reverse=True)
        # Calculate the GC% of the total genome sequence using GC - format to have two decimal places
        gc_dict[file_name] = float('{:0.2f}'.format(GC(''.join(sequences))))
    return contig_len_dict, gc_dict
def find_contig_distribution(contig_lengths_dict):
    """
    Determine the frequency of different contig size ranges for each strain
    :param contig_lengths_dict: dictionary of strain name: list of contig lengths
    :return: contig_len_dist_dict: dictionary of strain name: tuple of contig size range frequencies
    """
    # Bin boundaries, largest first; a contig belongs to the first bin whose
    # boundary it strictly exceeds, otherwise to the final catch-all bin
    thresholds = (1000000, 500000, 100000, 50000, 10000, 5000)
    contig_len_dist_dict = dict()
    for file_name, contig_lengths in contig_lengths_dict.items():
        # One counter per threshold plus a final '<= 5000' bucket
        counts = [0] * (len(thresholds) + 1)
        for contig_length in contig_lengths:
            for position, boundary in enumerate(thresholds):
                if contig_length > boundary:
                    counts[position] += 1
                    break
            else:
                # Did not exceed any boundary: smallest size class
                counts[-1] += 1
        # Store the frequencies as an immutable tuple per strain
        contig_len_dist_dict[file_name] = tuple(counts)
    return contig_len_dist_dict
def find_largest_contig(contig_lengths_dict):
    """
    Determine the largest contig for each strain
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :return: longest_contig_dict: dictionary of strain name: longest contig
    """
    # The input lists are sorted in descending order, so the head of each
    # list is the longest contig for that strain
    return {file_name: contig_lengths[0]
            for file_name, contig_lengths in contig_lengths_dict.items()}
def find_genome_length(contig_lengths_dict):
    """
    Determine the total length of all the contigs for each strain
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :return: genome_length_dict: dictionary of strain name: total genome length
    """
    # Summing every contig length gives the total assembly size per strain
    return {file_name: sum(lengths)
            for file_name, lengths in contig_lengths_dict.items()}
def find_num_contigs(contig_lengths_dict):
    """
    Count the total number of contigs for each strain
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :return: num_contigs_dict: dictionary of strain name: total number of contigs
    """
    # The contig count is simply the number of entries in each length list
    return {file_name: len(lengths)
            for file_name, lengths in contig_lengths_dict.items()}
def find_n50(contig_lengths_dict, genome_length_dict):
    """
    Calculate the N50 for each strain. N50 is defined as the largest contig such that at least half of the total
    genome size is contained in contigs equal to or larger than this contig
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :param genome_length_dict: dictionary of strain name: total genome length
    :return: n50_dict: dictionary of strain name: N50
    """
    n50_dict = dict()
    for file_name, contig_lengths in contig_lengths_dict.items():
        # Half of the total assembly size is the target to reach
        target = genome_length_dict[file_name] * 0.5
        running_total = 0
        for contig_length in contig_lengths:
            running_total += contig_length
            # Contigs are visited largest-first, so the contig that first
            # pushes the running total past the target is, by definition,
            # the N50
            if running_total >= target:
                n50_dict[file_name] = contig_length
                break
    return n50_dict
def find_n75(contig_lengths_dict, genome_length_dict):
    """
    Calculate the N75 for each strain. N75 is defined as the largest contig such that at least 3/4 of the total
    genome size is contained in contigs equal to or larger than this contig
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :param genome_length_dict: dictionary of strain name: total genome length
    :return: n75_dict: dictionary of strain name: N75
    """
    n75_dict = dict()
    for file_name, contig_lengths in contig_lengths_dict.items():
        # Three quarters of the total assembly size is the target to reach
        target = genome_length_dict[file_name] * 0.75
        running_total = 0
        for contig_length in contig_lengths:
            running_total += contig_length
            # The first (largest) contig that carries the running total past
            # 3/4 of the genome is the N75
            if running_total >= target:
                n75_dict[file_name] = contig_length
                break
    return n75_dict
def find_n90(contig_lengths_dict, genome_length_dict):
    """
    Calculate the N90 for each strain. N90 is defined as the largest contig such that at least 9/10 of the total
    genome size is contained in contigs equal to or larger than this contig
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :param genome_length_dict: dictionary of strain name: total genome length
    :return: n90_dict: dictionary of strain name: N90
    """
    # Initialise the dictionary
    n90_dict = dict()
    for file_name, contig_lengths in contig_lengths_dict.items():
        currentlength = 0
        for contig_length in contig_lengths:
            currentlength += contig_length
            # If the current length is now at least 9/10 of the total genome
            # length, the current contig length is the N90.
            # Bug fix: the threshold was 0.95, which computed an N95 despite
            # the function name and docstring (and the 0.9 used by find_l90)
            if currentlength >= genome_length_dict[file_name] * 0.9:
                n90_dict[file_name] = contig_length
                break
    return n90_dict
def find_l50(contig_lengths_dict, genome_length_dict):
    """
    Calculate the L50 for each strain. L50 is defined as the number of contigs required to achieve the N50
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :param genome_length_dict: dictionary of strain name: total genome length
    :return: l50_dict: dictionary of strain name: L50
    """
    l50_dict = dict()
    for file_name, contig_lengths in contig_lengths_dict.items():
        target = genome_length_dict[file_name] * 0.5
        running_total = 0
        # enumerate(start=1) tracks how many contigs have been consumed so far
        for number_used, contig_length in enumerate(contig_lengths, start=1):
            running_total += contig_length
            # Once half of the genome is covered, the number of contigs used
            # to get there is the L50
            if running_total >= target:
                l50_dict[file_name] = number_used
                break
    return l50_dict
def find_l75(contig_lengths_dict, genome_length_dict):
    """
    Calculate the L75 for each strain. L75 is defined as the number of contigs required to achieve the N75
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :param genome_length_dict: dictionary of strain name: total genome length
    :return: l75_dict: dictionary of strain name: L75
    """
    l75_dict = dict()
    for file_name, contig_lengths in contig_lengths_dict.items():
        target = genome_length_dict[file_name] * 0.75
        running_total = 0
        # enumerate(start=1) tracks how many contigs have been consumed so far
        for number_used, contig_length in enumerate(contig_lengths, start=1):
            running_total += contig_length
            # Once 3/4 of the genome is covered, the number of contigs used
            # to get there is the L75
            if running_total >= target:
                l75_dict[file_name] = number_used
                break
    return l75_dict
def find_l90(contig_lengths_dict, genome_length_dict):
    """
    Calculate the L90 for each strain. L90 is defined as the number of contigs required to achieve the N90
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :param genome_length_dict: dictionary of strain name: total genome length
    :return: l90_dict: dictionary of strain name: L90
    """
    l90_dict = dict()
    for file_name, contig_lengths in contig_lengths_dict.items():
        target = genome_length_dict[file_name] * 0.9
        running_total = 0
        # enumerate(start=1) tracks how many contigs have been consumed so far
        for number_used, contig_length in enumerate(contig_lengths, start=1):
            running_total += contig_length
            # Once 9/10 of the genome is covered, the number of contigs used
            # to get there is the L90
            if running_total >= target:
                l90_dict[file_name] = number_used
                break
    return l90_dict
def predict_orfs(file_dict, num_threads=1):
    """
    Use prodigal to predict the number of open reading frames (ORFs) in each strain
    :param file_dict: dictionary of strain name: /sequencepath/strain_name.extension
    :param num_threads: number of threads to use in the pool of prodigal processes
    :return: orf_file_dict: dictionary of strain name: /sequencepath/prodigal results.sco
    """
    # Initialise the dictionary
    orf_file_dict = dict()
    # Commands (argument lists) that still need to be executed by the pool
    prodigallist = list()
    for file_name, file_path in file_dict.items():
        # Set the name of the output .sco results file
        results = os.path.splitext(file_path)[0] + '.sco'
        # Create the command for prodigal to execute - use sco output format
        prodigal = ['prodigal', '-i', file_path, '-o', results, '-f', 'sco']
        # Only run prodigal if the output file doesn't already exist
        if not os.path.isfile(results):
            prodigallist.append(prodigal)
        # Populate the dictionary with the name of the results file
        # (done unconditionally, so pre-existing .sco files are also reported)
        orf_file_dict[file_name] = results
    # Setup the multiprocessing pool.
    # run_prodigal is a module-level function so the pool can pickle it
    pool = multiprocessing.Pool(processes=num_threads)
    pool.map(run_prodigal, prodigallist)
    pool.close()
    pool.join()
    return orf_file_dict
def run_prodigal(prodigal_command):
    """Execute a single prodigal command, discarding all of its output.

    :param prodigal_command: prodigal argument list built by predict_orfs
    """
    # The user does not need to see prodigal's chatter: route both stdout
    # and stderr to the null device for the duration of the call
    with open(os.devnull, 'w') as devnull:
        subprocess.call(prodigal_command, stdout=devnull, stderr=devnull)
def find_orf_distribution(orf_file_dict):
    """
    Parse the prodigal outputs to determine the frequency of ORF size ranges for each strain
    :param orf_file_dict: dictionary of strain name: /sequencepath/prodigal results.sco
    :return: orf_dist_dict: dictionary of strain name: tuple of ORF size range distribution frequencies
    """
    orf_dist_dict = dict()
    for file_name, orf_report in orf_file_dict.items():
        # Counters for the total number of ORFs and for each size bin
        total_orfs = 0
        over_3000 = 0
        over_1000 = 0
        over_500 = 0
        other = 0
        with open(orf_report, 'r') as report:
            for line in report:
                # Only coordinate lines (beginning with '>') are of interest;
                # everything else in the .sco output is header material
                if not line.startswith('>'):
                    continue
                # Lines look like >1_345_920_-: contig id, start, stop, strand
                contig, start, stop, direction = line.split('_')
                # The ORF length is the stop position minus the start position
                size = int(stop) - int(start)
                total_orfs += 1
                # Bin the ORF by its size
                if size > 3000:
                    over_3000 += 1
                elif size > 1000:
                    over_1000 += 1
                elif size > 500:
                    over_500 += 1
                else:
                    other += 1
        # Record the frequencies as a tuple for this strain
        orf_dist_dict[file_name] = (total_orfs,
                                    over_3000,
                                    over_1000,
                                    over_500,
                                    other)
        # The prodigal report is no longer needed - remove it (best effort)
        try:
            os.remove(orf_report)
        except IOError:
            pass
    return orf_dist_dict
def reporter(gc_dict, contig_dist_dict, longest_contig_dict, genome_length_dict, num_contigs_dict, n50_dict, n75_dict,
             n90_dict, l50_dict, l75_dict, l90_dict, orf_dist_dict, genus_dict, sequencepath):
    """
    Create a report of all the extracted features
    :param gc_dict: dictionary of strain name: GC%
    :param contig_dist_dict: dictionary of strain: tuple of contig distribution frequencies
    :param longest_contig_dict: dictionary of strain name: longest contig
    :param genome_length_dict: dictionary of strain name: total genome length
    :param num_contigs_dict: dictionary of strain name: total number of contigs
    :param n50_dict: dictionary of strain name: N50
    :param n75_dict: dictionary of strain name: N75
    :param n90_dict: dictionary of strain name: N90
    :param l50_dict: dictionary of strain name: L50
    :param l75_dict: dictionary of strain name: L75
    :param l90_dict: dictionary of strain name: L90
    :param orf_dist_dict: dictionary of strain name: tuple of ORF length frequencies
    :param genus_dict: dictionary of strain name: genus
    :param sequencepath: path of folder containing FASTA genomes
    """
    # Initialise string with header information
    data = 'SampleName,TotalLength,NumContigs,LongestContig,Contigs>1000000,Contigs>500000,Contigs>100000,' \
           'Contigs>50000,Contigs>10000,Contigs>5000,Contigs<5000,TotalORFs,ORFs>3000,ORFs>1000,ORFs>500,' \
           'ORFs<500,N50,N75,N90,L50,L75,L90,GC%,Genus\n'
    # Create and open the report for writing
    with open(os.path.join(sequencepath, 'extracted_features.csv'), 'w') as feature_report:
        for file_name in sorted(longest_contig_dict):
            # Populate the data string with the appropriate values.
            # Bug fix: the separator between {ORF52} and {ORF11} was ', ',
            # which injected a leading space into the ORFs<500 CSV field
            data += '{name},{totlen},{numcontigs},{longestcontig},{over_106},{over_56},{over_105},{over_55},' \
                    '{over_104},{over_54},{under_54},{tORFS},{ORF33},{ORF13},{ORF52},{ORF11},{n50},{n75},{n90},' \
                    '{l50},{l75},{l90},{gc},{genus}\n'\
                .format(name=file_name,
                        totlen=genome_length_dict[file_name],
                        numcontigs=num_contigs_dict[file_name],
                        longestcontig=longest_contig_dict[file_name],
                        over_106=contig_dist_dict[file_name][0],
                        over_56=contig_dist_dict[file_name][1],
                        over_105=contig_dist_dict[file_name][2],
                        over_55=contig_dist_dict[file_name][3],
                        over_104=contig_dist_dict[file_name][4],
                        over_54=contig_dist_dict[file_name][5],
                        under_54=contig_dist_dict[file_name][6],
                        tORFS=orf_dist_dict[file_name][0],
                        ORF33=orf_dist_dict[file_name][1],
                        ORF13=orf_dist_dict[file_name][2],
                        ORF52=orf_dist_dict[file_name][3],
                        ORF11=orf_dist_dict[file_name][4],
                        n50=n50_dict[file_name],
                        n75=n75_dict[file_name],
                        n90=n90_dict[file_name],
                        l50=l50_dict[file_name],
                        l75=l75_dict[file_name],
                        l90=l90_dict[file_name],
                        gc=gc_dict[file_name],
                        genus=genus_dict[file_name])
        # Bug fix: write the report exactly once, after all rows have been
        # accumulated. The original wrote the ever-growing string on every
        # iteration, duplicating the header and earlier rows in the output
        feature_report.write(data)
# Initialise the click decorator
@click.command()
@click.option('-s', '--sequencepath',
              type=click.Path(exists=True),
              required=True,
              help='Path of folder containing multi-FASTA files')
@click.option('-d', '--refseq_database',
              type=click.Path(exists=True),
              required=True,
              help='Path to reduced mash sketch of RefSeq.')
@click.option('-r', '--report',
              is_flag=True,
              default=True,
              help='By default, a report of the extracted features is created. Include this flag if you do not want '
                   'a report created')
def cli(sequencepath, refseq_database, report):
    """Command line entry point: extract assembly features from every FASTA file in a folder."""
    # NOTE(review): the decorated function was missing here, which is a
    # SyntaxError (a decorator must be followed by a def) and left cli()
    # below undefined. The entry function is assumed to be main(sequencepath,
    # refseq_database, report) - confirm against the top of this module.
    # NOTE(review): with is_flag=True and default=True this option can never
    # actually be switched off from the command line, contradicting its help
    # text; a '--report/--no-report' option pair would implement the
    # documented behaviour - confirm intent before changing the interface.
    main(sequencepath=sequencepath, refseq_database=refseq_database, report=report)


if __name__ == '__main__':
    cli()
|
Chilipp/funcargparse | funcargparse/__init__.py | FuncArgParser.get_param_doc | python | def get_param_doc(doc, param):
arg_doc = docstrings.keep_params_s(doc, [param]) or \
docstrings.keep_types_s(doc, [param])
dtype = None
if arg_doc:
lines = arg_doc.splitlines()
arg_doc = dedents('\n' + '\n'.join(lines[1:]))
param_desc = lines[0].split(':', 1)
if len(param_desc) > 1:
dtype = param_desc[1].strip()
return arg_doc, dtype | Get the documentation and datatype for a parameter
This function returns the documentation and the argument for a
napoleon like structured docstring `doc`
Parameters
----------
doc: str
The base docstring to use
param: str
The argument to use
Returns
-------
str
The documentation of the given `param`
str
The datatype of the given `param` | train | https://github.com/Chilipp/funcargparse/blob/398ce8e7fa5aa35c465215446bda151cf1ecf7ad/funcargparse/__init__.py#L129-L157 | null | class FuncArgParser(ArgumentParser):
"""Subclass of an argument parser that get's parts of the information
from a given function"""
_finalized = False
#: The unfinished arguments after the setup
unfinished_arguments = {}
#: The sections to extract from a function docstring that should be used
#: in the epilog of this parser. See also the :meth:`setup_args` method
epilog_sections = ['Notes', 'References']
#: The formatter specification for the epilog. This can either be a string
#: out of 'header', 'bold', or
#: 'rubric' or a callable (i.e. function) that takes two arguments,
#: the section title and the section text, and returns a string.
#:
#: 'heading'
#: Use section headers such as::
#:
#: Notes
#: -----
#: 'bold'
#: Just make a bold header for the section, e.g. ``**Notes**``
#: 'rubric'
#: Use a rubric rst directive, e.g. ``.. rubric:: Notes``
#:
#: .. warning::
#:
#: When building a sphinx documentation using the sphinx-argparse
#: module, this value should be set to ``'bold'`` or ``'rubric'``! Just
#: add this two lines to your conf.py:
#:
#: .. code-block:: python
#:
#: import funcargparse
#: funcargparse.FuncArgParser.epilog_formatter = 'rubric'
epilog_formatter = 'heading'
def __init__(self, *args, **kwargs):
"""
Parameters
----------
``*args,**kwargs``
Theses arguments are determined by the
:class:`argparse.ArgumentParser` base class. Note that by default,
we use a :class:`argparse.RawTextHelpFormatter` class for the
`formatter_class` keyword, whereas the
:class:`argparse.ArgumentParser` uses a
:class:`argparse.HelpFormatter`
Other Parameters
----------------
epilog_sections: list of str
The default sections to use for the epilog (see the
:attr:`epilog_sections` attribute). They can also be specified
each time the :meth:`setup_args` method is called
epilog_formatter: {'header', 'bold', 'rubric'} or function
Specify how the epilog sections should be formatted and defaults to
the :attr:`epilog_formatter` attribute. This can either be a string
out of 'header', 'bold', or 'rubric' or a callable (i.e. function)
that takes two arguments, the section title and the section text,
and returns a string.
'heading'
Use section headers such as::
Notes
-----
'bold'
Just make a bold header for the section, e.g. ``**Notes**``
'rubric'
Use a rubric rst directive, e.g. ``.. rubric:: Notes``
"""
self._subparsers_action = None
kwargs.setdefault('formatter_class', argparse.RawTextHelpFormatter)
epilog_sections = kwargs.pop('epilog_sections', None)
if epilog_sections is not None:
self.epilog_sections = epilog_sections
epilog_formatter = kwargs.pop('epilog_formatter', None)
if epilog_formatter is not None:
self.epilog_formatter = epilog_formatter
super(FuncArgParser, self).__init__(*args, **kwargs)
self.unfinished_arguments = OrderedDict()
self._used_functions = []
self.__currentarg = None
self._chain_subparsers = False
self._setup_as = None
self._epilog_formatters = {'heading': self.format_heading,
'bold': self.format_bold,
'rubric': self.format_rubric}
@staticmethod
@docstrings.get_sectionsf('FuncArgParser.setup_args',
sections=['Parameters', 'Returns'])
@docstrings.dedent
def setup_args(self, func=None, setup_as=None, insert_at=None,
interprete=True, epilog_sections=None,
overwrite=False, append_epilog=True):
"""
Add the parameters from the given `func` to the parameter settings
Parameters
----------
func: function
The function to use. If None, a function will be returned that can
be used as a decorator
setup_as: str
The attribute that shall be assigned to the function in the
resulting namespace. If specified, this function will be used when
calling the :meth:`parse2func` method
insert_at: int
The position where the given `func` should be inserted. If None,
it will be appended at the end and used when calling the
:meth:`parse2func` method
interprete: bool
If True (default), the docstrings are interpreted and switches and
lists are automatically inserted (see the
[interpretation-docs]_
epilog_sections: list of str
The headers of the sections to extract. If None, the
:attr:`epilog_sections` attribute is used
overwrite: bool
If True, overwrite the existing epilog and the existing description
of the parser
append_epilog: bool
If True, append to the existing epilog
Returns
-------
function
Either the function that can be used as a decorator (if `func` is
``None``), or the given `func` itself.
Examples
--------
Use this method as a decorator::
>>> @parser.setup_args
... def do_something(a=1):
'''
Just an example
Parameters
----------
a: int
A number to increment by one
'''
return a + 1
>>> args = parser.parse_args('-a 2'.split())
Or by specifying the setup_as function::
>>> @parser.setup_args(setup_as='func')
... def do_something(a=1):
'''
Just an example
Parameters
----------
a: int
A number to increment by one
'''
return a + 1
>>> args = parser.parse_args('-a 2'.split())
>>> args.func is do_something
>>> parser.parse2func('-a 2'.split())
3
References
----------
.. [interpretation-docs]
http://funcargparse.readthedocs.io/en/latest/docstring_interpretation.html)
"""
def setup(func):
# insert the function
if insert_at is None:
self._used_functions.append(func)
else:
self._used_functions.insert(insert_at, func)
args_dict = self.unfinished_arguments
# save the function to use in parse2funcs
if setup_as:
args_dict[setup_as] = dict(
long=setup_as, default=func, help=argparse.SUPPRESS)
self._setup_as = setup_as
# create arguments
args, varargs, varkw, defaults = inspect.getargspec(func)
full_doc = docstrings.dedents(inspect.getdoc(func))
summary = docstrings.get_full_description(full_doc)
if summary:
if not self.description or overwrite:
self.description = summary
full_doc = docstrings._remove_summary(full_doc)
self.extract_as_epilog(full_doc, epilog_sections, overwrite,
append_epilog)
doc = docstrings._get_section(full_doc, 'Parameters') + '\n'
doc += docstrings._get_section(full_doc, 'Other Parameters')
doc = doc.rstrip()
default_min = len(args or []) - len(defaults or [])
for i, arg in enumerate(args):
if arg == 'self' or arg in args_dict:
continue
arg_doc, dtype = self.get_param_doc(doc, arg)
args_dict[arg] = d = {'dest': arg, 'short': arg.replace('_',
'-'),
'long': arg.replace('_', '-')}
if arg_doc:
d['help'] = arg_doc
if i >= default_min:
d['default'] = defaults[i - default_min]
else:
d['positional'] = True
if interprete and dtype == 'bool' and 'default' in d:
d['action'] = 'store_false' if d['default'] else \
'store_true'
elif interprete and dtype:
if dtype.startswith('list of'):
d['nargs'] = '+'
dtype = dtype[7:].strip()
if dtype in ['str', 'string', 'strings']:
d['type'] = six.text_type
if dtype == 'strings':
dtype = 'string'
else:
try:
d['type'] = getattr(builtins, dtype)
except AttributeError:
try: # maybe the dtype has a final 's'
d['type'] = getattr(builtins, dtype[:-1])
dtype = dtype[:-1]
except AttributeError:
pass
d['metavar'] = dtype
return func
if func is None:
return setup
else:
return setup(func)
@docstrings.get_sectionsf('FuncArgParser.add_subparsers')
@docstrings.dedent
def add_subparsers(self, *args, **kwargs):
"""
Add subparsers to this parser
Parameters
----------
``*args, **kwargs``
As specified by the original
:meth:`argparse.ArgumentParser.add_subparsers` method
chain: bool
Default: False. If True, It is enabled to chain subparsers"""
chain = kwargs.pop('chain', None)
ret = super(FuncArgParser, self).add_subparsers(*args, **kwargs)
if chain:
self._chain_subparsers = True
self._subparsers_action = ret
return ret
@docstrings.dedent
def setup_subparser(
self, func=None, setup_as=None, insert_at=None, interprete=True,
epilog_sections=None, overwrite=False, append_epilog=True,
return_parser=False, name=None, **kwargs):
"""
Create a subparser with the name of the given function
Parameters are the same as for the :meth:`setup_args` function, other
parameters are parsed to the :meth:`add_subparsers` method if (and only
if) this method has not already been called.
Parameters
----------
%(FuncArgParser.setup_args.parameters)s
return_parser: bool
If True, the create parser is returned instead of the function
name: str
The name of the created parser. If None, the function name is used
and underscores (``'_'``) are replaced by minus (``'-'``)
``**kwargs``
Any other parameter that is passed to the add_parser method that
creates the parser
Other Parameters
----------------
Returns
-------
FuncArgParser or %(FuncArgParser.setup_args.returns)s
If return_parser is True, the created subparser is returned
Examples
--------
Use this method as a decorator::
>>> from funcargparser import FuncArgParser
>>> parser = FuncArgParser()
>>> @parser.setup_subparser
... def my_func(my_argument=None):
... pass
>>> args = parser.parse_args('my-func -my-argument 1'.split())
"""
def setup(func):
if self._subparsers_action is None:
raise RuntimeError(
"No subparsers have yet been created! Run the "
"add_subparsers method first!")
# replace underscore by '-'
name2use = name
if name2use is None:
name2use = func.__name__.replace('_', '-')
kwargs.setdefault('help', docstrings.get_summary(
docstrings.dedents(inspect.getdoc(func))))
parser = self._subparsers_action.add_parser(name2use, **kwargs)
parser.setup_args(
func, setup_as=setup_as, insert_at=insert_at,
interprete=interprete, epilog_sections=epilog_sections,
overwrite=overwrite, append_epilog=append_epilog)
return func, parser
if func is None:
return lambda f: setup(f)[0]
else:
return setup(func)[int(return_parser)]
@docstrings.get_sectionsf('FuncArgParser.update_arg')
@docstrings.dedent
def update_arg(self, arg, if_existent=None, **kwargs):
"""
Update the `add_argument` data for the given parameter
Parameters
----------
arg: str
The name of the function argument
if_existent: bool or None
If True, the argument is updated. If None (default), the argument
is only updated, if it exists. Otherwise, if False, the given
``**kwargs`` are only used if the argument is not yet existing
``**kwargs``
The keyword arguments any parameter for the
:meth:`argparse.ArgumentParser.add_argument` method
"""
if if_existent or (if_existent is None and
arg in self.unfinished_arguments):
self.unfinished_arguments[arg].update(kwargs)
elif not if_existent and if_existent is not None:
self.unfinished_arguments.setdefault(arg, kwargs)
@docstrings.dedent
def update_argf(self, arg, **kwargs):
"""
Update the arguments as a decorator
Parameters
---------
%(FuncArgParser.update_arg.parameters)s
Examples
--------
Use this method as a decorator::
>>> from funcargparser import FuncArgParser
>>> parser = FuncArgParser()
>>> @parser.update_argf('my_argument', type=int)
... def my_func(my_argument=None):
... pass
>>> args = parser.parse_args('my-func -my-argument 1'.split())
>>> isinstance(args.my_argument, int)
True
See Also
--------
update_arg"""
return self._as_decorator('update_arg', arg, **kwargs)
def _as_decorator(self, funcname, *args, **kwargs):
def func_decorator(func):
success = False
for parser in self._get_corresponding_parsers(func):
getattr(parser, funcname)(*args, **kwargs)
success = True
if not success:
raise ValueError(
"Could not figure out to which this %s belongs" % func)
return func
return func_decorator
def _get_corresponding_parsers(self, func):
"""Get the parser that has been set up by the given `function`"""
if func in self._used_functions:
yield self
if self._subparsers_action is not None:
for parser in self._subparsers_action.choices.values():
for sp in parser._get_corresponding_parsers(func):
yield sp
def pop_arg(self, *args, **kwargs):
"""Delete a previously defined argument from the parser
"""
return self.unfinished_arguments.pop(*args, **kwargs)
def pop_argf(self, *args, **kwargs):
"""Delete a previously defined argument from the parser via decorators
Same as :meth:`pop_arg` but it can be used as a decorator"""
return self._as_decorator('pop_arg', *args, **kwargs)
def pop_key(self, arg, key, *args, **kwargs):
"""Delete a previously defined key for the `add_argument`
"""
return self.unfinished_arguments[arg].pop(key, *args, **kwargs)
def pop_keyf(self, *args, **kwargs):
"""Delete a previously defined key for the `add_argument`
Same as :meth:`pop_key` but it can be used as a decorator"""
return self._as_decorator('pop_key', *args, **kwargs)
def create_arguments(self, subparsers=False):
"""Create and add the arguments
Parameters
----------
subparsers: bool
If True, the arguments of the subparsers are also created"""
ret = []
if not self._finalized:
for arg, d in self.unfinished_arguments.items():
try:
not_positional = int(not d.pop('positional', False))
short = d.pop('short', None)
long_name = d.pop('long', None)
if short is None and long_name is None:
raise ValueError(
"Either a short (-) or a long (--) argument must "
"be provided!")
if not not_positional:
short = arg
long_name = None
d.pop('dest', None)
if short == long_name:
long_name = None
args = []
if short:
args.append('-' * not_positional + short)
if long_name:
args.append('--' * not_positional + long_name)
group = d.pop('group', self)
if d.get('action') in ['store_true', 'store_false']:
d.pop('metavar', None)
ret.append(group.add_argument(*args, **d))
except Exception:
print('Error while creating argument %s' % arg)
raise
else:
raise ValueError('Parser has already been finalized!')
self._finalized = True
if subparsers and self._subparsers_action is not None:
for parser in self._subparsers_action.choices.values():
parser.create_arguments(True)
return ret
def append2help(self, arg, s):
"""Append the given string to the help of argument `arg`
Parameters
----------
arg: str
The function argument
s: str
The string to append to the help"""
self.unfinished_arguments[arg]['help'] += s
def append2helpf(self, arg, s):
"""Append the given string to the help of argument `arg`
Parameters
----------
arg: str
The function argument
s: str
The string to append to the help"""
return self._as_decorator('append2help', arg, s)
@staticmethod
def format_bold(section, text):
"""Make a bold formatting for the section header"""
return '**%s**\n\n%s' % (section, text)
    @staticmethod
    def format_rubric(section, text):
        """Format the section header as a rst ``.. rubric::`` directive."""
        return '.. rubric:: %s\n\n%s' % (section, text)
    @staticmethod
    def format_heading(section, text):
        """Format the section with an underlined rst heading, e.g.::

            Notes
            -----
        """
        return '\n'.join([section, '-' * len(section), text])
def format_epilog_section(self, section, text):
"""Format a section for the epilog by inserting a format"""
try:
func = self._epilog_formatters[self.epilog_formatter]
except KeyError:
if not callable(self.epilog_formatter):
raise
func = self.epilog_formatter
return func(section, text)
def extract_as_epilog(self, text, sections=None, overwrite=False,
append=True):
"""Extract epilog sections from the a docstring
Parameters
----------
text
The docstring to use
sections: list of str
The headers of the sections to extract. If None, the
:attr:`epilog_sections` attribute is used
overwrite: bool
If True, overwrite the existing epilog
append: bool
If True, append to the existing epilog"""
if sections is None:
sections = self.epilog_sections
if ((not self.epilog or overwrite or append) and sections):
epilog_parts = []
for sec in sections:
text = docstrings._get_section(text, sec).strip()
if text:
epilog_parts.append(
self.format_epilog_section(sec, text))
if epilog_parts:
epilog = '\n\n'.join(epilog_parts)
if overwrite or not self.epilog:
self.epilog = epilog
else:
self.epilog += '\n\n' + epilog
    def grouparg(self, arg, my_arg=None, parent_cmds=[]):
        """
        Grouper function for chaining subcommands

        Parameters
        ----------
        arg: str
            The current command line argument that is parsed
        my_arg: str
            The name of this subparser. If None, this parser is the main
            parser and has no parent parser
        parent_cmds: list of str
            The available commands of the parent parsers

        Returns
        -------
        str or None
            The grouping key for the given `arg` or None if the key does
            not correspond to this parser or this parser is the main parser
            and does not have seen a subparser yet

        Notes
        -----
        Quite complicated, there is no real need to deal with this function
        """
        # NOTE: ``parent_cmds=[]`` is a mutable default but is only ever
        # read, never mutated, so the usual aliasing pitfall does not apply
        if self._subparsers_action is None:
            return None
        commands = self._subparsers_action.choices
        # ``__currentarg`` (name-mangled, reset by parse_known_args) tracks
        # the subcommand that is currently being grouped
        currentarg = self.__currentarg
        # the default return value is the current argument we are in or the
        # name of the subparser itself
        ret = currentarg or my_arg
        if currentarg is not None:
            # if we are already in a sub command, we use the sub parser
            sp_key = commands[currentarg].grouparg(arg, currentarg, chain(
                commands, parent_cmds))
            if sp_key is None and arg in commands:
                # if the subparser did not recognize the command, we use the
                # command the corresponds to this parser or (of this parser
                # is the parent parser) the current subparser
                self.__currentarg = currentarg = arg
                ret = my_arg or currentarg
            elif sp_key not in commands and arg in parent_cmds:
                # otherwise, if the subparser recognizes the commmand but it is
                # not in the known command of this parser, it must be another
                # command of the subparser and this parser can ignore it
                ret = None
            else:
                # otherwise the command belongs to this subparser (if this one
                # is not the subparser) or the current subparser
                ret = my_arg or currentarg
        elif arg in commands:
            # if the argument is a valid subparser, we return this one
            self.__currentarg = arg
            ret = arg
        elif arg in parent_cmds:
            # if the argument is not a valid subparser but in one of our
            # parents, we return None to signalize that we cannot categorize
            # it
            ret = None
        return ret
    def parse_known_args(self, args=None, namespace=None):
        """Parse known arguments, dispatching chained subcommands if enabled.

        If subparsers were created with ``chain=True`` (see
        :meth:`add_subparsers`), the command line is grouped by
        :meth:`grouparg`; every subcommand group is parsed (together with
        the main arguments) by the corresponding subparser and the
        resulting namespace maps each subcommand name (``-`` replaced by
        ``_``) to its sub-namespace. Otherwise the default
        :class:`argparse.ArgumentParser` behaviour is used.
        """
        if self._chain_subparsers:
            if args is None:
                args = sys.argv[1:]
            choices_d = OrderedDict()
            remainders = OrderedDict()
            main_args = []
            # get the first argument to make sure that everything works
            cmd = self.__currentarg = None
            for i, (cmd, subargs) in enumerate(groupby(args, self.grouparg)):
                if cmd is None:
                    main_args += list(subargs)
                else:
                    # replace '-' by underscore
                    ns_cmd = cmd.replace('-', '_')
                    choices_d[ns_cmd], remainders[ns_cmd] = super(
                        FuncArgParser, self).parse_known_args(
                            list(chain(main_args, subargs)))
            main_ns, remainders[None] = self.__parse_main(main_args)
            for key, val in vars(main_ns).items():
                choices_d[key] = val
            self.__currentarg = None
            if '__dummy' in choices_d:
                # drop the placeholder subcommand that __parse_main inserts
                # under python 2.7
                del choices_d['__dummy']
            return Namespace(**choices_d), list(chain(*remainders.values()))
        # otherwise, use the default behaviour
        return super(FuncArgParser, self).parse_known_args(args, namespace)
def __parse_main(self, args):
"""Parse the main arguments only. This is a work around for python 2.7
because argparse does not allow to parse arguments without subparsers
"""
if six.PY2:
self._subparsers_action.add_parser("__dummy")
return super(FuncArgParser, self).parse_known_args(
list(args) + ['__dummy'])
return super(FuncArgParser, self).parse_known_args(args)
    @docstrings.get_sectionsf('FuncArgParser.update_short')
    @docstrings.dedent
    def update_short(self, **kwargs):
        """
        Update the short optional arguments (those with one leading '-')

        This method updates the short argument name for the specified function
        arguments as stored in :attr:`unfinished_arguments`

        Parameters
        ----------
        ``**kwargs``
            Keywords must be keys in the :attr:`unfinished_arguments`
            dictionary (i.e. keywords of the root functions), values the short
            argument names

        Examples
        --------
        Setting::

            >>> parser.update_short(something='s', something_else='se')

        is basically the same as::

            >>> parser.update_arg('something', short='s')
            >>> parser.update_arg('something_else', short='se')

        which in turn is basically comparable to::

            >>> parser.add_argument('-s', '--something', ...)
            >>> parser.add_argument('-se', '--something_else', ...)

        See Also
        --------
        update_shortf, update_long"""
        # NOTE: the docstring above is recorded by the docrep decorators and
        # re-used by :meth:`update_shortf`; keep its section layout intact
        for key, val in six.iteritems(kwargs):
            self.update_arg(key, short=val)
    @docstrings.dedent
    def update_shortf(self, **kwargs):
        """
        Update the short optional arguments belonging to a function

        This method acts exactly like :meth:`update_short` but works as a
        decorator (see :meth:`update_arg` and :meth:`update_argf`)

        Parameters
        ----------
        %(FuncArgParser.update_short.parameters)s

        Returns
        -------
        function
            The function that can be used as a decorator

        Examples
        --------
        Use this method as a decorator::

            >>> @parser.update_shortf(something='s', something_else='se')
            ... def do_something(something=None, something_else=None):
            ...     ...

        See also the examples in :meth:`update_short`.

        See Also
        --------
        update_short, update_longf
        """
        # the ``%(...)s`` pattern above is expanded by @docstrings.dedent
        # from the sections recorded for :meth:`update_short`
        return self._as_decorator('update_short', **kwargs)
    @docstrings.get_sectionsf('FuncArgParser.update_long')
    @docstrings.dedent
    def update_long(self, **kwargs):
        """
        Update the long optional arguments (those with two leading '-')

        This method updates the short argument name for the specified function
        arguments as stored in :attr:`unfinished_arguments`

        Parameters
        ----------
        ``**kwargs``
            Keywords must be keys in the :attr:`unfinished_arguments`
            dictionary (i.e. keywords of the root functions), values the long
            argument names

        Examples
        --------
        Setting::

            >>> parser.update_long(something='s', something_else='se')

        is basically the same as::

            >>> parser.update_arg('something', long='s')
            >>> parser.update_arg('something_else', long='se')

        which in turn is basically comparable to::

            >>> parser.add_argument('--s', dest='something', ...)
            >>> parser.add_argument('--se', dest='something_else', ...)

        See Also
        --------
        update_short, update_longf"""
        # NOTE: the docstring above is recorded by the docrep decorators and
        # re-used by :meth:`update_longf`; keep its section layout intact
        for key, val in six.iteritems(kwargs):
            self.update_arg(key, long=val)
    @docstrings.dedent
    def update_longf(self, **kwargs):
        """
        Update the long optional arguments belonging to a function

        This method acts exactly like :meth:`update_long` but works as a
        decorator (see :meth:`update_arg` and :meth:`update_argf`)

        Parameters
        ----------
        %(FuncArgParser.update_long.parameters)s

        Returns
        -------
        function
            The function that can be used as a decorator

        Examples
        --------
        Use this method as a decorator::

            >>> @parser.update_shortf(something='s', something_else='se')
            ... def do_something(something=None, something_else=None):
            ...     ...

        See also the examples in :meth:`update_long`.

        See Also
        --------
        update_short, update_longf
        """
        # the ``%(...)s`` pattern above is expanded by @docstrings.dedent
        # from the sections recorded for :meth:`update_long`
        return self._as_decorator('update_long', **kwargs)
def parse2func(self, args=None, func=None):
"""Parse the command line arguments to the setup function
This method parses the given command line arguments to the function
used in the :meth:`setup_args` method to setup up this parser
Parameters
----------
args: list
The list of command line arguments
func: function
An alternative function to use. If None, the last function or the
one specified through the `setup_as` parameter in the
:meth:`setup_args` is used.
Returns
-------
object
What ever is returned by the called function
Note
----
This method does not cover subparsers!"""
kws = vars(self.parse_args(args))
if func is None:
if self._setup_as:
func = kws.pop(self._setup_as)
else:
func = self._used_functions[-1]
return func(**kws)
def parse_known2func(self, args=None, func=None):
"""Parse the command line arguments to the setup function
This method parses the given command line arguments to the function
used in the :meth:`setup_args` method to setup up this parser
Parameters
----------
args: list
The list of command line arguments
func: function or str
An alternative function to use. If None, the last function or the
one specified through the `setup_as` parameter in the
:meth:`setup_args` is used.
Returns
-------
object
What ever is returned by the called function
list
The remaining command line arguments that could not be interpreted
Note
----
This method does not cover subparsers!"""
ns, remainder = self.parse_known_args(args)
kws = vars(ns)
if func is None:
if self._setup_as:
func = kws.pop(self._setup_as)
else:
func = self._used_functions[-1]
return func(**kws), remainder
def parse_chained(self, args=None):
"""
Parse the argument directly to the function used for setup
This function parses the command line arguments to the function that
has been used for the :meth:`setup_args`.
Parameters
----------
args: list
The arguments parsed to the :meth:`parse_args` function
Returns
-------
argparse.Namespace
The namespace with mapping from command name to the function
return
See also
--------
parse_known_chained
"""
kws = vars(self.parse_args(args))
return self._parse2subparser_funcs(kws)
def parse_known_chained(self, args=None):
"""
Parse the argument directly to the function used for setup
This function parses the command line arguments to the function that
has been used for the :meth:`setup_args` method.
Parameters
----------
args: list
The arguments parsed to the :meth:`parse_args` function
Returns
-------
argparse.Namespace
The namespace with mapping from command name to the function
return
list
The remaining arguments that could not be interpreted
See also
--------
parse_known
"""
ns, remainder = self.parse_known_args(args)
kws = vars(ns)
return self._parse2subparser_funcs(kws), remainder
    def _parse2subparser_funcs(self, kws):
        """
        Recursive function to parse arguments to chained parsers

        Parameters
        ----------
        kws: dict
            Mapping from argument/subcommand names to parsed values

        Returns
        -------
        object or argparse.Namespace or None
            The return value of this parser's setup function if no
            subcommand was used, otherwise a namespace mapping each used
            subcommand to the recursive result of its subparser. None if
            no setup function is known.
        """
        choices = getattr(self._subparsers_action, 'choices', {})
        # subcommand names may contain '-' which appears as '_' in the
        # parsed namespace keys
        replaced = {key.replace('-', '_'): key for key in choices}
        sp_commands = set(replaced).intersection(kws)
        if not sp_commands:
            if self._setup_as is not None:
                func = kws.pop(self._setup_as)
            else:
                try:
                    func = self._used_functions[-1]
                except IndexError:
                    # no function was used to set up this parser
                    return None
            return func(**{
                key: kws[key] for key in set(kws).difference(choices)})
        else:
            ret = {}
            for key in sp_commands:
                ret[key.replace('-', '_')] = \
                    choices[replaced[key]]._parse2subparser_funcs(
                        vars(kws[key]))
            return Namespace(**ret)
def get_subparser(self, name):
"""
Convenience method to get a certain subparser
Parameters
----------
name: str
The name of the subparser
Returns
-------
FuncArgParser
The subparsers corresponding to `name`
"""
if self._subparsers_action is None:
raise ValueError("%s has no subparsers defined!" % self)
return self._subparsers_action.choices[name]
|
Chilipp/funcargparse | funcargparse/__init__.py | FuncArgParser.setup_args | python | def setup_args(self, func=None, setup_as=None, insert_at=None,
interprete=True, epilog_sections=None,
overwrite=False, append_epilog=True):
def setup(func):
# insert the function
if insert_at is None:
self._used_functions.append(func)
else:
self._used_functions.insert(insert_at, func)
args_dict = self.unfinished_arguments
# save the function to use in parse2funcs
if setup_as:
args_dict[setup_as] = dict(
long=setup_as, default=func, help=argparse.SUPPRESS)
self._setup_as = setup_as
# create arguments
args, varargs, varkw, defaults = inspect.getargspec(func)
full_doc = docstrings.dedents(inspect.getdoc(func))
summary = docstrings.get_full_description(full_doc)
if summary:
if not self.description or overwrite:
self.description = summary
full_doc = docstrings._remove_summary(full_doc)
self.extract_as_epilog(full_doc, epilog_sections, overwrite,
append_epilog)
doc = docstrings._get_section(full_doc, 'Parameters') + '\n'
doc += docstrings._get_section(full_doc, 'Other Parameters')
doc = doc.rstrip()
default_min = len(args or []) - len(defaults or [])
for i, arg in enumerate(args):
if arg == 'self' or arg in args_dict:
continue
arg_doc, dtype = self.get_param_doc(doc, arg)
args_dict[arg] = d = {'dest': arg, 'short': arg.replace('_',
'-'),
'long': arg.replace('_', '-')}
if arg_doc:
d['help'] = arg_doc
if i >= default_min:
d['default'] = defaults[i - default_min]
else:
d['positional'] = True
if interprete and dtype == 'bool' and 'default' in d:
d['action'] = 'store_false' if d['default'] else \
'store_true'
elif interprete and dtype:
if dtype.startswith('list of'):
d['nargs'] = '+'
dtype = dtype[7:].strip()
if dtype in ['str', 'string', 'strings']:
d['type'] = six.text_type
if dtype == 'strings':
dtype = 'string'
else:
try:
d['type'] = getattr(builtins, dtype)
except AttributeError:
try: # maybe the dtype has a final 's'
d['type'] = getattr(builtins, dtype[:-1])
dtype = dtype[:-1]
except AttributeError:
pass
d['metavar'] = dtype
return func
if func is None:
return setup
else:
return setup(func) | Add the parameters from the given `func` to the parameter settings
Parameters
----------
func: function
The function to use. If None, a function will be returned that can
be used as a decorator
setup_as: str
The attribute that shall be assigned to the function in the
resulting namespace. If specified, this function will be used when
calling the :meth:`parse2func` method
insert_at: int
The position where the given `func` should be inserted. If None,
it will be appended at the end and used when calling the
:meth:`parse2func` method
interprete: bool
If True (default), the docstrings are interpreted and switches and
lists are automatically inserted (see the
[interpretation-docs]_
epilog_sections: list of str
The headers of the sections to extract. If None, the
:attr:`epilog_sections` attribute is used
overwrite: bool
If True, overwrite the existing epilog and the existing description
of the parser
append_epilog: bool
If True, append to the existing epilog
Returns
-------
function
Either the function that can be used as a decorator (if `func` is
``None``), or the given `func` itself.
Examples
--------
Use this method as a decorator::
>>> @parser.setup_args
... def do_something(a=1):
'''
Just an example
Parameters
----------
a: int
A number to increment by one
'''
return a + 1
>>> args = parser.parse_args('-a 2'.split())
Or by specifying the setup_as function::
>>> @parser.setup_args(setup_as='func')
... def do_something(a=1):
'''
Just an example
Parameters
----------
a: int
A number to increment by one
'''
return a + 1
>>> args = parser.parse_args('-a 2'.split())
>>> args.func is do_something
>>> parser.parse2func('-a 2'.split())
3
References
----------
.. [interpretation-docs]
http://funcargparse.readthedocs.io/en/latest/docstring_interpretation.html) | train | https://github.com/Chilipp/funcargparse/blob/398ce8e7fa5aa35c465215446bda151cf1ecf7ad/funcargparse/__init__.py#L162-L310 | [
"def setup(func):\n # insert the function\n if insert_at is None:\n self._used_functions.append(func)\n else:\n self._used_functions.insert(insert_at, func)\n\n args_dict = self.unfinished_arguments\n\n # save the function to use in parse2funcs\n if setup_as:\n args_dict[setup_as] = dict(\n long=setup_as, default=func, help=argparse.SUPPRESS)\n self._setup_as = setup_as\n\n # create arguments\n args, varargs, varkw, defaults = inspect.getargspec(func)\n full_doc = docstrings.dedents(inspect.getdoc(func))\n\n summary = docstrings.get_full_description(full_doc)\n if summary:\n if not self.description or overwrite:\n self.description = summary\n full_doc = docstrings._remove_summary(full_doc)\n\n self.extract_as_epilog(full_doc, epilog_sections, overwrite,\n append_epilog)\n\n doc = docstrings._get_section(full_doc, 'Parameters') + '\\n'\n doc += docstrings._get_section(full_doc, 'Other Parameters')\n doc = doc.rstrip()\n default_min = len(args or []) - len(defaults or [])\n for i, arg in enumerate(args):\n if arg == 'self' or arg in args_dict:\n continue\n arg_doc, dtype = self.get_param_doc(doc, arg)\n args_dict[arg] = d = {'dest': arg, 'short': arg.replace('_',\n '-'),\n 'long': arg.replace('_', '-')}\n if arg_doc:\n d['help'] = arg_doc\n if i >= default_min:\n d['default'] = defaults[i - default_min]\n else:\n d['positional'] = True\n if interprete and dtype == 'bool' and 'default' in d:\n d['action'] = 'store_false' if d['default'] else \\\n 'store_true'\n elif interprete and dtype:\n if dtype.startswith('list of'):\n d['nargs'] = '+'\n dtype = dtype[7:].strip()\n if dtype in ['str', 'string', 'strings']:\n d['type'] = six.text_type\n if dtype == 'strings':\n dtype = 'string'\n else:\n try:\n d['type'] = getattr(builtins, dtype)\n except AttributeError:\n try: # maybe the dtype has a final 's'\n d['type'] = getattr(builtins, dtype[:-1])\n dtype = dtype[:-1]\n except AttributeError:\n pass\n d['metavar'] = dtype\n return func\n"
] | class FuncArgParser(ArgumentParser):
"""Subclass of an argument parser that get's parts of the information
from a given function"""
_finalized = False
#: The unfinished arguments after the setup
unfinished_arguments = {}
#: The sections to extract from a function docstring that should be used
#: in the epilog of this parser. See also the :meth:`setup_args` method
epilog_sections = ['Notes', 'References']
#: The formatter specification for the epilog. This can either be a string
#: out of 'header', 'bold', or
#: 'rubric' or a callable (i.e. function) that takes two arguments,
#: the section title and the section text, and returns a string.
#:
#: 'heading'
#: Use section headers such as::
#:
#: Notes
#: -----
#: 'bold'
#: Just make a bold header for the section, e.g. ``**Notes**``
#: 'rubric'
#: Use a rubric rst directive, e.g. ``.. rubric:: Notes``
#:
#: .. warning::
#:
#: When building a sphinx documentation using the sphinx-argparse
#: module, this value should be set to ``'bold'`` or ``'rubric'``! Just
#: add this two lines to your conf.py:
#:
#: .. code-block:: python
#:
#: import funcargparse
#: funcargparse.FuncArgParser.epilog_formatter = 'rubric'
epilog_formatter = 'heading'
def __init__(self, *args, **kwargs):
"""
Parameters
----------
``*args,**kwargs``
Theses arguments are determined by the
:class:`argparse.ArgumentParser` base class. Note that by default,
we use a :class:`argparse.RawTextHelpFormatter` class for the
`formatter_class` keyword, whereas the
:class:`argparse.ArgumentParser` uses a
:class:`argparse.HelpFormatter`
Other Parameters
----------------
epilog_sections: list of str
The default sections to use for the epilog (see the
:attr:`epilog_sections` attribute). They can also be specified
each time the :meth:`setup_args` method is called
epilog_formatter: {'header', 'bold', 'rubric'} or function
Specify how the epilog sections should be formatted and defaults to
the :attr:`epilog_formatter` attribute. This can either be a string
out of 'header', 'bold', or 'rubric' or a callable (i.e. function)
that takes two arguments, the section title and the section text,
and returns a string.
'heading'
Use section headers such as::
Notes
-----
'bold'
Just make a bold header for the section, e.g. ``**Notes**``
'rubric'
Use a rubric rst directive, e.g. ``.. rubric:: Notes``
"""
self._subparsers_action = None
kwargs.setdefault('formatter_class', argparse.RawTextHelpFormatter)
epilog_sections = kwargs.pop('epilog_sections', None)
if epilog_sections is not None:
self.epilog_sections = epilog_sections
epilog_formatter = kwargs.pop('epilog_formatter', None)
if epilog_formatter is not None:
self.epilog_formatter = epilog_formatter
super(FuncArgParser, self).__init__(*args, **kwargs)
self.unfinished_arguments = OrderedDict()
self._used_functions = []
self.__currentarg = None
self._chain_subparsers = False
self._setup_as = None
self._epilog_formatters = {'heading': self.format_heading,
'bold': self.format_bold,
'rubric': self.format_rubric}
    @staticmethod
    def get_param_doc(doc, param):
        """Get the documentation and datatype for a parameter

        This function returns the documentation and the argument for a
        napoleon like structured docstring `doc`

        Parameters
        ----------
        doc: str
            The base docstring to use
        param: str
            The argument to use

        Returns
        -------
        str
            The documentation of the given `param`
        str
            The datatype of the given `param`"""
        arg_doc = docstrings.keep_params_s(doc, [param]) or \
            docstrings.keep_types_s(doc, [param])
        dtype = None
        if arg_doc:
            lines = arg_doc.splitlines()
            # the first line is the ``name: type`` header, the remaining
            # lines are the indented description
            arg_doc = dedents('\n' + '\n'.join(lines[1:]))
            param_desc = lines[0].split(':', 1)
            if len(param_desc) > 1:
                dtype = param_desc[1].strip()
        return arg_doc, dtype
@docstrings.get_sectionsf('FuncArgParser.setup_args',
sections=['Parameters', 'Returns'])
@docstrings.dedent
@docstrings.get_sectionsf('FuncArgParser.add_subparsers')
@docstrings.dedent
def add_subparsers(self, *args, **kwargs):
"""
Add subparsers to this parser
Parameters
----------
``*args, **kwargs``
As specified by the original
:meth:`argparse.ArgumentParser.add_subparsers` method
chain: bool
Default: False. If True, It is enabled to chain subparsers"""
chain = kwargs.pop('chain', None)
ret = super(FuncArgParser, self).add_subparsers(*args, **kwargs)
if chain:
self._chain_subparsers = True
self._subparsers_action = ret
return ret
@docstrings.dedent
def setup_subparser(
self, func=None, setup_as=None, insert_at=None, interprete=True,
epilog_sections=None, overwrite=False, append_epilog=True,
return_parser=False, name=None, **kwargs):
"""
Create a subparser with the name of the given function
Parameters are the same as for the :meth:`setup_args` function, other
parameters are parsed to the :meth:`add_subparsers` method if (and only
if) this method has not already been called.
Parameters
----------
%(FuncArgParser.setup_args.parameters)s
return_parser: bool
If True, the create parser is returned instead of the function
name: str
The name of the created parser. If None, the function name is used
and underscores (``'_'``) are replaced by minus (``'-'``)
``**kwargs``
Any other parameter that is passed to the add_parser method that
creates the parser
Other Parameters
----------------
Returns
-------
FuncArgParser or %(FuncArgParser.setup_args.returns)s
If return_parser is True, the created subparser is returned
Examples
--------
Use this method as a decorator::
>>> from funcargparser import FuncArgParser
>>> parser = FuncArgParser()
>>> @parser.setup_subparser
... def my_func(my_argument=None):
... pass
>>> args = parser.parse_args('my-func -my-argument 1'.split())
"""
def setup(func):
if self._subparsers_action is None:
raise RuntimeError(
"No subparsers have yet been created! Run the "
"add_subparsers method first!")
# replace underscore by '-'
name2use = name
if name2use is None:
name2use = func.__name__.replace('_', '-')
kwargs.setdefault('help', docstrings.get_summary(
docstrings.dedents(inspect.getdoc(func))))
parser = self._subparsers_action.add_parser(name2use, **kwargs)
parser.setup_args(
func, setup_as=setup_as, insert_at=insert_at,
interprete=interprete, epilog_sections=epilog_sections,
overwrite=overwrite, append_epilog=append_epilog)
return func, parser
if func is None:
return lambda f: setup(f)[0]
else:
return setup(func)[int(return_parser)]
    @docstrings.get_sectionsf('FuncArgParser.update_arg')
    @docstrings.dedent
    def update_arg(self, arg, if_existent=None, **kwargs):
        """
        Update the `add_argument` data for the given parameter

        Parameters
        ----------
        arg: str
            The name of the function argument
        if_existent: bool or None
            If True, the argument is updated. If None (default), the argument
            is only updated, if it exists. Otherwise, if False, the given
            ``**kwargs`` are only used if the argument is not yet existing
        ``**kwargs``
            The keyword arguments any parameter for the
            :meth:`argparse.ArgumentParser.add_argument` method
        """
        if if_existent or (if_existent is None and
                           arg in self.unfinished_arguments):
            # if_existent=True with an unknown `arg` raises a KeyError here;
            # if_existent=None with an unknown `arg` silently does nothing
            self.unfinished_arguments[arg].update(kwargs)
        elif not if_existent and if_existent is not None:
            # explicitly False: keep an existing entry untouched and only
            # create a new one when `arg` is unknown
            self.unfinished_arguments.setdefault(arg, kwargs)
    @docstrings.dedent
    def update_argf(self, arg, **kwargs):
        """
        Update the arguments as a decorator

        Parameters
        ---------
        %(FuncArgParser.update_arg.parameters)s

        Examples
        --------
        Use this method as a decorator::

            >>> from funcargparser import FuncArgParser

            >>> parser = FuncArgParser()

            >>> @parser.update_argf('my_argument', type=int)
            ... def my_func(my_argument=None):
            ...     pass

            >>> args = parser.parse_args('my-func -my-argument 1'.split())

            >>> isinstance(args.my_argument, int)
            True

        See Also
        --------
        update_arg"""
        # the ``%(...)s`` pattern above is expanded by @docstrings.dedent
        # from the sections recorded for :meth:`update_arg`
        return self._as_decorator('update_arg', arg, **kwargs)
def _as_decorator(self, funcname, *args, **kwargs):
def func_decorator(func):
success = False
for parser in self._get_corresponding_parsers(func):
getattr(parser, funcname)(*args, **kwargs)
success = True
if not success:
raise ValueError(
"Could not figure out to which this %s belongs" % func)
return func
return func_decorator
def _get_corresponding_parsers(self, func):
"""Get the parser that has been set up by the given `function`"""
if func in self._used_functions:
yield self
if self._subparsers_action is not None:
for parser in self._subparsers_action.choices.values():
for sp in parser._get_corresponding_parsers(func):
yield sp
def pop_arg(self, *args, **kwargs):
"""Delete a previously defined argument from the parser
"""
return self.unfinished_arguments.pop(*args, **kwargs)
def pop_argf(self, *args, **kwargs):
"""Delete a previously defined argument from the parser via decorators
Same as :meth:`pop_arg` but it can be used as a decorator"""
return self._as_decorator('pop_arg', *args, **kwargs)
def pop_key(self, arg, key, *args, **kwargs):
"""Delete a previously defined key for the `add_argument`
"""
return self.unfinished_arguments[arg].pop(key, *args, **kwargs)
def pop_keyf(self, *args, **kwargs):
"""Delete a previously defined key for the `add_argument`
Same as :meth:`pop_key` but it can be used as a decorator"""
return self._as_decorator('pop_key', *args, **kwargs)
def create_arguments(self, subparsers=False):
"""Create and add the arguments
Parameters
----------
subparsers: bool
If True, the arguments of the subparsers are also created"""
ret = []
if not self._finalized:
for arg, d in self.unfinished_arguments.items():
try:
not_positional = int(not d.pop('positional', False))
short = d.pop('short', None)
long_name = d.pop('long', None)
if short is None and long_name is None:
raise ValueError(
"Either a short (-) or a long (--) argument must "
"be provided!")
if not not_positional:
short = arg
long_name = None
d.pop('dest', None)
if short == long_name:
long_name = None
args = []
if short:
args.append('-' * not_positional + short)
if long_name:
args.append('--' * not_positional + long_name)
group = d.pop('group', self)
if d.get('action') in ['store_true', 'store_false']:
d.pop('metavar', None)
ret.append(group.add_argument(*args, **d))
except Exception:
print('Error while creating argument %s' % arg)
raise
else:
raise ValueError('Parser has already been finalized!')
self._finalized = True
if subparsers and self._subparsers_action is not None:
for parser in self._subparsers_action.choices.values():
parser.create_arguments(True)
return ret
def append2help(self, arg, s):
"""Append the given string to the help of argument `arg`
Parameters
----------
arg: str
The function argument
s: str
The string to append to the help"""
self.unfinished_arguments[arg]['help'] += s
def append2helpf(self, arg, s):
"""Append the given string to the help of argument `arg`
Parameters
----------
arg: str
The function argument
s: str
The string to append to the help"""
return self._as_decorator('append2help', arg, s)
@staticmethod
def format_bold(section, text):
"""Make a bold formatting for the section header"""
return '**%s**\n\n%s' % (section, text)
@staticmethod
def format_rubric(section, text):
"""Make a bold formatting for the section header"""
return '.. rubric:: %s\n\n%s' % (section, text)
@staticmethod
def format_heading(section, text):
return '\n'.join([section, '-' * len(section), text])
def format_epilog_section(self, section, text):
"""Format a section for the epilog by inserting a format"""
try:
func = self._epilog_formatters[self.epilog_formatter]
except KeyError:
if not callable(self.epilog_formatter):
raise
func = self.epilog_formatter
return func(section, text)
def extract_as_epilog(self, text, sections=None, overwrite=False,
append=True):
"""Extract epilog sections from the a docstring
Parameters
----------
text
The docstring to use
sections: list of str
The headers of the sections to extract. If None, the
:attr:`epilog_sections` attribute is used
overwrite: bool
If True, overwrite the existing epilog
append: bool
If True, append to the existing epilog"""
if sections is None:
sections = self.epilog_sections
if ((not self.epilog or overwrite or append) and sections):
epilog_parts = []
for sec in sections:
text = docstrings._get_section(text, sec).strip()
if text:
epilog_parts.append(
self.format_epilog_section(sec, text))
if epilog_parts:
epilog = '\n\n'.join(epilog_parts)
if overwrite or not self.epilog:
self.epilog = epilog
else:
self.epilog += '\n\n' + epilog
def grouparg(self, arg, my_arg=None, parent_cmds=[]):
"""
Grouper function for chaining subcommands
Parameters
----------
arg: str
The current command line argument that is parsed
my_arg: str
The name of this subparser. If None, this parser is the main
parser and has no parent parser
parent_cmds: list of str
The available commands of the parent parsers
Returns
-------
str or None
The grouping key for the given `arg` or None if the key does
not correspond to this parser or this parser is the main parser
and does not have seen a subparser yet
Notes
-----
Quite complicated, there is no real need to deal with this function
"""
if self._subparsers_action is None:
return None
commands = self._subparsers_action.choices
currentarg = self.__currentarg
# the default return value is the current argument we are in or the
# name of the subparser itself
ret = currentarg or my_arg
if currentarg is not None:
# if we are already in a sub command, we use the sub parser
sp_key = commands[currentarg].grouparg(arg, currentarg, chain(
commands, parent_cmds))
if sp_key is None and arg in commands:
# if the subparser did not recognize the command, we use the
# command the corresponds to this parser or (of this parser
# is the parent parser) the current subparser
self.__currentarg = currentarg = arg
ret = my_arg or currentarg
elif sp_key not in commands and arg in parent_cmds:
# otherwise, if the subparser recognizes the commmand but it is
# not in the known command of this parser, it must be another
# command of the subparser and this parser can ignore it
ret = None
else:
# otherwise the command belongs to this subparser (if this one
# is not the subparser) or the current subparser
ret = my_arg or currentarg
elif arg in commands:
# if the argument is a valid subparser, we return this one
self.__currentarg = arg
ret = arg
elif arg in parent_cmds:
# if the argument is not a valid subparser but in one of our
# parents, we return None to signalize that we cannot categorize
# it
ret = None
return ret
    def parse_known_args(self, args=None, namespace=None):
        """Parse the known command line arguments, possibly chaining commands

        If subparsers were created with ``chain=True``, `args` is grouped by
        subcommand via :meth:`grouparg` and every group is parsed separately.
        The returned namespace then maps each (underscore-normalized) command
        name to the namespace of that subcommand, plus the attributes of the
        main parser. Otherwise, the default
        :class:`argparse.ArgumentParser` behaviour is used.
        """
        if self._chain_subparsers:
            if args is None:
                args = sys.argv[1:]
            choices_d = OrderedDict()
            remainders = OrderedDict()
            main_args = []
            # get the first argument to make sure that everything works
            cmd = self.__currentarg = None
            for i, (cmd, subargs) in enumerate(groupby(args, self.grouparg)):
                if cmd is None:
                    # arguments before any subcommand belong to this parser
                    main_args += list(subargs)
                else:
                    # replace '-' by underscore
                    ns_cmd = cmd.replace('-', '_')
                    # parse the main arguments together with this group so
                    # that the main options stay effective for every command
                    choices_d[ns_cmd], remainders[ns_cmd] = super(
                        FuncArgParser, self).parse_known_args(
                            list(chain(main_args, subargs)))
            main_ns, remainders[None] = self.__parse_main(main_args)
            for key, val in vars(main_ns).items():
                choices_d[key] = val
            # reset the grouping state for the next parsing run
            self.__currentarg = None
            # '__dummy' is the placeholder subcommand added by __parse_main
            # on python 2.7; it must not leak into the result
            if '__dummy' in choices_d:
                del choices_d['__dummy']
            return Namespace(**choices_d), list(chain(*remainders.values()))
        # otherwise, use the default behaviour
        return super(FuncArgParser, self).parse_known_args(args, namespace)
    def __parse_main(self, args):
        """Parse the main arguments only. This is a work around for python 2.7
        because argparse does not allow to parse arguments without subparsers
        """
        if six.PY2:
            # register a placeholder subcommand so that py2's argparse does
            # not complain about a missing subcommand; the resulting
            # '__dummy' entry is removed again in parse_known_args
            self._subparsers_action.add_parser("__dummy")
            return super(FuncArgParser, self).parse_known_args(
                list(args) + ['__dummy'])
        return super(FuncArgParser, self).parse_known_args(args)
    @docstrings.get_sectionsf('FuncArgParser.update_short')
    @docstrings.dedent
    def update_short(self, **kwargs):
        """
        Update the short optional arguments (those with one leading '-')

        This method updates the short argument name for the specified function
        arguments as stored in :attr:`unfinished_arguments`

        Parameters
        ----------
        ``**kwargs``
            Keywords must be keys in the :attr:`unfinished_arguments`
            dictionary (i.e. keywords of the root functions), values the short
            argument names

        Examples
        --------
        Setting::

            >>> parser.update_short(something='s', something_else='se')

        is basically the same as::

            >>> parser.update_arg('something', short='s')
            >>> parser.update_arg('something_else', short='se')

        which in turn is basically comparable to::

            >>> parser.add_argument('-s', '--something', ...)
            >>> parser.add_argument('-se', '--something_else', ...)

        See Also
        --------
        update_shortf, update_long"""
        # forward every (argument, short-name) pair to update_arg; six keeps
        # the iteration identical on Python 2 and 3
        for key, val in six.iteritems(kwargs):
            self.update_arg(key, short=val)
    @docstrings.dedent
    def update_shortf(self, **kwargs):
        """
        Update the short optional arguments belonging to a function

        This method acts exactly like :meth:`update_short` but works as a
        decorator (see :meth:`update_arg` and :meth:`update_argf`)

        Parameters
        ----------
        %(FuncArgParser.update_short.parameters)s

        Returns
        -------
        function
            The function that can be used as a decorator

        Examples
        --------
        Use this method as a decorator::

            >>> @parser.update_shortf(something='s', something_else='se')
            ... def do_something(something=None, something_else=None):
            ...     ...

        See also the examples in :meth:`update_short`.

        See Also
        --------
        update_short, update_longf
        """
        # the actual update is applied by the decorator returned here, once
        # the decorated function identifies the owning parser(s)
        return self._as_decorator('update_short', **kwargs)
    @docstrings.get_sectionsf('FuncArgParser.update_long')
    @docstrings.dedent
    def update_long(self, **kwargs):
        """
        Update the long optional arguments (those with two leading '-')

        This method updates the long argument name for the specified function
        arguments as stored in :attr:`unfinished_arguments`

        Parameters
        ----------
        ``**kwargs``
            Keywords must be keys in the :attr:`unfinished_arguments`
            dictionary (i.e. keywords of the root functions), values the long
            argument names

        Examples
        --------
        Setting::

            >>> parser.update_long(something='s', something_else='se')

        is basically the same as::

            >>> parser.update_arg('something', long='s')
            >>> parser.update_arg('something_else', long='se')

        which in turn is basically comparable to::

            >>> parser.add_argument('--s', dest='something', ...)
            >>> parser.add_argument('--se', dest='something_else', ...)

        See Also
        --------
        update_short, update_longf"""
        # forward every (argument, long-name) pair to update_arg
        for key, val in six.iteritems(kwargs):
            self.update_arg(key, long=val)
    @docstrings.dedent
    def update_longf(self, **kwargs):
        """
        Update the long optional arguments belonging to a function

        This method acts exactly like :meth:`update_long` but works as a
        decorator (see :meth:`update_arg` and :meth:`update_argf`)

        Parameters
        ----------
        %(FuncArgParser.update_long.parameters)s

        Returns
        -------
        function
            The function that can be used as a decorator

        Examples
        --------
        Use this method as a decorator::

            >>> @parser.update_longf(something='s', something_else='se')
            ... def do_something(something=None, something_else=None):
            ...     ...

        See also the examples in :meth:`update_long`.

        See Also
        --------
        update_long, update_shortf
        """
        # the actual update is applied by the decorator returned here
        return self._as_decorator('update_long', **kwargs)
def parse2func(self, args=None, func=None):
"""Parse the command line arguments to the setup function
This method parses the given command line arguments to the function
used in the :meth:`setup_args` method to setup up this parser
Parameters
----------
args: list
The list of command line arguments
func: function
An alternative function to use. If None, the last function or the
one specified through the `setup_as` parameter in the
:meth:`setup_args` is used.
Returns
-------
object
What ever is returned by the called function
Note
----
This method does not cover subparsers!"""
kws = vars(self.parse_args(args))
if func is None:
if self._setup_as:
func = kws.pop(self._setup_as)
else:
func = self._used_functions[-1]
return func(**kws)
def parse_known2func(self, args=None, func=None):
"""Parse the command line arguments to the setup function
This method parses the given command line arguments to the function
used in the :meth:`setup_args` method to setup up this parser
Parameters
----------
args: list
The list of command line arguments
func: function or str
An alternative function to use. If None, the last function or the
one specified through the `setup_as` parameter in the
:meth:`setup_args` is used.
Returns
-------
object
What ever is returned by the called function
list
The remaining command line arguments that could not be interpreted
Note
----
This method does not cover subparsers!"""
ns, remainder = self.parse_known_args(args)
kws = vars(ns)
if func is None:
if self._setup_as:
func = kws.pop(self._setup_as)
else:
func = self._used_functions[-1]
return func(**kws), remainder
def parse_chained(self, args=None):
"""
Parse the argument directly to the function used for setup
This function parses the command line arguments to the function that
has been used for the :meth:`setup_args`.
Parameters
----------
args: list
The arguments parsed to the :meth:`parse_args` function
Returns
-------
argparse.Namespace
The namespace with mapping from command name to the function
return
See also
--------
parse_known_chained
"""
kws = vars(self.parse_args(args))
return self._parse2subparser_funcs(kws)
def parse_known_chained(self, args=None):
"""
Parse the argument directly to the function used for setup
This function parses the command line arguments to the function that
has been used for the :meth:`setup_args` method.
Parameters
----------
args: list
The arguments parsed to the :meth:`parse_args` function
Returns
-------
argparse.Namespace
The namespace with mapping from command name to the function
return
list
The remaining arguments that could not be interpreted
See also
--------
parse_known
"""
ns, remainder = self.parse_known_args(args)
kws = vars(ns)
return self._parse2subparser_funcs(kws), remainder
    def _parse2subparser_funcs(self, kws):
        """
        Recursive function to parse arguments to chained parsers

        `kws` is the ``vars()`` mapping of a parsed namespace. If it contains
        entries for any of this parser's subcommands, the corresponding
        subparsers are visited recursively; otherwise the function this
        parser was set up with is called with the remaining keywords.
        """
        choices = getattr(self._subparsers_action, 'choices', {})
        # map underscore-normalized command names back to the real ones
        replaced = {key.replace('-', '_'): key for key in choices}
        sp_commands = set(replaced).intersection(kws)
        if not sp_commands:
            # no subcommand present -> call this parser's setup function
            if self._setup_as is not None:
                func = kws.pop(self._setup_as)
            else:
                try:
                    func = self._used_functions[-1]
                except IndexError:
                    # this parser was never set up with a function
                    return None
            return func(**{
                key: kws[key] for key in set(kws).difference(choices)})
        else:
            # delegate each subcommand's sub-namespace to its subparser
            ret = {}
            for key in sp_commands:
                ret[key.replace('-', '_')] = \
                    choices[replaced[key]]._parse2subparser_funcs(
                        vars(kws[key]))
            return Namespace(**ret)
def get_subparser(self, name):
"""
Convenience method to get a certain subparser
Parameters
----------
name: str
The name of the subparser
Returns
-------
FuncArgParser
The subparsers corresponding to `name`
"""
if self._subparsers_action is None:
raise ValueError("%s has no subparsers defined!" % self)
return self._subparsers_action.choices[name]
|
Chilipp/funcargparse | funcargparse/__init__.py | FuncArgParser.add_subparsers | python | def add_subparsers(self, *args, **kwargs):
chain = kwargs.pop('chain', None)
ret = super(FuncArgParser, self).add_subparsers(*args, **kwargs)
if chain:
self._chain_subparsers = True
self._subparsers_action = ret
return ret | Add subparsers to this parser
Parameters
----------
``*args, **kwargs``
As specified by the original
:meth:`argparse.ArgumentParser.add_subparsers` method
chain: bool
Default: False. If True, It is enabled to chain subparsers | train | https://github.com/Chilipp/funcargparse/blob/398ce8e7fa5aa35c465215446bda151cf1ecf7ad/funcargparse/__init__.py#L314-L330 | null | class FuncArgParser(ArgumentParser):
"""Subclass of an argument parser that get's parts of the information
from a given function"""
_finalized = False
#: The unfinished arguments after the setup
unfinished_arguments = {}
#: The sections to extract from a function docstring that should be used
#: in the epilog of this parser. See also the :meth:`setup_args` method
epilog_sections = ['Notes', 'References']
#: The formatter specification for the epilog. This can either be a string
#: out of 'header', 'bold', or
#: 'rubric' or a callable (i.e. function) that takes two arguments,
#: the section title and the section text, and returns a string.
#:
#: 'heading'
#: Use section headers such as::
#:
#: Notes
#: -----
#: 'bold'
#: Just make a bold header for the section, e.g. ``**Notes**``
#: 'rubric'
#: Use a rubric rst directive, e.g. ``.. rubric:: Notes``
#:
#: .. warning::
#:
#: When building a sphinx documentation using the sphinx-argparse
#: module, this value should be set to ``'bold'`` or ``'rubric'``! Just
#: add this two lines to your conf.py:
#:
#: .. code-block:: python
#:
#: import funcargparse
#: funcargparse.FuncArgParser.epilog_formatter = 'rubric'
epilog_formatter = 'heading'
    def __init__(self, *args, **kwargs):
        """
        Parameters
        ----------
        ``*args,**kwargs``
            These arguments are determined by the
            :class:`argparse.ArgumentParser` base class. Note that by default,
            we use a :class:`argparse.RawTextHelpFormatter` class for the
            `formatter_class` keyword, whereas the
            :class:`argparse.ArgumentParser` uses a
            :class:`argparse.HelpFormatter`

        Other Parameters
        ----------------
        epilog_sections: list of str
            The default sections to use for the epilog (see the
            :attr:`epilog_sections` attribute). They can also be specified
            each time the :meth:`setup_args` method is called
        epilog_formatter: {'header', 'bold', 'rubric'} or function
            Specify how the epilog sections should be formatted and defaults to
            the :attr:`epilog_formatter` attribute. This can either be a string
            out of 'header', 'bold', or 'rubric' or a callable (i.e. function)
            that takes two arguments, the section title and the section text,
            and returns a string.

            'heading'
                Use section headers such as::

                    Notes
                    -----
            'bold'
                Just make a bold header for the section, e.g. ``**Notes**``
            'rubric'
                Use a rubric rst directive, e.g. ``.. rubric:: Notes``
        """
        self._subparsers_action = None
        # raw text help keeps the formatting of interpreted docstrings
        kwargs.setdefault('formatter_class', argparse.RawTextHelpFormatter)
        epilog_sections = kwargs.pop('epilog_sections', None)
        if epilog_sections is not None:
            self.epilog_sections = epilog_sections
        epilog_formatter = kwargs.pop('epilog_formatter', None)
        if epilog_formatter is not None:
            self.epilog_formatter = epilog_formatter
        super(FuncArgParser, self).__init__(*args, **kwargs)
        # mapping from function argument name to add_argument settings
        self.unfinished_arguments = OrderedDict()
        # functions that have been passed to setup_args
        self._used_functions = []
        # state variable used by grouparg/parse_known_args for chaining
        self.__currentarg = None
        self._chain_subparsers = False
        self._setup_as = None
        # name -> formatter used by format_epilog_section
        self._epilog_formatters = {'heading': self.format_heading,
                                   'bold': self.format_bold,
                                   'rubric': self.format_rubric}
    @staticmethod
    def get_param_doc(doc, param):
        """Get the documentation and datatype for a parameter

        This function returns the documentation and the argument for a
        napoleon like structured docstring `doc`

        Parameters
        ----------
        doc: str
            The base docstring to use
        param: str
            The argument to use

        Returns
        -------
        str
            The documentation of the given `param`
        str
            The datatype of the given `param`"""
        # keep only the part of `doc` that documents `param` (its description
        # or, failing that, its type specification)
        arg_doc = docstrings.keep_params_s(doc, [param]) or \
            docstrings.keep_types_s(doc, [param])
        dtype = None
        if arg_doc:
            lines = arg_doc.splitlines()
            # everything after the first line is the parameter description
            arg_doc = dedents('\n' + '\n'.join(lines[1:]))
            # the first line has the form '<param>: <dtype>'
            param_desc = lines[0].split(':', 1)
            if len(param_desc) > 1:
                dtype = param_desc[1].strip()
        return arg_doc, dtype
@docstrings.get_sectionsf('FuncArgParser.setup_args',
sections=['Parameters', 'Returns'])
@docstrings.dedent
def setup_args(self, func=None, setup_as=None, insert_at=None,
interprete=True, epilog_sections=None,
overwrite=False, append_epilog=True):
"""
Add the parameters from the given `func` to the parameter settings
Parameters
----------
func: function
The function to use. If None, a function will be returned that can
be used as a decorator
setup_as: str
The attribute that shall be assigned to the function in the
resulting namespace. If specified, this function will be used when
calling the :meth:`parse2func` method
insert_at: int
The position where the given `func` should be inserted. If None,
it will be appended at the end and used when calling the
:meth:`parse2func` method
interprete: bool
If True (default), the docstrings are interpreted and switches and
lists are automatically inserted (see the
[interpretation-docs]_
epilog_sections: list of str
The headers of the sections to extract. If None, the
:attr:`epilog_sections` attribute is used
overwrite: bool
If True, overwrite the existing epilog and the existing description
of the parser
append_epilog: bool
If True, append to the existing epilog
Returns
-------
function
Either the function that can be used as a decorator (if `func` is
``None``), or the given `func` itself.
Examples
--------
Use this method as a decorator::
>>> @parser.setup_args
... def do_something(a=1):
'''
Just an example
Parameters
----------
a: int
A number to increment by one
'''
return a + 1
>>> args = parser.parse_args('-a 2'.split())
Or by specifying the setup_as function::
>>> @parser.setup_args(setup_as='func')
... def do_something(a=1):
'''
Just an example
Parameters
----------
a: int
A number to increment by one
'''
return a + 1
>>> args = parser.parse_args('-a 2'.split())
>>> args.func is do_something
>>> parser.parse2func('-a 2'.split())
3
References
----------
.. [interpretation-docs]
http://funcargparse.readthedocs.io/en/latest/docstring_interpretation.html)
"""
def setup(func):
# insert the function
if insert_at is None:
self._used_functions.append(func)
else:
self._used_functions.insert(insert_at, func)
args_dict = self.unfinished_arguments
# save the function to use in parse2funcs
if setup_as:
args_dict[setup_as] = dict(
long=setup_as, default=func, help=argparse.SUPPRESS)
self._setup_as = setup_as
# create arguments
args, varargs, varkw, defaults = inspect.getargspec(func)
full_doc = docstrings.dedents(inspect.getdoc(func))
summary = docstrings.get_full_description(full_doc)
if summary:
if not self.description or overwrite:
self.description = summary
full_doc = docstrings._remove_summary(full_doc)
self.extract_as_epilog(full_doc, epilog_sections, overwrite,
append_epilog)
doc = docstrings._get_section(full_doc, 'Parameters') + '\n'
doc += docstrings._get_section(full_doc, 'Other Parameters')
doc = doc.rstrip()
default_min = len(args or []) - len(defaults or [])
for i, arg in enumerate(args):
if arg == 'self' or arg in args_dict:
continue
arg_doc, dtype = self.get_param_doc(doc, arg)
args_dict[arg] = d = {'dest': arg, 'short': arg.replace('_',
'-'),
'long': arg.replace('_', '-')}
if arg_doc:
d['help'] = arg_doc
if i >= default_min:
d['default'] = defaults[i - default_min]
else:
d['positional'] = True
if interprete and dtype == 'bool' and 'default' in d:
d['action'] = 'store_false' if d['default'] else \
'store_true'
elif interprete and dtype:
if dtype.startswith('list of'):
d['nargs'] = '+'
dtype = dtype[7:].strip()
if dtype in ['str', 'string', 'strings']:
d['type'] = six.text_type
if dtype == 'strings':
dtype = 'string'
else:
try:
d['type'] = getattr(builtins, dtype)
except AttributeError:
try: # maybe the dtype has a final 's'
d['type'] = getattr(builtins, dtype[:-1])
dtype = dtype[:-1]
except AttributeError:
pass
d['metavar'] = dtype
return func
if func is None:
return setup
else:
return setup(func)
@docstrings.get_sectionsf('FuncArgParser.add_subparsers')
@docstrings.dedent
@docstrings.dedent
def setup_subparser(
self, func=None, setup_as=None, insert_at=None, interprete=True,
epilog_sections=None, overwrite=False, append_epilog=True,
return_parser=False, name=None, **kwargs):
"""
Create a subparser with the name of the given function
Parameters are the same as for the :meth:`setup_args` function, other
parameters are parsed to the :meth:`add_subparsers` method if (and only
if) this method has not already been called.
Parameters
----------
%(FuncArgParser.setup_args.parameters)s
return_parser: bool
If True, the create parser is returned instead of the function
name: str
The name of the created parser. If None, the function name is used
and underscores (``'_'``) are replaced by minus (``'-'``)
``**kwargs``
Any other parameter that is passed to the add_parser method that
creates the parser
Other Parameters
----------------
Returns
-------
FuncArgParser or %(FuncArgParser.setup_args.returns)s
If return_parser is True, the created subparser is returned
Examples
--------
Use this method as a decorator::
>>> from funcargparser import FuncArgParser
>>> parser = FuncArgParser()
>>> @parser.setup_subparser
... def my_func(my_argument=None):
... pass
>>> args = parser.parse_args('my-func -my-argument 1'.split())
"""
def setup(func):
if self._subparsers_action is None:
raise RuntimeError(
"No subparsers have yet been created! Run the "
"add_subparsers method first!")
# replace underscore by '-'
name2use = name
if name2use is None:
name2use = func.__name__.replace('_', '-')
kwargs.setdefault('help', docstrings.get_summary(
docstrings.dedents(inspect.getdoc(func))))
parser = self._subparsers_action.add_parser(name2use, **kwargs)
parser.setup_args(
func, setup_as=setup_as, insert_at=insert_at,
interprete=interprete, epilog_sections=epilog_sections,
overwrite=overwrite, append_epilog=append_epilog)
return func, parser
if func is None:
return lambda f: setup(f)[0]
else:
return setup(func)[int(return_parser)]
    @docstrings.get_sectionsf('FuncArgParser.update_arg')
    @docstrings.dedent
    def update_arg(self, arg, if_existent=None, **kwargs):
        """
        Update the `add_argument` data for the given parameter

        Parameters
        ----------
        arg: str
            The name of the function argument
        if_existent: bool or None
            If True, the argument is updated. If None (default), the argument
            is only updated, if it exists. Otherwise, if False, the given
            ``**kwargs`` are only used if the argument is not yet existing
        ``**kwargs``
            The keyword arguments any parameter for the
            :meth:`argparse.ArgumentParser.add_argument` method
        """
        # tri-state `if_existent`:
        #   True  -> update unconditionally (raises KeyError for unknown args)
        #   None  -> update only when the argument is already registered,
        #            silently do nothing otherwise
        #   False -> only register defaults for a not-yet-known argument
        if if_existent or (if_existent is None and
                           arg in self.unfinished_arguments):
            self.unfinished_arguments[arg].update(kwargs)
        elif not if_existent and if_existent is not None:
            self.unfinished_arguments.setdefault(arg, kwargs)
    @docstrings.dedent
    def update_argf(self, arg, **kwargs):
        """
        Update the arguments as a decorator

        Parameters
        ----------
        %(FuncArgParser.update_arg.parameters)s

        Examples
        --------
        Use this method as a decorator::

            >>> from funcargparse import FuncArgParser
            >>> parser = FuncArgParser()
            >>> @parser.update_argf('my_argument', type=int)
            ... def my_func(my_argument=None):
            ...     pass
            >>> args = parser.parse_args('my-func -my-argument 1'.split())
            >>> isinstance(args.my_argument, int)
            True

        See Also
        --------
        update_arg"""
        # the actual update happens when the returned decorator is applied
        return self._as_decorator('update_arg', arg, **kwargs)
    def _as_decorator(self, funcname, *args, **kwargs):
        """Turn the method given by `funcname` into a function decorator

        The returned decorator calls ``parser.<funcname>(*args, **kwargs)``
        on every parser (this one or any subparser) that was set up with the
        decorated function, and returns the function unchanged.

        Raises
        ------
        ValueError
            If no parser was set up with the decorated function
        """
        def func_decorator(func):
            success = False
            for parser in self._get_corresponding_parsers(func):
                getattr(parser, funcname)(*args, **kwargs)
                success = True
            if not success:
                raise ValueError(
                    "Could not figure out to which this %s belongs" % func)
            return func
        return func_decorator
def _get_corresponding_parsers(self, func):
"""Get the parser that has been set up by the given `function`"""
if func in self._used_functions:
yield self
if self._subparsers_action is not None:
for parser in self._subparsers_action.choices.values():
for sp in parser._get_corresponding_parsers(func):
yield sp
def pop_arg(self, *args, **kwargs):
"""Delete a previously defined argument from the parser
"""
return self.unfinished_arguments.pop(*args, **kwargs)
    def pop_argf(self, *args, **kwargs):
        """Delete a previously defined argument from the parser via decorators

        Same as :meth:`pop_arg` but it can be used as a decorator.

        Returns
        -------
        function
            A decorator that removes the argument from every parser that was
            set up with the decorated function"""
        return self._as_decorator('pop_arg', *args, **kwargs)
def pop_key(self, arg, key, *args, **kwargs):
"""Delete a previously defined key for the `add_argument`
"""
return self.unfinished_arguments[arg].pop(key, *args, **kwargs)
    def pop_keyf(self, *args, **kwargs):
        """Delete a previously defined key for the `add_argument`

        Same as :meth:`pop_key` but it can be used as a decorator.

        Returns
        -------
        function
            A decorator that removes the key from the argument settings of
            every parser that was set up with the decorated function"""
        return self._as_decorator('pop_key', *args, **kwargs)
    def create_arguments(self, subparsers=False):
        """Create and add the arguments

        Registers everything collected in :attr:`unfinished_arguments` via
        :meth:`argparse.ArgumentParser.add_argument`. This can be done only
        once; afterwards the parser is marked as finalized.

        Parameters
        ----------
        subparsers: bool
            If True, the arguments of the subparsers are also created

        Returns
        -------
        list
            The actions returned by the ``add_argument`` calls

        Raises
        ------
        ValueError
            If this parser has already been finalized, or if an argument
            provides neither a short nor a long name"""
        ret = []
        if not self._finalized:
            for arg, d in self.unfinished_arguments.items():
                try:
                    # positional arguments get no leading '-'
                    not_positional = int(not d.pop('positional', False))
                    short = d.pop('short', None)
                    long_name = d.pop('long', None)
                    if short is None and long_name is None:
                        raise ValueError(
                            "Either a short (-) or a long (--) argument must "
                            "be provided!")
                    if not not_positional:
                        # positionals use the bare argument name and may not
                        # specify a dest
                        short = arg
                        long_name = None
                        d.pop('dest', None)
                    if short == long_name:
                        long_name = None
                    args = []
                    if short:
                        args.append('-' * not_positional + short)
                    if long_name:
                        args.append('--' * not_positional + long_name)
                    # arguments may be attached to an argument group instead
                    # of the parser itself
                    group = d.pop('group', self)
                    if d.get('action') in ['store_true', 'store_false']:
                        # switches may not take a metavar
                        d.pop('metavar', None)
                    ret.append(group.add_argument(*args, **d))
                except Exception:
                    print('Error while creating argument %s' % arg)
                    raise
        else:
            raise ValueError('Parser has already been finalized!')
        self._finalized = True
        if subparsers and self._subparsers_action is not None:
            for parser in self._subparsers_action.choices.values():
                parser.create_arguments(True)
        return ret
def append2help(self, arg, s):
"""Append the given string to the help of argument `arg`
Parameters
----------
arg: str
The function argument
s: str
The string to append to the help"""
self.unfinished_arguments[arg]['help'] += s
    def append2helpf(self, arg, s):
        """Append the given string to the help of argument `arg` (decorator)

        Same as :meth:`append2help`, but returns a decorator that applies the
        change to every parser that was set up with the decorated function.

        Parameters
        ----------
        arg: str
            The function argument
        s: str
            The string to append to the help"""
        return self._as_decorator('append2help', arg, s)
@staticmethod
def format_bold(section, text):
"""Make a bold formatting for the section header"""
return '**%s**\n\n%s' % (section, text)
    @staticmethod
    def format_rubric(section, text):
        """Format the section header as a ``.. rubric::`` rst directive"""
        return '.. rubric:: %s\n\n%s' % (section, text)
    @staticmethod
    def format_heading(section, text):
        """Format `section` as an underlined rst section heading above `text`"""
        return '\n'.join([section, '-' * len(section), text])
def format_epilog_section(self, section, text):
"""Format a section for the epilog by inserting a format"""
try:
func = self._epilog_formatters[self.epilog_formatter]
except KeyError:
if not callable(self.epilog_formatter):
raise
func = self.epilog_formatter
return func(section, text)
def extract_as_epilog(self, text, sections=None, overwrite=False,
append=True):
"""Extract epilog sections from the a docstring
Parameters
----------
text
The docstring to use
sections: list of str
The headers of the sections to extract. If None, the
:attr:`epilog_sections` attribute is used
overwrite: bool
If True, overwrite the existing epilog
append: bool
If True, append to the existing epilog"""
if sections is None:
sections = self.epilog_sections
if ((not self.epilog or overwrite or append) and sections):
epilog_parts = []
for sec in sections:
text = docstrings._get_section(text, sec).strip()
if text:
epilog_parts.append(
self.format_epilog_section(sec, text))
if epilog_parts:
epilog = '\n\n'.join(epilog_parts)
if overwrite or not self.epilog:
self.epilog = epilog
else:
self.epilog += '\n\n' + epilog
def grouparg(self, arg, my_arg=None, parent_cmds=[]):
"""
Grouper function for chaining subcommands
Parameters
----------
arg: str
The current command line argument that is parsed
my_arg: str
The name of this subparser. If None, this parser is the main
parser and has no parent parser
parent_cmds: list of str
The available commands of the parent parsers
Returns
-------
str or None
The grouping key for the given `arg` or None if the key does
not correspond to this parser or this parser is the main parser
and does not have seen a subparser yet
Notes
-----
Quite complicated, there is no real need to deal with this function
"""
if self._subparsers_action is None:
return None
commands = self._subparsers_action.choices
currentarg = self.__currentarg
# the default return value is the current argument we are in or the
# name of the subparser itself
ret = currentarg or my_arg
if currentarg is not None:
# if we are already in a sub command, we use the sub parser
sp_key = commands[currentarg].grouparg(arg, currentarg, chain(
commands, parent_cmds))
if sp_key is None and arg in commands:
# if the subparser did not recognize the command, we use the
# command the corresponds to this parser or (of this parser
# is the parent parser) the current subparser
self.__currentarg = currentarg = arg
ret = my_arg or currentarg
elif sp_key not in commands and arg in parent_cmds:
# otherwise, if the subparser recognizes the commmand but it is
# not in the known command of this parser, it must be another
# command of the subparser and this parser can ignore it
ret = None
else:
# otherwise the command belongs to this subparser (if this one
# is not the subparser) or the current subparser
ret = my_arg or currentarg
elif arg in commands:
# if the argument is a valid subparser, we return this one
self.__currentarg = arg
ret = arg
elif arg in parent_cmds:
# if the argument is not a valid subparser but in one of our
# parents, we return None to signalize that we cannot categorize
# it
ret = None
return ret
def parse_known_args(self, args=None, namespace=None):
if self._chain_subparsers:
if args is None:
args = sys.argv[1:]
choices_d = OrderedDict()
remainders = OrderedDict()
main_args = []
# get the first argument to make sure that everything works
cmd = self.__currentarg = None
for i, (cmd, subargs) in enumerate(groupby(args, self.grouparg)):
if cmd is None:
main_args += list(subargs)
else:
# replace '-' by underscore
ns_cmd = cmd.replace('-', '_')
choices_d[ns_cmd], remainders[ns_cmd] = super(
FuncArgParser, self).parse_known_args(
list(chain(main_args, subargs)))
main_ns, remainders[None] = self.__parse_main(main_args)
for key, val in vars(main_ns).items():
choices_d[key] = val
self.__currentarg = None
if '__dummy' in choices_d:
del choices_d['__dummy']
return Namespace(**choices_d), list(chain(*remainders.values()))
# otherwise, use the default behaviour
return super(FuncArgParser, self).parse_known_args(args, namespace)
def __parse_main(self, args):
"""Parse the main arguments only. This is a work around for python 2.7
because argparse does not allow to parse arguments without subparsers
"""
if six.PY2:
self._subparsers_action.add_parser("__dummy")
return super(FuncArgParser, self).parse_known_args(
list(args) + ['__dummy'])
return super(FuncArgParser, self).parse_known_args(args)
@docstrings.get_sectionsf('FuncArgParser.update_short')
@docstrings.dedent
def update_short(self, **kwargs):
"""
Update the short optional arguments (those with one leading '-')
This method updates the short argument name for the specified function
arguments as stored in :attr:`unfinished_arguments`
Parameters
----------
``**kwargs``
Keywords must be keys in the :attr:`unfinished_arguments`
dictionary (i.e. keywords of the root functions), values the short
argument names
Examples
--------
Setting::
>>> parser.update_short(something='s', something_else='se')
is basically the same as::
>>> parser.update_arg('something', short='s')
>>> parser.update_arg('something_else', short='se')
which in turn is basically comparable to::
>>> parser.add_argument('-s', '--something', ...)
>>> parser.add_argument('-se', '--something_else', ...)
See Also
--------
update_shortf, update_long"""
for key, val in six.iteritems(kwargs):
self.update_arg(key, short=val)
@docstrings.dedent
def update_shortf(self, **kwargs):
"""
Update the short optional arguments belonging to a function
This method acts exactly like :meth:`update_short` but works as a
decorator (see :meth:`update_arg` and :meth:`update_argf`)
Parameters
----------
%(FuncArgParser.update_short.parameters)s
Returns
-------
function
The function that can be used as a decorator
Examples
--------
Use this method as a decorator::
>>> @parser.update_shortf(something='s', something_else='se')
... def do_something(something=None, something_else=None):
... ...
See also the examples in :meth:`update_short`.
See Also
--------
update_short, update_longf
"""
return self._as_decorator('update_short', **kwargs)
@docstrings.get_sectionsf('FuncArgParser.update_long')
@docstrings.dedent
def update_long(self, **kwargs):
"""
Update the long optional arguments (those with two leading '-')
This method updates the short argument name for the specified function
arguments as stored in :attr:`unfinished_arguments`
Parameters
----------
``**kwargs``
Keywords must be keys in the :attr:`unfinished_arguments`
dictionary (i.e. keywords of the root functions), values the long
argument names
Examples
--------
Setting::
>>> parser.update_long(something='s', something_else='se')
is basically the same as::
>>> parser.update_arg('something', long='s')
>>> parser.update_arg('something_else', long='se')
which in turn is basically comparable to::
>>> parser.add_argument('--s', dest='something', ...)
>>> parser.add_argument('--se', dest='something_else', ...)
See Also
--------
update_short, update_longf"""
for key, val in six.iteritems(kwargs):
self.update_arg(key, long=val)
@docstrings.dedent
def update_longf(self, **kwargs):
"""
Update the long optional arguments belonging to a function
This method acts exactly like :meth:`update_long` but works as a
decorator (see :meth:`update_arg` and :meth:`update_argf`)
Parameters
----------
%(FuncArgParser.update_long.parameters)s
Returns
-------
function
The function that can be used as a decorator
Examples
--------
Use this method as a decorator::
>>> @parser.update_shortf(something='s', something_else='se')
... def do_something(something=None, something_else=None):
... ...
See also the examples in :meth:`update_long`.
See Also
--------
update_short, update_longf
"""
return self._as_decorator('update_long', **kwargs)
def parse2func(self, args=None, func=None):
"""Parse the command line arguments to the setup function
This method parses the given command line arguments to the function
used in the :meth:`setup_args` method to setup up this parser
Parameters
----------
args: list
The list of command line arguments
func: function
An alternative function to use. If None, the last function or the
one specified through the `setup_as` parameter in the
:meth:`setup_args` is used.
Returns
-------
object
What ever is returned by the called function
Note
----
This method does not cover subparsers!"""
kws = vars(self.parse_args(args))
if func is None:
if self._setup_as:
func = kws.pop(self._setup_as)
else:
func = self._used_functions[-1]
return func(**kws)
def parse_known2func(self, args=None, func=None):
"""Parse the command line arguments to the setup function
This method parses the given command line arguments to the function
used in the :meth:`setup_args` method to setup up this parser
Parameters
----------
args: list
The list of command line arguments
func: function or str
An alternative function to use. If None, the last function or the
one specified through the `setup_as` parameter in the
:meth:`setup_args` is used.
Returns
-------
object
What ever is returned by the called function
list
The remaining command line arguments that could not be interpreted
Note
----
This method does not cover subparsers!"""
ns, remainder = self.parse_known_args(args)
kws = vars(ns)
if func is None:
if self._setup_as:
func = kws.pop(self._setup_as)
else:
func = self._used_functions[-1]
return func(**kws), remainder
def parse_chained(self, args=None):
"""
Parse the argument directly to the function used for setup
This function parses the command line arguments to the function that
has been used for the :meth:`setup_args`.
Parameters
----------
args: list
The arguments parsed to the :meth:`parse_args` function
Returns
-------
argparse.Namespace
The namespace with mapping from command name to the function
return
See also
--------
parse_known_chained
"""
kws = vars(self.parse_args(args))
return self._parse2subparser_funcs(kws)
def parse_known_chained(self, args=None):
"""
Parse the argument directly to the function used for setup
This function parses the command line arguments to the function that
has been used for the :meth:`setup_args` method.
Parameters
----------
args: list
The arguments parsed to the :meth:`parse_args` function
Returns
-------
argparse.Namespace
The namespace with mapping from command name to the function
return
list
The remaining arguments that could not be interpreted
See also
--------
parse_known
"""
ns, remainder = self.parse_known_args(args)
kws = vars(ns)
return self._parse2subparser_funcs(kws), remainder
def _parse2subparser_funcs(self, kws):
"""
Recursive function to parse arguments to chained parsers
"""
choices = getattr(self._subparsers_action, 'choices', {})
replaced = {key.replace('-', '_'): key for key in choices}
sp_commands = set(replaced).intersection(kws)
if not sp_commands:
if self._setup_as is not None:
func = kws.pop(self._setup_as)
else:
try:
func = self._used_functions[-1]
except IndexError:
return None
return func(**{
key: kws[key] for key in set(kws).difference(choices)})
else:
ret = {}
for key in sp_commands:
ret[key.replace('-', '_')] = \
choices[replaced[key]]._parse2subparser_funcs(
vars(kws[key]))
return Namespace(**ret)
def get_subparser(self, name):
"""
Convenience method to get a certain subparser
Parameters
----------
name: str
The name of the subparser
Returns
-------
FuncArgParser
The subparsers corresponding to `name`
"""
if self._subparsers_action is None:
raise ValueError("%s has no subparsers defined!" % self)
return self._subparsers_action.choices[name]
|
Chilipp/funcargparse | funcargparse/__init__.py | FuncArgParser.setup_subparser | python | def setup_subparser(
self, func=None, setup_as=None, insert_at=None, interprete=True,
epilog_sections=None, overwrite=False, append_epilog=True,
return_parser=False, name=None, **kwargs):
def setup(func):
if self._subparsers_action is None:
raise RuntimeError(
"No subparsers have yet been created! Run the "
"add_subparsers method first!")
# replace underscore by '-'
name2use = name
if name2use is None:
name2use = func.__name__.replace('_', '-')
kwargs.setdefault('help', docstrings.get_summary(
docstrings.dedents(inspect.getdoc(func))))
parser = self._subparsers_action.add_parser(name2use, **kwargs)
parser.setup_args(
func, setup_as=setup_as, insert_at=insert_at,
interprete=interprete, epilog_sections=epilog_sections,
overwrite=overwrite, append_epilog=append_epilog)
return func, parser
if func is None:
return lambda f: setup(f)[0]
else:
return setup(func)[int(return_parser)] | Create a subparser with the name of the given function
Parameters are the same as for the :meth:`setup_args` function, other
parameters are parsed to the :meth:`add_subparsers` method if (and only
if) this method has not already been called.
Parameters
----------
%(FuncArgParser.setup_args.parameters)s
return_parser: bool
If True, the create parser is returned instead of the function
name: str
The name of the created parser. If None, the function name is used
and underscores (``'_'``) are replaced by minus (``'-'``)
``**kwargs``
Any other parameter that is passed to the add_parser method that
creates the parser
Other Parameters
----------------
Returns
-------
FuncArgParser or %(FuncArgParser.setup_args.returns)s
If return_parser is True, the created subparser is returned
Examples
--------
Use this method as a decorator::
>>> from funcargparser import FuncArgParser
>>> parser = FuncArgParser()
>>> @parser.setup_subparser
... def my_func(my_argument=None):
... pass
>>> args = parser.parse_args('my-func -my-argument 1'.split()) | train | https://github.com/Chilipp/funcargparse/blob/398ce8e7fa5aa35c465215446bda151cf1ecf7ad/funcargparse/__init__.py#L333-L398 | [
"def setup(func):\n if self._subparsers_action is None:\n raise RuntimeError(\n \"No subparsers have yet been created! Run the \"\n \"add_subparsers method first!\")\n # replace underscore by '-'\n name2use = name\n if name2use is None:\n name2use = func.__name__.replace('_', '-')\n kwargs.setdefault('help', docstrings.get_summary(\n docstrings.dedents(inspect.getdoc(func))))\n parser = self._subparsers_action.add_parser(name2use, **kwargs)\n parser.setup_args(\n func, setup_as=setup_as, insert_at=insert_at,\n interprete=interprete, epilog_sections=epilog_sections,\n overwrite=overwrite, append_epilog=append_epilog)\n return func, parser\n"
] | class FuncArgParser(ArgumentParser):
"""Subclass of an argument parser that get's parts of the information
from a given function"""
_finalized = False
#: The unfinished arguments after the setup
unfinished_arguments = {}
#: The sections to extract from a function docstring that should be used
#: in the epilog of this parser. See also the :meth:`setup_args` method
epilog_sections = ['Notes', 'References']
#: The formatter specification for the epilog. This can either be a string
#: out of 'header', 'bold', or
#: 'rubric' or a callable (i.e. function) that takes two arguments,
#: the section title and the section text, and returns a string.
#:
#: 'heading'
#: Use section headers such as::
#:
#: Notes
#: -----
#: 'bold'
#: Just make a bold header for the section, e.g. ``**Notes**``
#: 'rubric'
#: Use a rubric rst directive, e.g. ``.. rubric:: Notes``
#:
#: .. warning::
#:
#: When building a sphinx documentation using the sphinx-argparse
#: module, this value should be set to ``'bold'`` or ``'rubric'``! Just
#: add this two lines to your conf.py:
#:
#: .. code-block:: python
#:
#: import funcargparse
#: funcargparse.FuncArgParser.epilog_formatter = 'rubric'
epilog_formatter = 'heading'
def __init__(self, *args, **kwargs):
"""
Parameters
----------
``*args,**kwargs``
Theses arguments are determined by the
:class:`argparse.ArgumentParser` base class. Note that by default,
we use a :class:`argparse.RawTextHelpFormatter` class for the
`formatter_class` keyword, whereas the
:class:`argparse.ArgumentParser` uses a
:class:`argparse.HelpFormatter`
Other Parameters
----------------
epilog_sections: list of str
The default sections to use for the epilog (see the
:attr:`epilog_sections` attribute). They can also be specified
each time the :meth:`setup_args` method is called
epilog_formatter: {'header', 'bold', 'rubric'} or function
Specify how the epilog sections should be formatted and defaults to
the :attr:`epilog_formatter` attribute. This can either be a string
out of 'header', 'bold', or 'rubric' or a callable (i.e. function)
that takes two arguments, the section title and the section text,
and returns a string.
'heading'
Use section headers such as::
Notes
-----
'bold'
Just make a bold header for the section, e.g. ``**Notes**``
'rubric'
Use a rubric rst directive, e.g. ``.. rubric:: Notes``
"""
self._subparsers_action = None
kwargs.setdefault('formatter_class', argparse.RawTextHelpFormatter)
epilog_sections = kwargs.pop('epilog_sections', None)
if epilog_sections is not None:
self.epilog_sections = epilog_sections
epilog_formatter = kwargs.pop('epilog_formatter', None)
if epilog_formatter is not None:
self.epilog_formatter = epilog_formatter
super(FuncArgParser, self).__init__(*args, **kwargs)
self.unfinished_arguments = OrderedDict()
self._used_functions = []
self.__currentarg = None
self._chain_subparsers = False
self._setup_as = None
self._epilog_formatters = {'heading': self.format_heading,
'bold': self.format_bold,
'rubric': self.format_rubric}
@staticmethod
def get_param_doc(doc, param):
"""Get the documentation and datatype for a parameter
This function returns the documentation and the argument for a
napoleon like structured docstring `doc`
Parameters
----------
doc: str
The base docstring to use
param: str
The argument to use
Returns
-------
str
The documentation of the given `param`
str
The datatype of the given `param`"""
arg_doc = docstrings.keep_params_s(doc, [param]) or \
docstrings.keep_types_s(doc, [param])
dtype = None
if arg_doc:
lines = arg_doc.splitlines()
arg_doc = dedents('\n' + '\n'.join(lines[1:]))
param_desc = lines[0].split(':', 1)
if len(param_desc) > 1:
dtype = param_desc[1].strip()
return arg_doc, dtype
@docstrings.get_sectionsf('FuncArgParser.setup_args',
sections=['Parameters', 'Returns'])
@docstrings.dedent
def setup_args(self, func=None, setup_as=None, insert_at=None,
interprete=True, epilog_sections=None,
overwrite=False, append_epilog=True):
"""
Add the parameters from the given `func` to the parameter settings
Parameters
----------
func: function
The function to use. If None, a function will be returned that can
be used as a decorator
setup_as: str
The attribute that shall be assigned to the function in the
resulting namespace. If specified, this function will be used when
calling the :meth:`parse2func` method
insert_at: int
The position where the given `func` should be inserted. If None,
it will be appended at the end and used when calling the
:meth:`parse2func` method
interprete: bool
If True (default), the docstrings are interpreted and switches and
lists are automatically inserted (see the
[interpretation-docs]_
epilog_sections: list of str
The headers of the sections to extract. If None, the
:attr:`epilog_sections` attribute is used
overwrite: bool
If True, overwrite the existing epilog and the existing description
of the parser
append_epilog: bool
If True, append to the existing epilog
Returns
-------
function
Either the function that can be used as a decorator (if `func` is
``None``), or the given `func` itself.
Examples
--------
Use this method as a decorator::
>>> @parser.setup_args
... def do_something(a=1):
'''
Just an example
Parameters
----------
a: int
A number to increment by one
'''
return a + 1
>>> args = parser.parse_args('-a 2'.split())
Or by specifying the setup_as function::
>>> @parser.setup_args(setup_as='func')
... def do_something(a=1):
'''
Just an example
Parameters
----------
a: int
A number to increment by one
'''
return a + 1
>>> args = parser.parse_args('-a 2'.split())
>>> args.func is do_something
>>> parser.parse2func('-a 2'.split())
3
References
----------
.. [interpretation-docs]
http://funcargparse.readthedocs.io/en/latest/docstring_interpretation.html)
"""
def setup(func):
# insert the function
if insert_at is None:
self._used_functions.append(func)
else:
self._used_functions.insert(insert_at, func)
args_dict = self.unfinished_arguments
# save the function to use in parse2funcs
if setup_as:
args_dict[setup_as] = dict(
long=setup_as, default=func, help=argparse.SUPPRESS)
self._setup_as = setup_as
# create arguments
args, varargs, varkw, defaults = inspect.getargspec(func)
full_doc = docstrings.dedents(inspect.getdoc(func))
summary = docstrings.get_full_description(full_doc)
if summary:
if not self.description or overwrite:
self.description = summary
full_doc = docstrings._remove_summary(full_doc)
self.extract_as_epilog(full_doc, epilog_sections, overwrite,
append_epilog)
doc = docstrings._get_section(full_doc, 'Parameters') + '\n'
doc += docstrings._get_section(full_doc, 'Other Parameters')
doc = doc.rstrip()
default_min = len(args or []) - len(defaults or [])
for i, arg in enumerate(args):
if arg == 'self' or arg in args_dict:
continue
arg_doc, dtype = self.get_param_doc(doc, arg)
args_dict[arg] = d = {'dest': arg, 'short': arg.replace('_',
'-'),
'long': arg.replace('_', '-')}
if arg_doc:
d['help'] = arg_doc
if i >= default_min:
d['default'] = defaults[i - default_min]
else:
d['positional'] = True
if interprete and dtype == 'bool' and 'default' in d:
d['action'] = 'store_false' if d['default'] else \
'store_true'
elif interprete and dtype:
if dtype.startswith('list of'):
d['nargs'] = '+'
dtype = dtype[7:].strip()
if dtype in ['str', 'string', 'strings']:
d['type'] = six.text_type
if dtype == 'strings':
dtype = 'string'
else:
try:
d['type'] = getattr(builtins, dtype)
except AttributeError:
try: # maybe the dtype has a final 's'
d['type'] = getattr(builtins, dtype[:-1])
dtype = dtype[:-1]
except AttributeError:
pass
d['metavar'] = dtype
return func
if func is None:
return setup
else:
return setup(func)
@docstrings.get_sectionsf('FuncArgParser.add_subparsers')
@docstrings.dedent
def add_subparsers(self, *args, **kwargs):
"""
Add subparsers to this parser
Parameters
----------
``*args, **kwargs``
As specified by the original
:meth:`argparse.ArgumentParser.add_subparsers` method
chain: bool
Default: False. If True, It is enabled to chain subparsers"""
chain = kwargs.pop('chain', None)
ret = super(FuncArgParser, self).add_subparsers(*args, **kwargs)
if chain:
self._chain_subparsers = True
self._subparsers_action = ret
return ret
@docstrings.dedent
@docstrings.get_sectionsf('FuncArgParser.update_arg')
@docstrings.dedent
def update_arg(self, arg, if_existent=None, **kwargs):
"""
Update the `add_argument` data for the given parameter
Parameters
----------
arg: str
The name of the function argument
if_existent: bool or None
If True, the argument is updated. If None (default), the argument
is only updated, if it exists. Otherwise, if False, the given
``**kwargs`` are only used if the argument is not yet existing
``**kwargs``
The keyword arguments any parameter for the
:meth:`argparse.ArgumentParser.add_argument` method
"""
if if_existent or (if_existent is None and
arg in self.unfinished_arguments):
self.unfinished_arguments[arg].update(kwargs)
elif not if_existent and if_existent is not None:
self.unfinished_arguments.setdefault(arg, kwargs)
@docstrings.dedent
def update_argf(self, arg, **kwargs):
"""
Update the arguments as a decorator
Parameters
---------
%(FuncArgParser.update_arg.parameters)s
Examples
--------
Use this method as a decorator::
>>> from funcargparser import FuncArgParser
>>> parser = FuncArgParser()
>>> @parser.update_argf('my_argument', type=int)
... def my_func(my_argument=None):
... pass
>>> args = parser.parse_args('my-func -my-argument 1'.split())
>>> isinstance(args.my_argument, int)
True
See Also
--------
update_arg"""
return self._as_decorator('update_arg', arg, **kwargs)
def _as_decorator(self, funcname, *args, **kwargs):
def func_decorator(func):
success = False
for parser in self._get_corresponding_parsers(func):
getattr(parser, funcname)(*args, **kwargs)
success = True
if not success:
raise ValueError(
"Could not figure out to which this %s belongs" % func)
return func
return func_decorator
def _get_corresponding_parsers(self, func):
"""Get the parser that has been set up by the given `function`"""
if func in self._used_functions:
yield self
if self._subparsers_action is not None:
for parser in self._subparsers_action.choices.values():
for sp in parser._get_corresponding_parsers(func):
yield sp
def pop_arg(self, *args, **kwargs):
"""Delete a previously defined argument from the parser
"""
return self.unfinished_arguments.pop(*args, **kwargs)
def pop_argf(self, *args, **kwargs):
"""Delete a previously defined argument from the parser via decorators
Same as :meth:`pop_arg` but it can be used as a decorator"""
return self._as_decorator('pop_arg', *args, **kwargs)
def pop_key(self, arg, key, *args, **kwargs):
"""Delete a previously defined key for the `add_argument`
"""
return self.unfinished_arguments[arg].pop(key, *args, **kwargs)
def pop_keyf(self, *args, **kwargs):
"""Delete a previously defined key for the `add_argument`
Same as :meth:`pop_key` but it can be used as a decorator"""
return self._as_decorator('pop_key', *args, **kwargs)
def create_arguments(self, subparsers=False):
"""Create and add the arguments
Parameters
----------
subparsers: bool
If True, the arguments of the subparsers are also created"""
ret = []
if not self._finalized:
for arg, d in self.unfinished_arguments.items():
try:
not_positional = int(not d.pop('positional', False))
short = d.pop('short', None)
long_name = d.pop('long', None)
if short is None and long_name is None:
raise ValueError(
"Either a short (-) or a long (--) argument must "
"be provided!")
if not not_positional:
short = arg
long_name = None
d.pop('dest', None)
if short == long_name:
long_name = None
args = []
if short:
args.append('-' * not_positional + short)
if long_name:
args.append('--' * not_positional + long_name)
group = d.pop('group', self)
if d.get('action') in ['store_true', 'store_false']:
d.pop('metavar', None)
ret.append(group.add_argument(*args, **d))
except Exception:
print('Error while creating argument %s' % arg)
raise
else:
raise ValueError('Parser has already been finalized!')
self._finalized = True
if subparsers and self._subparsers_action is not None:
for parser in self._subparsers_action.choices.values():
parser.create_arguments(True)
return ret
def append2help(self, arg, s):
"""Append the given string to the help of argument `arg`
Parameters
----------
arg: str
The function argument
s: str
The string to append to the help"""
self.unfinished_arguments[arg]['help'] += s
def append2helpf(self, arg, s):
"""Append the given string to the help of argument `arg`
Parameters
----------
arg: str
The function argument
s: str
The string to append to the help"""
return self._as_decorator('append2help', arg, s)
@staticmethod
def format_bold(section, text):
"""Make a bold formatting for the section header"""
return '**%s**\n\n%s' % (section, text)
@staticmethod
def format_rubric(section, text):
"""Make a bold formatting for the section header"""
return '.. rubric:: %s\n\n%s' % (section, text)
@staticmethod
def format_heading(section, text):
return '\n'.join([section, '-' * len(section), text])
def format_epilog_section(self, section, text):
"""Format a section for the epilog by inserting a format"""
try:
func = self._epilog_formatters[self.epilog_formatter]
except KeyError:
if not callable(self.epilog_formatter):
raise
func = self.epilog_formatter
return func(section, text)
def extract_as_epilog(self, text, sections=None, overwrite=False,
append=True):
"""Extract epilog sections from the a docstring
Parameters
----------
text
The docstring to use
sections: list of str
The headers of the sections to extract. If None, the
:attr:`epilog_sections` attribute is used
overwrite: bool
If True, overwrite the existing epilog
append: bool
If True, append to the existing epilog"""
if sections is None:
sections = self.epilog_sections
if ((not self.epilog or overwrite or append) and sections):
epilog_parts = []
for sec in sections:
text = docstrings._get_section(text, sec).strip()
if text:
epilog_parts.append(
self.format_epilog_section(sec, text))
if epilog_parts:
epilog = '\n\n'.join(epilog_parts)
if overwrite or not self.epilog:
self.epilog = epilog
else:
self.epilog += '\n\n' + epilog
def grouparg(self, arg, my_arg=None, parent_cmds=[]):
"""
Grouper function for chaining subcommands
Parameters
----------
arg: str
The current command line argument that is parsed
my_arg: str
The name of this subparser. If None, this parser is the main
parser and has no parent parser
parent_cmds: list of str
The available commands of the parent parsers
Returns
-------
str or None
The grouping key for the given `arg` or None if the key does
not correspond to this parser or this parser is the main parser
and does not have seen a subparser yet
Notes
-----
Quite complicated, there is no real need to deal with this function
"""
if self._subparsers_action is None:
return None
commands = self._subparsers_action.choices
currentarg = self.__currentarg
# the default return value is the current argument we are in or the
# name of the subparser itself
ret = currentarg or my_arg
if currentarg is not None:
# if we are already in a sub command, we use the sub parser
sp_key = commands[currentarg].grouparg(arg, currentarg, chain(
commands, parent_cmds))
if sp_key is None and arg in commands:
# if the subparser did not recognize the command, we use the
# command the corresponds to this parser or (of this parser
# is the parent parser) the current subparser
self.__currentarg = currentarg = arg
ret = my_arg or currentarg
elif sp_key not in commands and arg in parent_cmds:
# otherwise, if the subparser recognizes the commmand but it is
# not in the known command of this parser, it must be another
# command of the subparser and this parser can ignore it
ret = None
else:
# otherwise the command belongs to this subparser (if this one
# is not the subparser) or the current subparser
ret = my_arg or currentarg
elif arg in commands:
# if the argument is a valid subparser, we return this one
self.__currentarg = arg
ret = arg
elif arg in parent_cmds:
# if the argument is not a valid subparser but in one of our
# parents, we return None to signalize that we cannot categorize
# it
ret = None
return ret
def parse_known_args(self, args=None, namespace=None):
if self._chain_subparsers:
if args is None:
args = sys.argv[1:]
choices_d = OrderedDict()
remainders = OrderedDict()
main_args = []
# get the first argument to make sure that everything works
cmd = self.__currentarg = None
for i, (cmd, subargs) in enumerate(groupby(args, self.grouparg)):
if cmd is None:
main_args += list(subargs)
else:
# replace '-' by underscore
ns_cmd = cmd.replace('-', '_')
choices_d[ns_cmd], remainders[ns_cmd] = super(
FuncArgParser, self).parse_known_args(
list(chain(main_args, subargs)))
main_ns, remainders[None] = self.__parse_main(main_args)
for key, val in vars(main_ns).items():
choices_d[key] = val
self.__currentarg = None
if '__dummy' in choices_d:
del choices_d['__dummy']
return Namespace(**choices_d), list(chain(*remainders.values()))
# otherwise, use the default behaviour
return super(FuncArgParser, self).parse_known_args(args, namespace)
def __parse_main(self, args):
"""Parse the main arguments only. This is a work around for python 2.7
because argparse does not allow to parse arguments without subparsers
"""
if six.PY2:
self._subparsers_action.add_parser("__dummy")
return super(FuncArgParser, self).parse_known_args(
list(args) + ['__dummy'])
return super(FuncArgParser, self).parse_known_args(args)
@docstrings.get_sectionsf('FuncArgParser.update_short')
@docstrings.dedent
def update_short(self, **kwargs):
"""
Update the short optional arguments (those with one leading '-')
This method updates the short argument name for the specified function
arguments as stored in :attr:`unfinished_arguments`
Parameters
----------
``**kwargs``
Keywords must be keys in the :attr:`unfinished_arguments`
dictionary (i.e. keywords of the root functions), values the short
argument names
Examples
--------
Setting::
>>> parser.update_short(something='s', something_else='se')
is basically the same as::
>>> parser.update_arg('something', short='s')
>>> parser.update_arg('something_else', short='se')
which in turn is basically comparable to::
>>> parser.add_argument('-s', '--something', ...)
>>> parser.add_argument('-se', '--something_else', ...)
See Also
--------
update_shortf, update_long"""
for key, val in six.iteritems(kwargs):
self.update_arg(key, short=val)
@docstrings.dedent
def update_shortf(self, **kwargs):
"""
Update the short optional arguments belonging to a function
This method acts exactly like :meth:`update_short` but works as a
decorator (see :meth:`update_arg` and :meth:`update_argf`)
Parameters
----------
%(FuncArgParser.update_short.parameters)s
Returns
-------
function
The function that can be used as a decorator
Examples
--------
Use this method as a decorator::
>>> @parser.update_shortf(something='s', something_else='se')
... def do_something(something=None, something_else=None):
... ...
See also the examples in :meth:`update_short`.
See Also
--------
update_short, update_longf
"""
return self._as_decorator('update_short', **kwargs)
@docstrings.get_sectionsf('FuncArgParser.update_long')
@docstrings.dedent
def update_long(self, **kwargs):
"""
Update the long optional arguments (those with two leading '-')
This method updates the short argument name for the specified function
arguments as stored in :attr:`unfinished_arguments`
Parameters
----------
``**kwargs``
Keywords must be keys in the :attr:`unfinished_arguments`
dictionary (i.e. keywords of the root functions), values the long
argument names
Examples
--------
Setting::
>>> parser.update_long(something='s', something_else='se')
is basically the same as::
>>> parser.update_arg('something', long='s')
>>> parser.update_arg('something_else', long='se')
which in turn is basically comparable to::
>>> parser.add_argument('--s', dest='something', ...)
>>> parser.add_argument('--se', dest='something_else', ...)
See Also
--------
update_short, update_longf"""
for key, val in six.iteritems(kwargs):
self.update_arg(key, long=val)
@docstrings.dedent
def update_longf(self, **kwargs):
"""
Update the long optional arguments belonging to a function
This method acts exactly like :meth:`update_long` but works as a
decorator (see :meth:`update_arg` and :meth:`update_argf`)
Parameters
----------
%(FuncArgParser.update_long.parameters)s
Returns
-------
function
The function that can be used as a decorator
Examples
--------
Use this method as a decorator::
>>> @parser.update_shortf(something='s', something_else='se')
... def do_something(something=None, something_else=None):
... ...
See also the examples in :meth:`update_long`.
See Also
--------
update_short, update_longf
"""
return self._as_decorator('update_long', **kwargs)
def parse2func(self, args=None, func=None):
"""Parse the command line arguments to the setup function
This method parses the given command line arguments to the function
used in the :meth:`setup_args` method to setup up this parser
Parameters
----------
args: list
The list of command line arguments
func: function
An alternative function to use. If None, the last function or the
one specified through the `setup_as` parameter in the
:meth:`setup_args` is used.
Returns
-------
object
What ever is returned by the called function
Note
----
This method does not cover subparsers!"""
kws = vars(self.parse_args(args))
if func is None:
if self._setup_as:
func = kws.pop(self._setup_as)
else:
func = self._used_functions[-1]
return func(**kws)
def parse_known2func(self, args=None, func=None):
"""Parse the command line arguments to the setup function
This method parses the given command line arguments to the function
used in the :meth:`setup_args` method to setup up this parser
Parameters
----------
args: list
The list of command line arguments
func: function or str
An alternative function to use. If None, the last function or the
one specified through the `setup_as` parameter in the
:meth:`setup_args` is used.
Returns
-------
object
What ever is returned by the called function
list
The remaining command line arguments that could not be interpreted
Note
----
This method does not cover subparsers!"""
ns, remainder = self.parse_known_args(args)
kws = vars(ns)
if func is None:
if self._setup_as:
func = kws.pop(self._setup_as)
else:
func = self._used_functions[-1]
return func(**kws), remainder
def parse_chained(self, args=None):
"""
Parse the argument directly to the function used for setup
This function parses the command line arguments to the function that
has been used for the :meth:`setup_args`.
Parameters
----------
args: list
The arguments parsed to the :meth:`parse_args` function
Returns
-------
argparse.Namespace
The namespace with mapping from command name to the function
return
See also
--------
parse_known_chained
"""
kws = vars(self.parse_args(args))
return self._parse2subparser_funcs(kws)
def parse_known_chained(self, args=None):
"""
Parse the argument directly to the function used for setup
This function parses the command line arguments to the function that
has been used for the :meth:`setup_args` method.
Parameters
----------
args: list
The arguments parsed to the :meth:`parse_args` function
Returns
-------
argparse.Namespace
The namespace with mapping from command name to the function
return
list
The remaining arguments that could not be interpreted
See also
--------
parse_known
"""
ns, remainder = self.parse_known_args(args)
kws = vars(ns)
return self._parse2subparser_funcs(kws), remainder
def _parse2subparser_funcs(self, kws):
"""
Recursive function to parse arguments to chained parsers
"""
choices = getattr(self._subparsers_action, 'choices', {})
replaced = {key.replace('-', '_'): key for key in choices}
sp_commands = set(replaced).intersection(kws)
if not sp_commands:
if self._setup_as is not None:
func = kws.pop(self._setup_as)
else:
try:
func = self._used_functions[-1]
except IndexError:
return None
return func(**{
key: kws[key] for key in set(kws).difference(choices)})
else:
ret = {}
for key in sp_commands:
ret[key.replace('-', '_')] = \
choices[replaced[key]]._parse2subparser_funcs(
vars(kws[key]))
return Namespace(**ret)
def get_subparser(self, name):
"""
Convenience method to get a certain subparser
Parameters
----------
name: str
The name of the subparser
Returns
-------
FuncArgParser
The subparsers corresponding to `name`
"""
if self._subparsers_action is None:
raise ValueError("%s has no subparsers defined!" % self)
return self._subparsers_action.choices[name]
|
Chilipp/funcargparse | funcargparse/__init__.py | FuncArgParser.update_arg | python | def update_arg(self, arg, if_existent=None, **kwargs):
if if_existent or (if_existent is None and
arg in self.unfinished_arguments):
self.unfinished_arguments[arg].update(kwargs)
elif not if_existent and if_existent is not None:
self.unfinished_arguments.setdefault(arg, kwargs) | Update the `add_argument` data for the given parameter
Parameters
----------
arg: str
The name of the function argument
if_existent: bool or None
If True, the argument is updated. If None (default), the argument
is only updated, if it exists. Otherwise, if False, the given
``**kwargs`` are only used if the argument is not yet existing
``**kwargs``
The keyword arguments any parameter for the
:meth:`argparse.ArgumentParser.add_argument` method | train | https://github.com/Chilipp/funcargparse/blob/398ce8e7fa5aa35c465215446bda151cf1ecf7ad/funcargparse/__init__.py#L402-L422 | null | class FuncArgParser(ArgumentParser):
"""Subclass of an argument parser that get's parts of the information
from a given function"""
_finalized = False
#: The unfinished arguments after the setup
unfinished_arguments = {}
#: The sections to extract from a function docstring that should be used
#: in the epilog of this parser. See also the :meth:`setup_args` method
epilog_sections = ['Notes', 'References']
#: The formatter specification for the epilog. This can either be a string
#: out of 'header', 'bold', or
#: 'rubric' or a callable (i.e. function) that takes two arguments,
#: the section title and the section text, and returns a string.
#:
#: 'heading'
#: Use section headers such as::
#:
#: Notes
#: -----
#: 'bold'
#: Just make a bold header for the section, e.g. ``**Notes**``
#: 'rubric'
#: Use a rubric rst directive, e.g. ``.. rubric:: Notes``
#:
#: .. warning::
#:
#: When building a sphinx documentation using the sphinx-argparse
#: module, this value should be set to ``'bold'`` or ``'rubric'``! Just
#: add this two lines to your conf.py:
#:
#: .. code-block:: python
#:
#: import funcargparse
#: funcargparse.FuncArgParser.epilog_formatter = 'rubric'
epilog_formatter = 'heading'
def __init__(self, *args, **kwargs):
"""
Parameters
----------
``*args,**kwargs``
Theses arguments are determined by the
:class:`argparse.ArgumentParser` base class. Note that by default,
we use a :class:`argparse.RawTextHelpFormatter` class for the
`formatter_class` keyword, whereas the
:class:`argparse.ArgumentParser` uses a
:class:`argparse.HelpFormatter`
Other Parameters
----------------
epilog_sections: list of str
The default sections to use for the epilog (see the
:attr:`epilog_sections` attribute). They can also be specified
each time the :meth:`setup_args` method is called
epilog_formatter: {'header', 'bold', 'rubric'} or function
Specify how the epilog sections should be formatted and defaults to
the :attr:`epilog_formatter` attribute. This can either be a string
out of 'header', 'bold', or 'rubric' or a callable (i.e. function)
that takes two arguments, the section title and the section text,
and returns a string.
'heading'
Use section headers such as::
Notes
-----
'bold'
Just make a bold header for the section, e.g. ``**Notes**``
'rubric'
Use a rubric rst directive, e.g. ``.. rubric:: Notes``
"""
self._subparsers_action = None
kwargs.setdefault('formatter_class', argparse.RawTextHelpFormatter)
epilog_sections = kwargs.pop('epilog_sections', None)
if epilog_sections is not None:
self.epilog_sections = epilog_sections
epilog_formatter = kwargs.pop('epilog_formatter', None)
if epilog_formatter is not None:
self.epilog_formatter = epilog_formatter
super(FuncArgParser, self).__init__(*args, **kwargs)
self.unfinished_arguments = OrderedDict()
self._used_functions = []
self.__currentarg = None
self._chain_subparsers = False
self._setup_as = None
self._epilog_formatters = {'heading': self.format_heading,
'bold': self.format_bold,
'rubric': self.format_rubric}
@staticmethod
def get_param_doc(doc, param):
"""Get the documentation and datatype for a parameter
This function returns the documentation and the argument for a
napoleon like structured docstring `doc`
Parameters
----------
doc: str
The base docstring to use
param: str
The argument to use
Returns
-------
str
The documentation of the given `param`
str
The datatype of the given `param`"""
arg_doc = docstrings.keep_params_s(doc, [param]) or \
docstrings.keep_types_s(doc, [param])
dtype = None
if arg_doc:
lines = arg_doc.splitlines()
arg_doc = dedents('\n' + '\n'.join(lines[1:]))
param_desc = lines[0].split(':', 1)
if len(param_desc) > 1:
dtype = param_desc[1].strip()
return arg_doc, dtype
@docstrings.get_sectionsf('FuncArgParser.setup_args',
sections=['Parameters', 'Returns'])
@docstrings.dedent
def setup_args(self, func=None, setup_as=None, insert_at=None,
interprete=True, epilog_sections=None,
overwrite=False, append_epilog=True):
"""
Add the parameters from the given `func` to the parameter settings
Parameters
----------
func: function
The function to use. If None, a function will be returned that can
be used as a decorator
setup_as: str
The attribute that shall be assigned to the function in the
resulting namespace. If specified, this function will be used when
calling the :meth:`parse2func` method
insert_at: int
The position where the given `func` should be inserted. If None,
it will be appended at the end and used when calling the
:meth:`parse2func` method
interprete: bool
If True (default), the docstrings are interpreted and switches and
lists are automatically inserted (see the
[interpretation-docs]_
epilog_sections: list of str
The headers of the sections to extract. If None, the
:attr:`epilog_sections` attribute is used
overwrite: bool
If True, overwrite the existing epilog and the existing description
of the parser
append_epilog: bool
If True, append to the existing epilog
Returns
-------
function
Either the function that can be used as a decorator (if `func` is
``None``), or the given `func` itself.
Examples
--------
Use this method as a decorator::
>>> @parser.setup_args
... def do_something(a=1):
'''
Just an example
Parameters
----------
a: int
A number to increment by one
'''
return a + 1
>>> args = parser.parse_args('-a 2'.split())
Or by specifying the setup_as function::
>>> @parser.setup_args(setup_as='func')
... def do_something(a=1):
'''
Just an example
Parameters
----------
a: int
A number to increment by one
'''
return a + 1
>>> args = parser.parse_args('-a 2'.split())
>>> args.func is do_something
>>> parser.parse2func('-a 2'.split())
3
References
----------
.. [interpretation-docs]
http://funcargparse.readthedocs.io/en/latest/docstring_interpretation.html)
"""
def setup(func):
# insert the function
if insert_at is None:
self._used_functions.append(func)
else:
self._used_functions.insert(insert_at, func)
args_dict = self.unfinished_arguments
# save the function to use in parse2funcs
if setup_as:
args_dict[setup_as] = dict(
long=setup_as, default=func, help=argparse.SUPPRESS)
self._setup_as = setup_as
# create arguments
args, varargs, varkw, defaults = inspect.getargspec(func)
full_doc = docstrings.dedents(inspect.getdoc(func))
summary = docstrings.get_full_description(full_doc)
if summary:
if not self.description or overwrite:
self.description = summary
full_doc = docstrings._remove_summary(full_doc)
self.extract_as_epilog(full_doc, epilog_sections, overwrite,
append_epilog)
doc = docstrings._get_section(full_doc, 'Parameters') + '\n'
doc += docstrings._get_section(full_doc, 'Other Parameters')
doc = doc.rstrip()
default_min = len(args or []) - len(defaults or [])
for i, arg in enumerate(args):
if arg == 'self' or arg in args_dict:
continue
arg_doc, dtype = self.get_param_doc(doc, arg)
args_dict[arg] = d = {'dest': arg, 'short': arg.replace('_',
'-'),
'long': arg.replace('_', '-')}
if arg_doc:
d['help'] = arg_doc
if i >= default_min:
d['default'] = defaults[i - default_min]
else:
d['positional'] = True
if interprete and dtype == 'bool' and 'default' in d:
d['action'] = 'store_false' if d['default'] else \
'store_true'
elif interprete and dtype:
if dtype.startswith('list of'):
d['nargs'] = '+'
dtype = dtype[7:].strip()
if dtype in ['str', 'string', 'strings']:
d['type'] = six.text_type
if dtype == 'strings':
dtype = 'string'
else:
try:
d['type'] = getattr(builtins, dtype)
except AttributeError:
try: # maybe the dtype has a final 's'
d['type'] = getattr(builtins, dtype[:-1])
dtype = dtype[:-1]
except AttributeError:
pass
d['metavar'] = dtype
return func
if func is None:
return setup
else:
return setup(func)
@docstrings.get_sectionsf('FuncArgParser.add_subparsers')
@docstrings.dedent
def add_subparsers(self, *args, **kwargs):
"""
Add subparsers to this parser
Parameters
----------
``*args, **kwargs``
As specified by the original
:meth:`argparse.ArgumentParser.add_subparsers` method
chain: bool
Default: False. If True, It is enabled to chain subparsers"""
chain = kwargs.pop('chain', None)
ret = super(FuncArgParser, self).add_subparsers(*args, **kwargs)
if chain:
self._chain_subparsers = True
self._subparsers_action = ret
return ret
@docstrings.dedent
def setup_subparser(
self, func=None, setup_as=None, insert_at=None, interprete=True,
epilog_sections=None, overwrite=False, append_epilog=True,
return_parser=False, name=None, **kwargs):
"""
Create a subparser with the name of the given function
Parameters are the same as for the :meth:`setup_args` function, other
parameters are parsed to the :meth:`add_subparsers` method if (and only
if) this method has not already been called.
Parameters
----------
%(FuncArgParser.setup_args.parameters)s
return_parser: bool
If True, the create parser is returned instead of the function
name: str
The name of the created parser. If None, the function name is used
and underscores (``'_'``) are replaced by minus (``'-'``)
``**kwargs``
Any other parameter that is passed to the add_parser method that
creates the parser
Other Parameters
----------------
Returns
-------
FuncArgParser or %(FuncArgParser.setup_args.returns)s
If return_parser is True, the created subparser is returned
Examples
--------
Use this method as a decorator::
>>> from funcargparser import FuncArgParser
>>> parser = FuncArgParser()
>>> @parser.setup_subparser
... def my_func(my_argument=None):
... pass
>>> args = parser.parse_args('my-func -my-argument 1'.split())
"""
def setup(func):
if self._subparsers_action is None:
raise RuntimeError(
"No subparsers have yet been created! Run the "
"add_subparsers method first!")
# replace underscore by '-'
name2use = name
if name2use is None:
name2use = func.__name__.replace('_', '-')
kwargs.setdefault('help', docstrings.get_summary(
docstrings.dedents(inspect.getdoc(func))))
parser = self._subparsers_action.add_parser(name2use, **kwargs)
parser.setup_args(
func, setup_as=setup_as, insert_at=insert_at,
interprete=interprete, epilog_sections=epilog_sections,
overwrite=overwrite, append_epilog=append_epilog)
return func, parser
if func is None:
return lambda f: setup(f)[0]
else:
return setup(func)[int(return_parser)]
@docstrings.get_sectionsf('FuncArgParser.update_arg')
@docstrings.dedent
@docstrings.dedent
def update_argf(self, arg, **kwargs):
"""
Update the arguments as a decorator
Parameters
---------
%(FuncArgParser.update_arg.parameters)s
Examples
--------
Use this method as a decorator::
>>> from funcargparser import FuncArgParser
>>> parser = FuncArgParser()
>>> @parser.update_argf('my_argument', type=int)
... def my_func(my_argument=None):
... pass
>>> args = parser.parse_args('my-func -my-argument 1'.split())
>>> isinstance(args.my_argument, int)
True
See Also
--------
update_arg"""
return self._as_decorator('update_arg', arg, **kwargs)
def _as_decorator(self, funcname, *args, **kwargs):
def func_decorator(func):
success = False
for parser in self._get_corresponding_parsers(func):
getattr(parser, funcname)(*args, **kwargs)
success = True
if not success:
raise ValueError(
"Could not figure out to which this %s belongs" % func)
return func
return func_decorator
def _get_corresponding_parsers(self, func):
"""Get the parser that has been set up by the given `function`"""
if func in self._used_functions:
yield self
if self._subparsers_action is not None:
for parser in self._subparsers_action.choices.values():
for sp in parser._get_corresponding_parsers(func):
yield sp
def pop_arg(self, *args, **kwargs):
"""Delete a previously defined argument from the parser
"""
return self.unfinished_arguments.pop(*args, **kwargs)
def pop_argf(self, *args, **kwargs):
"""Delete a previously defined argument from the parser via decorators
Same as :meth:`pop_arg` but it can be used as a decorator"""
return self._as_decorator('pop_arg', *args, **kwargs)
def pop_key(self, arg, key, *args, **kwargs):
"""Delete a previously defined key for the `add_argument`
"""
return self.unfinished_arguments[arg].pop(key, *args, **kwargs)
def pop_keyf(self, *args, **kwargs):
"""Delete a previously defined key for the `add_argument`
Same as :meth:`pop_key` but it can be used as a decorator"""
return self._as_decorator('pop_key', *args, **kwargs)
def create_arguments(self, subparsers=False):
"""Create and add the arguments
Parameters
----------
subparsers: bool
If True, the arguments of the subparsers are also created"""
ret = []
if not self._finalized:
for arg, d in self.unfinished_arguments.items():
try:
not_positional = int(not d.pop('positional', False))
short = d.pop('short', None)
long_name = d.pop('long', None)
if short is None and long_name is None:
raise ValueError(
"Either a short (-) or a long (--) argument must "
"be provided!")
if not not_positional:
short = arg
long_name = None
d.pop('dest', None)
if short == long_name:
long_name = None
args = []
if short:
args.append('-' * not_positional + short)
if long_name:
args.append('--' * not_positional + long_name)
group = d.pop('group', self)
if d.get('action') in ['store_true', 'store_false']:
d.pop('metavar', None)
ret.append(group.add_argument(*args, **d))
except Exception:
print('Error while creating argument %s' % arg)
raise
else:
raise ValueError('Parser has already been finalized!')
self._finalized = True
if subparsers and self._subparsers_action is not None:
for parser in self._subparsers_action.choices.values():
parser.create_arguments(True)
return ret
def append2help(self, arg, s):
"""Append the given string to the help of argument `arg`
Parameters
----------
arg: str
The function argument
s: str
The string to append to the help"""
self.unfinished_arguments[arg]['help'] += s
def append2helpf(self, arg, s):
"""Append the given string to the help of argument `arg`
Parameters
----------
arg: str
The function argument
s: str
The string to append to the help"""
return self._as_decorator('append2help', arg, s)
@staticmethod
def format_bold(section, text):
"""Make a bold formatting for the section header"""
return '**%s**\n\n%s' % (section, text)
@staticmethod
def format_rubric(section, text):
"""Make a bold formatting for the section header"""
return '.. rubric:: %s\n\n%s' % (section, text)
@staticmethod
def format_heading(section, text):
return '\n'.join([section, '-' * len(section), text])
def format_epilog_section(self, section, text):
"""Format a section for the epilog by inserting a format"""
try:
func = self._epilog_formatters[self.epilog_formatter]
except KeyError:
if not callable(self.epilog_formatter):
raise
func = self.epilog_formatter
return func(section, text)
def extract_as_epilog(self, text, sections=None, overwrite=False,
append=True):
"""Extract epilog sections from the a docstring
Parameters
----------
text
The docstring to use
sections: list of str
The headers of the sections to extract. If None, the
:attr:`epilog_sections` attribute is used
overwrite: bool
If True, overwrite the existing epilog
append: bool
If True, append to the existing epilog"""
if sections is None:
sections = self.epilog_sections
if ((not self.epilog or overwrite or append) and sections):
epilog_parts = []
for sec in sections:
text = docstrings._get_section(text, sec).strip()
if text:
epilog_parts.append(
self.format_epilog_section(sec, text))
if epilog_parts:
epilog = '\n\n'.join(epilog_parts)
if overwrite or not self.epilog:
self.epilog = epilog
else:
self.epilog += '\n\n' + epilog
def grouparg(self, arg, my_arg=None, parent_cmds=[]):
"""
Grouper function for chaining subcommands
Parameters
----------
arg: str
The current command line argument that is parsed
my_arg: str
The name of this subparser. If None, this parser is the main
parser and has no parent parser
parent_cmds: list of str
The available commands of the parent parsers
Returns
-------
str or None
The grouping key for the given `arg` or None if the key does
not correspond to this parser or this parser is the main parser
and does not have seen a subparser yet
Notes
-----
Quite complicated, there is no real need to deal with this function
"""
if self._subparsers_action is None:
return None
commands = self._subparsers_action.choices
currentarg = self.__currentarg
# the default return value is the current argument we are in or the
# name of the subparser itself
ret = currentarg or my_arg
if currentarg is not None:
# if we are already in a sub command, we use the sub parser
sp_key = commands[currentarg].grouparg(arg, currentarg, chain(
commands, parent_cmds))
if sp_key is None and arg in commands:
# if the subparser did not recognize the command, we use the
# command the corresponds to this parser or (of this parser
# is the parent parser) the current subparser
self.__currentarg = currentarg = arg
ret = my_arg or currentarg
elif sp_key not in commands and arg in parent_cmds:
# otherwise, if the subparser recognizes the commmand but it is
# not in the known command of this parser, it must be another
# command of the subparser and this parser can ignore it
ret = None
else:
# otherwise the command belongs to this subparser (if this one
# is not the subparser) or the current subparser
ret = my_arg or currentarg
elif arg in commands:
# if the argument is a valid subparser, we return this one
self.__currentarg = arg
ret = arg
elif arg in parent_cmds:
# if the argument is not a valid subparser but in one of our
# parents, we return None to signalize that we cannot categorize
# it
ret = None
return ret
def parse_known_args(self, args=None, namespace=None):
if self._chain_subparsers:
if args is None:
args = sys.argv[1:]
choices_d = OrderedDict()
remainders = OrderedDict()
main_args = []
# get the first argument to make sure that everything works
cmd = self.__currentarg = None
for i, (cmd, subargs) in enumerate(groupby(args, self.grouparg)):
if cmd is None:
main_args += list(subargs)
else:
# replace '-' by underscore
ns_cmd = cmd.replace('-', '_')
choices_d[ns_cmd], remainders[ns_cmd] = super(
FuncArgParser, self).parse_known_args(
list(chain(main_args, subargs)))
main_ns, remainders[None] = self.__parse_main(main_args)
for key, val in vars(main_ns).items():
choices_d[key] = val
self.__currentarg = None
if '__dummy' in choices_d:
del choices_d['__dummy']
return Namespace(**choices_d), list(chain(*remainders.values()))
# otherwise, use the default behaviour
return super(FuncArgParser, self).parse_known_args(args, namespace)
def __parse_main(self, args):
"""Parse the main arguments only. This is a work around for python 2.7
because argparse does not allow to parse arguments without subparsers
"""
if six.PY2:
self._subparsers_action.add_parser("__dummy")
return super(FuncArgParser, self).parse_known_args(
list(args) + ['__dummy'])
return super(FuncArgParser, self).parse_known_args(args)
@docstrings.get_sectionsf('FuncArgParser.update_short')
@docstrings.dedent
def update_short(self, **kwargs):
"""
Update the short optional arguments (those with one leading '-')
This method updates the short argument name for the specified function
arguments as stored in :attr:`unfinished_arguments`
Parameters
----------
``**kwargs``
Keywords must be keys in the :attr:`unfinished_arguments`
dictionary (i.e. keywords of the root functions), values the short
argument names
Examples
--------
Setting::
>>> parser.update_short(something='s', something_else='se')
is basically the same as::
>>> parser.update_arg('something', short='s')
>>> parser.update_arg('something_else', short='se')
which in turn is basically comparable to::
>>> parser.add_argument('-s', '--something', ...)
>>> parser.add_argument('-se', '--something_else', ...)
See Also
--------
update_shortf, update_long"""
for key, val in six.iteritems(kwargs):
self.update_arg(key, short=val)
@docstrings.dedent
def update_shortf(self, **kwargs):
"""
Update the short optional arguments belonging to a function
This method acts exactly like :meth:`update_short` but works as a
decorator (see :meth:`update_arg` and :meth:`update_argf`)
Parameters
----------
%(FuncArgParser.update_short.parameters)s
Returns
-------
function
The function that can be used as a decorator
Examples
--------
Use this method as a decorator::
>>> @parser.update_shortf(something='s', something_else='se')
... def do_something(something=None, something_else=None):
... ...
See also the examples in :meth:`update_short`.
See Also
--------
update_short, update_longf
"""
return self._as_decorator('update_short', **kwargs)
@docstrings.get_sectionsf('FuncArgParser.update_long')
@docstrings.dedent
def update_long(self, **kwargs):
"""
Update the long optional arguments (those with two leading '-')
This method updates the short argument name for the specified function
arguments as stored in :attr:`unfinished_arguments`
Parameters
----------
``**kwargs``
Keywords must be keys in the :attr:`unfinished_arguments`
dictionary (i.e. keywords of the root functions), values the long
argument names
Examples
--------
Setting::
>>> parser.update_long(something='s', something_else='se')
is basically the same as::
>>> parser.update_arg('something', long='s')
>>> parser.update_arg('something_else', long='se')
which in turn is basically comparable to::
>>> parser.add_argument('--s', dest='something', ...)
>>> parser.add_argument('--se', dest='something_else', ...)
See Also
--------
update_short, update_longf"""
for key, val in six.iteritems(kwargs):
self.update_arg(key, long=val)
@docstrings.dedent
def update_longf(self, **kwargs):
"""
Update the long optional arguments belonging to a function
This method acts exactly like :meth:`update_long` but works as a
decorator (see :meth:`update_arg` and :meth:`update_argf`)
Parameters
----------
%(FuncArgParser.update_long.parameters)s
Returns
-------
function
The function that can be used as a decorator
Examples
--------
Use this method as a decorator::
>>> @parser.update_shortf(something='s', something_else='se')
... def do_something(something=None, something_else=None):
... ...
See also the examples in :meth:`update_long`.
See Also
--------
update_short, update_longf
"""
return self._as_decorator('update_long', **kwargs)
def parse2func(self, args=None, func=None):
"""Parse the command line arguments to the setup function
This method parses the given command line arguments to the function
used in the :meth:`setup_args` method to setup up this parser
Parameters
----------
args: list
The list of command line arguments
func: function
An alternative function to use. If None, the last function or the
one specified through the `setup_as` parameter in the
:meth:`setup_args` is used.
Returns
-------
object
What ever is returned by the called function
Note
----
This method does not cover subparsers!"""
kws = vars(self.parse_args(args))
if func is None:
if self._setup_as:
func = kws.pop(self._setup_as)
else:
func = self._used_functions[-1]
return func(**kws)
def parse_known2func(self, args=None, func=None):
"""Parse the command line arguments to the setup function
This method parses the given command line arguments to the function
used in the :meth:`setup_args` method to setup up this parser
Parameters
----------
args: list
The list of command line arguments
func: function or str
An alternative function to use. If None, the last function or the
one specified through the `setup_as` parameter in the
:meth:`setup_args` is used.
Returns
-------
object
What ever is returned by the called function
list
The remaining command line arguments that could not be interpreted
Note
----
This method does not cover subparsers!"""
ns, remainder = self.parse_known_args(args)
kws = vars(ns)
if func is None:
if self._setup_as:
func = kws.pop(self._setup_as)
else:
func = self._used_functions[-1]
return func(**kws), remainder
def parse_chained(self, args=None):
"""
Parse the argument directly to the function used for setup
This function parses the command line arguments to the function that
has been used for the :meth:`setup_args`.
Parameters
----------
args: list
The arguments parsed to the :meth:`parse_args` function
Returns
-------
argparse.Namespace
The namespace with mapping from command name to the function
return
See also
--------
parse_known_chained
"""
kws = vars(self.parse_args(args))
return self._parse2subparser_funcs(kws)
def parse_known_chained(self, args=None):
"""
Parse the argument directly to the function used for setup
This function parses the command line arguments to the function that
has been used for the :meth:`setup_args` method.
Parameters
----------
args: list
The arguments parsed to the :meth:`parse_args` function
Returns
-------
argparse.Namespace
The namespace with mapping from command name to the function
return
list
The remaining arguments that could not be interpreted
See also
--------
parse_known
"""
ns, remainder = self.parse_known_args(args)
kws = vars(ns)
return self._parse2subparser_funcs(kws), remainder
def _parse2subparser_funcs(self, kws):
"""
Recursive function to parse arguments to chained parsers
"""
choices = getattr(self._subparsers_action, 'choices', {})
replaced = {key.replace('-', '_'): key for key in choices}
sp_commands = set(replaced).intersection(kws)
if not sp_commands:
if self._setup_as is not None:
func = kws.pop(self._setup_as)
else:
try:
func = self._used_functions[-1]
except IndexError:
return None
return func(**{
key: kws[key] for key in set(kws).difference(choices)})
else:
ret = {}
for key in sp_commands:
ret[key.replace('-', '_')] = \
choices[replaced[key]]._parse2subparser_funcs(
vars(kws[key]))
return Namespace(**ret)
def get_subparser(self, name):
"""
Convenience method to get a certain subparser
Parameters
----------
name: str
The name of the subparser
Returns
-------
FuncArgParser
The subparsers corresponding to `name`
"""
if self._subparsers_action is None:
raise ValueError("%s has no subparsers defined!" % self)
return self._subparsers_action.choices[name]
|
Chilipp/funcargparse | funcargparse/__init__.py | FuncArgParser._get_corresponding_parsers | python | def _get_corresponding_parsers(self, func):
if func in self._used_functions:
yield self
if self._subparsers_action is not None:
for parser in self._subparsers_action.choices.values():
for sp in parser._get_corresponding_parsers(func):
yield sp | Get the parser that has been set up by the given `function` | train | https://github.com/Chilipp/funcargparse/blob/398ce8e7fa5aa35c465215446bda151cf1ecf7ad/funcargparse/__init__.py#L467-L474 | null | class FuncArgParser(ArgumentParser):
"""Subclass of an argument parser that get's parts of the information
from a given function"""
_finalized = False
#: The unfinished arguments after the setup
unfinished_arguments = {}
#: The sections to extract from a function docstring that should be used
#: in the epilog of this parser. See also the :meth:`setup_args` method
epilog_sections = ['Notes', 'References']
#: The formatter specification for the epilog. This can either be a string
#: out of 'header', 'bold', or
#: 'rubric' or a callable (i.e. function) that takes two arguments,
#: the section title and the section text, and returns a string.
#:
#: 'heading'
#: Use section headers such as::
#:
#: Notes
#: -----
#: 'bold'
#: Just make a bold header for the section, e.g. ``**Notes**``
#: 'rubric'
#: Use a rubric rst directive, e.g. ``.. rubric:: Notes``
#:
#: .. warning::
#:
#: When building a sphinx documentation using the sphinx-argparse
#: module, this value should be set to ``'bold'`` or ``'rubric'``! Just
#: add this two lines to your conf.py:
#:
#: .. code-block:: python
#:
#: import funcargparse
#: funcargparse.FuncArgParser.epilog_formatter = 'rubric'
epilog_formatter = 'heading'
def __init__(self, *args, **kwargs):
"""
Parameters
----------
``*args,**kwargs``
Theses arguments are determined by the
:class:`argparse.ArgumentParser` base class. Note that by default,
we use a :class:`argparse.RawTextHelpFormatter` class for the
`formatter_class` keyword, whereas the
:class:`argparse.ArgumentParser` uses a
:class:`argparse.HelpFormatter`
Other Parameters
----------------
epilog_sections: list of str
The default sections to use for the epilog (see the
:attr:`epilog_sections` attribute). They can also be specified
each time the :meth:`setup_args` method is called
epilog_formatter: {'header', 'bold', 'rubric'} or function
Specify how the epilog sections should be formatted and defaults to
the :attr:`epilog_formatter` attribute. This can either be a string
out of 'header', 'bold', or 'rubric' or a callable (i.e. function)
that takes two arguments, the section title and the section text,
and returns a string.
'heading'
Use section headers such as::
Notes
-----
'bold'
Just make a bold header for the section, e.g. ``**Notes**``
'rubric'
Use a rubric rst directive, e.g. ``.. rubric:: Notes``
"""
self._subparsers_action = None
kwargs.setdefault('formatter_class', argparse.RawTextHelpFormatter)
epilog_sections = kwargs.pop('epilog_sections', None)
if epilog_sections is not None:
self.epilog_sections = epilog_sections
epilog_formatter = kwargs.pop('epilog_formatter', None)
if epilog_formatter is not None:
self.epilog_formatter = epilog_formatter
super(FuncArgParser, self).__init__(*args, **kwargs)
self.unfinished_arguments = OrderedDict()
self._used_functions = []
self.__currentarg = None
self._chain_subparsers = False
self._setup_as = None
self._epilog_formatters = {'heading': self.format_heading,
'bold': self.format_bold,
'rubric': self.format_rubric}
@staticmethod
def get_param_doc(doc, param):
"""Get the documentation and datatype for a parameter
This function returns the documentation and the argument for a
napoleon like structured docstring `doc`
Parameters
----------
doc: str
The base docstring to use
param: str
The argument to use
Returns
-------
str
The documentation of the given `param`
str
The datatype of the given `param`"""
arg_doc = docstrings.keep_params_s(doc, [param]) or \
docstrings.keep_types_s(doc, [param])
dtype = None
if arg_doc:
lines = arg_doc.splitlines()
arg_doc = dedents('\n' + '\n'.join(lines[1:]))
param_desc = lines[0].split(':', 1)
if len(param_desc) > 1:
dtype = param_desc[1].strip()
return arg_doc, dtype
@docstrings.get_sectionsf('FuncArgParser.setup_args',
sections=['Parameters', 'Returns'])
@docstrings.dedent
def setup_args(self, func=None, setup_as=None, insert_at=None,
interprete=True, epilog_sections=None,
overwrite=False, append_epilog=True):
"""
Add the parameters from the given `func` to the parameter settings
Parameters
----------
func: function
The function to use. If None, a function will be returned that can
be used as a decorator
setup_as: str
The attribute that shall be assigned to the function in the
resulting namespace. If specified, this function will be used when
calling the :meth:`parse2func` method
insert_at: int
The position where the given `func` should be inserted. If None,
it will be appended at the end and used when calling the
:meth:`parse2func` method
interprete: bool
If True (default), the docstrings are interpreted and switches and
lists are automatically inserted (see the
[interpretation-docs]_
epilog_sections: list of str
The headers of the sections to extract. If None, the
:attr:`epilog_sections` attribute is used
overwrite: bool
If True, overwrite the existing epilog and the existing description
of the parser
append_epilog: bool
If True, append to the existing epilog
Returns
-------
function
Either the function that can be used as a decorator (if `func` is
``None``), or the given `func` itself.
Examples
--------
Use this method as a decorator::
>>> @parser.setup_args
... def do_something(a=1):
'''
Just an example
Parameters
----------
a: int
A number to increment by one
'''
return a + 1
>>> args = parser.parse_args('-a 2'.split())
Or by specifying the setup_as function::
>>> @parser.setup_args(setup_as='func')
... def do_something(a=1):
'''
Just an example
Parameters
----------
a: int
A number to increment by one
'''
return a + 1
>>> args = parser.parse_args('-a 2'.split())
>>> args.func is do_something
>>> parser.parse2func('-a 2'.split())
3
References
----------
.. [interpretation-docs]
http://funcargparse.readthedocs.io/en/latest/docstring_interpretation.html)
"""
def setup(func):
# insert the function
if insert_at is None:
self._used_functions.append(func)
else:
self._used_functions.insert(insert_at, func)
args_dict = self.unfinished_arguments
# save the function to use in parse2funcs
if setup_as:
args_dict[setup_as] = dict(
long=setup_as, default=func, help=argparse.SUPPRESS)
self._setup_as = setup_as
# create arguments
args, varargs, varkw, defaults = inspect.getargspec(func)
full_doc = docstrings.dedents(inspect.getdoc(func))
summary = docstrings.get_full_description(full_doc)
if summary:
if not self.description or overwrite:
self.description = summary
full_doc = docstrings._remove_summary(full_doc)
self.extract_as_epilog(full_doc, epilog_sections, overwrite,
append_epilog)
doc = docstrings._get_section(full_doc, 'Parameters') + '\n'
doc += docstrings._get_section(full_doc, 'Other Parameters')
doc = doc.rstrip()
default_min = len(args or []) - len(defaults or [])
for i, arg in enumerate(args):
if arg == 'self' or arg in args_dict:
continue
arg_doc, dtype = self.get_param_doc(doc, arg)
args_dict[arg] = d = {'dest': arg, 'short': arg.replace('_',
'-'),
'long': arg.replace('_', '-')}
if arg_doc:
d['help'] = arg_doc
if i >= default_min:
d['default'] = defaults[i - default_min]
else:
d['positional'] = True
if interprete and dtype == 'bool' and 'default' in d:
d['action'] = 'store_false' if d['default'] else \
'store_true'
elif interprete and dtype:
if dtype.startswith('list of'):
d['nargs'] = '+'
dtype = dtype[7:].strip()
if dtype in ['str', 'string', 'strings']:
d['type'] = six.text_type
if dtype == 'strings':
dtype = 'string'
else:
try:
d['type'] = getattr(builtins, dtype)
except AttributeError:
try: # maybe the dtype has a final 's'
d['type'] = getattr(builtins, dtype[:-1])
dtype = dtype[:-1]
except AttributeError:
pass
d['metavar'] = dtype
return func
if func is None:
return setup
else:
return setup(func)
@docstrings.get_sectionsf('FuncArgParser.add_subparsers')
@docstrings.dedent
def add_subparsers(self, *args, **kwargs):
    """
    Add subparsers to this parser

    Parameters
    ----------
    ``*args, **kwargs``
        As specified by the original
        :meth:`argparse.ArgumentParser.add_subparsers` method
    chain: bool
        Default: False. If True, It is enabled to chain subparsers"""
    # `chain` is our own keyword -- strip it before delegating to argparse
    if kwargs.pop('chain', None):
        self._chain_subparsers = True
    action = super(FuncArgParser, self).add_subparsers(*args, **kwargs)
    # remember the action so that setup_subparser/get_subparser can use it
    self._subparsers_action = action
    return action
@docstrings.dedent
def setup_subparser(
        self, func=None, setup_as=None, insert_at=None, interprete=True,
        epilog_sections=None, overwrite=False, append_epilog=True,
        return_parser=False, name=None, **kwargs):
    """
    Create a subparser with the name of the given function

    Parameters are the same as for the :meth:`setup_args` function, other
    parameters are parsed to the :meth:`add_subparsers` method if (and only
    if) this method has not already been called.

    Parameters
    ----------
    %(FuncArgParser.setup_args.parameters)s
    return_parser: bool
        If True, the create parser is returned instead of the function
    name: str
        The name of the created parser. If None, the function name is used
        and underscores (``'_'``) are replaced by minus (``'-'``)
    ``**kwargs``
        Any other parameter that is passed to the add_parser method that
        creates the parser

    Returns
    -------
    FuncArgParser or %(FuncArgParser.setup_args.returns)s
        If return_parser is True, the created subparser is returned

    Examples
    --------
    Use this method as a decorator::

        >>> from funcargparser import FuncArgParser

        >>> parser = FuncArgParser()

        >>> @parser.setup_subparser
        ... def my_func(my_argument=None):
        ...     pass

        >>> args = parser.parse_args('my-func -my-argument 1'.split())
    """
    def setup(func):
        # a subparsers action must already exist (see add_subparsers)
        if self._subparsers_action is None:
            raise RuntimeError(
                "No subparsers have yet been created! Run the "
                "add_subparsers method first!")
        # replace underscore by '-'
        name2use = name
        if name2use is None:
            name2use = func.__name__.replace('_', '-')
        # default the one-line help to the summary of the docstring
        kwargs.setdefault('help', docstrings.get_summary(
            docstrings.dedents(inspect.getdoc(func))))
        parser = self._subparsers_action.add_parser(name2use, **kwargs)
        parser.setup_args(
            func, setup_as=setup_as, insert_at=insert_at,
            interprete=interprete, epilog_sections=epilog_sections,
            overwrite=overwrite, append_epilog=append_epilog)
        return func, parser
    if func is None:
        # used with keyword arguments as ``@parser.setup_subparser(...)``:
        # return a decorator that hands back the function only
        return lambda f: setup(f)[0]
    else:
        # used directly: return the function (0) or the new parser (1)
        return setup(func)[int(return_parser)]
@docstrings.get_sectionsf('FuncArgParser.update_arg')
@docstrings.dedent
def update_arg(self, arg, if_existent=None, **kwargs):
    """
    Update the `add_argument` data for the given parameter

    Parameters
    ----------
    arg: str
        The name of the function argument
    if_existent: bool or None
        If True, the argument is updated. If None (default), the argument
        is only updated, if it exists. Otherwise, if False, the given
        ``**kwargs`` are only used if the argument is not yet existing
    ``**kwargs``
        The keyword arguments any parameter for the
        :meth:`argparse.ArgumentParser.add_argument` method
    """
    # tri-state `if_existent`:
    #   True  -> update unconditionally (raises KeyError for an unknown
    #            argument)
    #   None  -> update only when the argument is already registered
    #   False -> only register defaults for a not-yet-existing argument
    if if_existent or (if_existent is None and
                       arg in self.unfinished_arguments):
        self.unfinished_arguments[arg].update(kwargs)
    elif not if_existent and if_existent is not None:
        self.unfinished_arguments.setdefault(arg, kwargs)
@docstrings.dedent
def update_argf(self, arg, **kwargs):
    """
    Update the arguments as a decorator

    Parameters
    ----------
    %(FuncArgParser.update_arg.parameters)s

    Examples
    --------
    Use this method as a decorator::

        >>> from funcargparser import FuncArgParser

        >>> parser = FuncArgParser()

        >>> @parser.update_argf('my_argument', type=int)
        ... def my_func(my_argument=None):
        ...     pass

        >>> args = parser.parse_args('my-func -my-argument 1'.split())

        >>> isinstance(args.my_argument, int)
        True

    See Also
    --------
    update_arg"""
    # delegate to ``update_arg`` on every parser set up by the decorated
    # function
    return self._as_decorator('update_arg', arg, **kwargs)
def _as_decorator(self, funcname, *args, **kwargs):
def func_decorator(func):
success = False
for parser in self._get_corresponding_parsers(func):
getattr(parser, funcname)(*args, **kwargs)
success = True
if not success:
raise ValueError(
"Could not figure out to which this %s belongs" % func)
return func
return func_decorator
def pop_arg(self, *args, **kwargs):
    """Delete a previously defined argument from the parser"""
    pending = self.unfinished_arguments
    return pending.pop(*args, **kwargs)
def pop_argf(self, *args, **kwargs):
    """Delete a previously defined argument from the parser via decorators

    Same as :meth:`pop_arg` but it can be used as a decorator"""
    decorate = self._as_decorator('pop_arg', *args, **kwargs)
    return decorate
def pop_key(self, arg, key, *args, **kwargs):
    """Delete a previously defined key for the `add_argument`
    """
    entry = self.unfinished_arguments[arg]
    return entry.pop(key, *args, **kwargs)
def pop_keyf(self, *args, **kwargs):
    """Delete a previously defined key for the `add_argument`

    Same as :meth:`pop_key` but it can be used as a decorator"""
    decorate = self._as_decorator('pop_key', *args, **kwargs)
    return decorate
def create_arguments(self, subparsers=False):
    """Create and add the arguments

    Parameters
    ----------
    subparsers: bool
        If True, the arguments of the subparsers are also created

    Notes
    -----
    This method consumes (pops) the entries in
    :attr:`unfinished_arguments`, so a parser can only be finalized once
    (guarded by the ``_finalized`` flag).
    """
    ret = []
    if not self._finalized:
        for arg, d in self.unfinished_arguments.items():
            try:
                # 1 for optional arguments ('-'/'--' prefix), 0 for
                # positional ones
                not_positional = int(not d.pop('positional', False))
                short = d.pop('short', None)
                long_name = d.pop('long', None)
                if short is None and long_name is None:
                    raise ValueError(
                        "Either a short (-) or a long (--) argument must "
                        "be provided!")
                if not not_positional:
                    # positionals take the plain argument name and no dest
                    short = arg
                    long_name = None
                    d.pop('dest', None)
                if short == long_name:
                    long_name = None
                args = []
                if short:
                    args.append('-' * not_positional + short)
                if long_name:
                    args.append('--' * not_positional + long_name)
                # the argument may belong to an argument group instead of
                # the parser itself
                group = d.pop('group', self)
                if d.get('action') in ['store_true', 'store_false']:
                    # switches do not accept a metavar
                    d.pop('metavar', None)
                ret.append(group.add_argument(*args, **d))
            except Exception:
                print('Error while creating argument %s' % arg)
                raise
    else:
        raise ValueError('Parser has already been finalized!')
    self._finalized = True
    if subparsers and self._subparsers_action is not None:
        # recurse into all subparsers
        for parser in self._subparsers_action.choices.values():
            parser.create_arguments(True)
    return ret
def append2help(self, arg, s):
    """Append the given string to the help of argument `arg`

    Parameters
    ----------
    arg: str
        The function argument
    s: str
        The string to append to the help"""
    entry = self.unfinished_arguments[arg]
    entry['help'] = entry['help'] + s
def append2helpf(self, arg, s):
    """Append the given string to the help of argument `arg` (decorator)

    Parameters
    ----------
    arg: str
        The function argument
    s: str
        The string to append to the help"""
    decorate = self._as_decorator('append2help', arg, s)
    return decorate
@staticmethod
def format_bold(section, text):
"""Make a bold formatting for the section header"""
return '**%s**\n\n%s' % (section, text)
@staticmethod
def format_rubric(section, text):
    """Format the section header as a rst ``.. rubric::`` directive"""
    return '.. rubric:: %s\n\n%s' % (section, text)
@staticmethod
def format_heading(section, text):
return '\n'.join([section, '-' * len(section), text])
def format_epilog_section(self, section, text):
    """Format one epilog section with the configured epilog formatter"""
    formatter = self.epilog_formatter
    try:
        fmt = self._epilog_formatters[formatter]
    except KeyError:
        # not one of the predefined names -- accept any callable as a
        # custom formatter, otherwise re-raise the KeyError
        if callable(formatter):
            fmt = formatter
        else:
            raise
    return fmt(section, text)
def extract_as_epilog(self, text, sections=None, overwrite=False,
                      append=True):
    """Extract epilog sections from a docstring

    Parameters
    ----------
    text
        The docstring to use
    sections: list of str
        The headers of the sections to extract. If None, the
        :attr:`epilog_sections` attribute is used
    overwrite: bool
        If True, overwrite the existing epilog
    append: bool
        If True, append to the existing epilog"""
    if sections is None:
        sections = self.epilog_sections
    if ((not self.epilog or overwrite or append) and sections):
        epilog_parts = []
        for sec in sections:
            # BUGFIX: use a separate name so that the *full* docstring is
            # searched for every section. The previous code rebound
            # `text` to the first extracted section, so any later section
            # was looked up inside the first one and silently lost.
            sec_text = docstrings._get_section(text, sec).strip()
            if sec_text:
                epilog_parts.append(
                    self.format_epilog_section(sec, sec_text))
        if epilog_parts:
            epilog = '\n\n'.join(epilog_parts)
            if overwrite or not self.epilog:
                self.epilog = epilog
            else:
                self.epilog += '\n\n' + epilog
def grouparg(self, arg, my_arg=None, parent_cmds=()):
    """
    Grouper function for chaining subcommands

    Parameters
    ----------
    arg: str
        The current command line argument that is parsed
    my_arg: str
        The name of this subparser. If None, this parser is the main
        parser and has no parent parser
    parent_cmds: iterable of str
        The available commands of the parent parsers. The default is an
        (immutable) empty tuple rather than a mutable ``[]``.

    Returns
    -------
    str or None
        The grouping key for the given `arg` or None if the key does
        not correspond to this parser or this parser is the main parser
        and does not have seen a subparser yet

    Notes
    -----
    Quite complicated, there is no real need to deal with this function
    """
    # without subparsers there is nothing to group
    if self._subparsers_action is None:
        return None
    commands = self._subparsers_action.choices
    currentarg = self.__currentarg
    # the default return value is the current argument we are in or the
    # name of the subparser itself
    ret = currentarg or my_arg
    if currentarg is not None:
        # if we are already in a sub command, we use the sub parser
        sp_key = commands[currentarg].grouparg(arg, currentarg, chain(
            commands, parent_cmds))
        if sp_key is None and arg in commands:
            # if the subparser did not recognize the command, we use the
            # command the corresponds to this parser or (of this parser
            # is the parent parser) the current subparser
            self.__currentarg = currentarg = arg
            ret = my_arg or currentarg
        elif sp_key not in commands and arg in parent_cmds:
            # otherwise, if the subparser recognizes the command but it is
            # not in the known command of this parser, it must be another
            # command of the subparser and this parser can ignore it
            ret = None
        else:
            # otherwise the command belongs to this subparser (if this one
            # is not the subparser) or the current subparser
            ret = my_arg or currentarg
    elif arg in commands:
        # if the argument is a valid subparser, we return this one
        self.__currentarg = arg
        ret = arg
    elif arg in parent_cmds:
        # if the argument is not a valid subparser but in one of our
        # parents, we return None to signalize that we cannot categorize
        # it
        ret = None
    return ret
def parse_known_args(self, args=None, namespace=None):
    """Parse known arguments, optionally dispatching chained subcommands

    If :meth:`add_subparsers` was called with ``chain=True``, the command
    line is grouped by subcommand (see :meth:`grouparg`) and each group
    is parsed separately; the result maps each used subcommand name to
    its own namespace. Otherwise this behaves exactly like
    :meth:`argparse.ArgumentParser.parse_known_args`."""
    if self._chain_subparsers:
        if args is None:
            args = sys.argv[1:]
        choices_d = OrderedDict()
        remainders = OrderedDict()
        main_args = []
        # reset the grouping state used by :meth:`grouparg` before
        # classifying the arguments
        self.__currentarg = None
        for cmd, subargs in groupby(args, self.grouparg):
            if cmd is None:
                # arguments of the main parser (no subcommand seen yet)
                main_args += list(subargs)
            else:
                # replace '-' by underscore for the namespace attribute
                ns_cmd = cmd.replace('-', '_')
                choices_d[ns_cmd], remainders[ns_cmd] = super(
                    FuncArgParser, self).parse_known_args(
                        list(chain(main_args, subargs)))
        main_ns, remainders[None] = self.__parse_main(main_args)
        for key, val in vars(main_ns).items():
            choices_d[key] = val
        self.__currentarg = None
        if '__dummy' in choices_d:
            # inserted by __parse_main on Python 2 -- not a real command
            del choices_d['__dummy']
        return Namespace(**choices_d), list(chain(*remainders.values()))
    # otherwise, use the default behaviour
    return super(FuncArgParser, self).parse_known_args(args, namespace)
def __parse_main(self, args):
    """Parse the main arguments only. This is a work around for python 2.7
    because argparse does not allow to parse arguments without subparsers
    """
    if six.PY2:
        # register a dummy subcommand so that argparse does not complain
        # about a missing (required) subcommand; the '__dummy' entry is
        # removed again in :meth:`parse_known_args`
        self._subparsers_action.add_parser("__dummy")
        return super(FuncArgParser, self).parse_known_args(
            list(args) + ['__dummy'])
    return super(FuncArgParser, self).parse_known_args(args)
@docstrings.get_sectionsf('FuncArgParser.update_short')
@docstrings.dedent
def update_short(self, **kwargs):
    """
    Update the short optional arguments (those with one leading '-')

    This method updates the short argument name for the specified function
    arguments as stored in :attr:`unfinished_arguments`

    Parameters
    ----------
    ``**kwargs``
        Keywords must be keys in the :attr:`unfinished_arguments`
        dictionary (i.e. keywords of the root functions), values the short
        argument names

    Examples
    --------
    Setting::

        >>> parser.update_short(something='s', something_else='se')

    is basically the same as::

        >>> parser.update_arg('something', short='s')
        >>> parser.update_arg('something_else', short='se')

    which in turn is basically comparable to::

        >>> parser.add_argument('-s', '--something', ...)
        >>> parser.add_argument('-se', '--something_else', ...)

    See Also
    --------
    update_shortf, update_long"""
    # each keyword maps an argument name to its new short option string
    for key, val in six.iteritems(kwargs):
        self.update_arg(key, short=val)
@docstrings.dedent
def update_shortf(self, **kwargs):
    """
    Update the short optional arguments belonging to a function

    This method acts exactly like :meth:`update_short` but works as a
    decorator (see :meth:`update_arg` and :meth:`update_argf`)

    Parameters
    ----------
    %(FuncArgParser.update_short.parameters)s

    Returns
    -------
    function
        The function that can be used as a decorator

    Examples
    --------
    Use this method as a decorator::

        >>> @parser.update_shortf(something='s', something_else='se')
        ... def do_something(something=None, something_else=None):
        ...     ...

    See also the examples in :meth:`update_short`.

    See Also
    --------
    update_short, update_longf
    """
    # delegate to update_short on every parser set up by the decorated
    # function
    return self._as_decorator('update_short', **kwargs)
@docstrings.get_sectionsf('FuncArgParser.update_long')
@docstrings.dedent
def update_long(self, **kwargs):
    """
    Update the long optional arguments (those with two leading '-')

    This method updates the short argument name for the specified function
    arguments as stored in :attr:`unfinished_arguments`

    Parameters
    ----------
    ``**kwargs``
        Keywords must be keys in the :attr:`unfinished_arguments`
        dictionary (i.e. keywords of the root functions), values the long
        argument names

    Examples
    --------
    Setting::

        >>> parser.update_long(something='s', something_else='se')

    is basically the same as::

        >>> parser.update_arg('something', long='s')
        >>> parser.update_arg('something_else', long='se')

    which in turn is basically comparable to::

        >>> parser.add_argument('--s', dest='something', ...)
        >>> parser.add_argument('--se', dest='something_else', ...)

    See Also
    --------
    update_short, update_longf"""
    # each keyword maps an argument name to its new long option string
    for key, val in six.iteritems(kwargs):
        self.update_arg(key, long=val)
@docstrings.dedent
def update_longf(self, **kwargs):
    """
    Update the long optional arguments belonging to a function

    This method acts exactly like :meth:`update_long` but works as a
    decorator (see :meth:`update_arg` and :meth:`update_argf`)

    Parameters
    ----------
    %(FuncArgParser.update_long.parameters)s

    Returns
    -------
    function
        The function that can be used as a decorator

    Examples
    --------
    Use this method as a decorator::

        >>> @parser.update_longf(something='s', something_else='se')
        ... def do_something(something=None, something_else=None):
        ...     ...

    See also the examples in :meth:`update_long`.

    See Also
    --------
    update_long, update_shortf
    """
    # delegate to update_long on every parser set up by the decorated
    # function
    return self._as_decorator('update_long', **kwargs)
def parse2func(self, args=None, func=None):
    """Parse the command line arguments to the setup function

    This method parses the given command line arguments to the function
    used in the :meth:`setup_args` method to setup up this parser

    Parameters
    ----------
    args: list
        The list of command line arguments
    func: function
        An alternative function to use. If None, the last function or the
        one specified through the `setup_as` parameter in the
        :meth:`setup_args` is used.

    Returns
    -------
    object
        What ever is returned by the called function

    Note
    ----
    This method does not cover subparsers!"""
    namespace = vars(self.parse_args(args))
    target = func
    if target is None:
        if self._setup_as:
            # the function was stored as a (suppressed) default argument
            target = namespace.pop(self._setup_as)
        else:
            target = self._used_functions[-1]
    return target(**namespace)
def parse_known2func(self, args=None, func=None):
    """Parse the command line arguments to the setup function

    This method parses the given command line arguments to the function
    used in the :meth:`setup_args` method to setup up this parser

    Parameters
    ----------
    args: list
        The list of command line arguments
    func: function or str
        An alternative function to use. If None, the last function or the
        one specified through the `setup_as` parameter in the
        :meth:`setup_args` is used.

    Returns
    -------
    object
        What ever is returned by the called function
    list
        The remaining command line arguments that could not be interpreted

    Note
    ----
    This method does not cover subparsers!"""
    namespace, leftover = self.parse_known_args(args)
    kws = vars(namespace)
    target = func
    if target is None:
        if self._setup_as:
            # the function was stored as a (suppressed) default argument
            target = kws.pop(self._setup_as)
        else:
            target = self._used_functions[-1]
    return target(**kws), leftover
def parse_chained(self, args=None):
    """
    Parse the argument directly to the function used for setup

    This function parses the command line arguments to the function that
    has been used for the :meth:`setup_args`.

    Parameters
    ----------
    args: list
        The arguments parsed to the :meth:`parse_args` function

    Returns
    -------
    argparse.Namespace
        The namespace with mapping from command name to the function
        return

    See also
    --------
    parse_known_chained
    """
    namespace = self.parse_args(args)
    return self._parse2subparser_funcs(vars(namespace))
def parse_known_chained(self, args=None):
    """
    Parse the argument directly to the function used for setup

    This function parses the command line arguments to the function that
    has been used for the :meth:`setup_args` method.

    Parameters
    ----------
    args: list
        The arguments parsed to the :meth:`parse_args` function

    Returns
    -------
    argparse.Namespace
        The namespace with mapping from command name to the function
        return
    list
        The remaining arguments that could not be interpreted

    See also
    --------
    parse_chained
    """
    namespace, leftover = self.parse_known_args(args)
    return self._parse2subparser_funcs(vars(namespace)), leftover
def _parse2subparser_funcs(self, kws):
    """
    Recursive function to parse arguments to chained parsers
    """
    # map the namespace attribute names (with '_') back to the real
    # subcommand names (which may contain '-')
    choices = getattr(self._subparsers_action, 'choices', {})
    replaced = {key.replace('-', '_'): key for key in choices}
    sp_commands = set(replaced).intersection(kws)
    if not sp_commands:
        # no subcommand used -> call the function this parser was set up
        # with, passing everything but the subcommand entries
        if self._setup_as is not None:
            func = kws.pop(self._setup_as)
        else:
            try:
                func = self._used_functions[-1]
            except IndexError:
                # no setup function registered for this parser
                return None
        return func(**{
            key: kws[key] for key in set(kws).difference(choices)})
    else:
        # recurse into every subparser that occurs in the namespace
        ret = {}
        for key in sp_commands:
            ret[key.replace('-', '_')] = \
                choices[replaced[key]]._parse2subparser_funcs(
                    vars(kws[key]))
        return Namespace(**ret)
def get_subparser(self, name):
    """
    Convenience method to get a certain subparser

    Parameters
    ----------
    name: str
        The name of the subparser

    Returns
    -------
    FuncArgParser
        The subparsers corresponding to `name`
    """
    action = self._subparsers_action
    if action is None:
        raise ValueError("%s has no subparsers defined!" % self)
    return action.choices[name]
|
Chilipp/funcargparse | funcargparse/__init__.py | FuncArgParser.pop_key | python | def pop_key(self, arg, key, *args, **kwargs):
return self.unfinished_arguments[arg].pop(key, *args, **kwargs) | Delete a previously defined key for the `add_argument` | train | https://github.com/Chilipp/funcargparse/blob/398ce8e7fa5aa35c465215446bda151cf1ecf7ad/funcargparse/__init__.py#L487-L490 | null | class FuncArgParser(ArgumentParser):
"""Subclass of an argument parser that get's parts of the information
from a given function"""
_finalized = False
#: The unfinished arguments after the setup
unfinished_arguments = {}
#: The sections to extract from a function docstring that should be used
#: in the epilog of this parser. See also the :meth:`setup_args` method
epilog_sections = ['Notes', 'References']
#: The formatter specification for the epilog. This can either be a string
#: out of 'header', 'bold', or
#: 'rubric' or a callable (i.e. function) that takes two arguments,
#: the section title and the section text, and returns a string.
#:
#: 'heading'
#: Use section headers such as::
#:
#: Notes
#: -----
#: 'bold'
#: Just make a bold header for the section, e.g. ``**Notes**``
#: 'rubric'
#: Use a rubric rst directive, e.g. ``.. rubric:: Notes``
#:
#: .. warning::
#:
#: When building a sphinx documentation using the sphinx-argparse
#: module, this value should be set to ``'bold'`` or ``'rubric'``! Just
#: add this two lines to your conf.py:
#:
#: .. code-block:: python
#:
#: import funcargparse
#: funcargparse.FuncArgParser.epilog_formatter = 'rubric'
epilog_formatter = 'heading'
def __init__(self, *args, **kwargs):
    """
    Parameters
    ----------
    ``*args,**kwargs``
        Theses arguments are determined by the
        :class:`argparse.ArgumentParser` base class. Note that by default,
        we use a :class:`argparse.RawTextHelpFormatter` class for the
        `formatter_class` keyword, whereas the
        :class:`argparse.ArgumentParser` uses a
        :class:`argparse.HelpFormatter`

    Other Parameters
    ----------------
    epilog_sections: list of str
        The default sections to use for the epilog (see the
        :attr:`epilog_sections` attribute). They can also be specified
        each time the :meth:`setup_args` method is called
    epilog_formatter: {'header', 'bold', 'rubric'} or function
        Specify how the epilog sections should be formatted and defaults
        to the :attr:`epilog_formatter` attribute. This can either be a
        string out of 'header', 'bold', or 'rubric' or a callable (i.e.
        function) that takes two arguments, the section title and the
        section text, and returns a string.

        'heading'
            Use section headers such as::

                Notes
                -----
        'bold'
            Just make a bold header for the section, e.g. ``**Notes**``
        'rubric'
            Use a rubric rst directive, e.g. ``.. rubric:: Notes``
    """
    self._subparsers_action = None
    # keep the layout of extracted docstring sections in the help output
    kwargs.setdefault('formatter_class', argparse.RawTextHelpFormatter)
    # our own keywords must be stripped before calling the base __init__
    epilog_sections = kwargs.pop('epilog_sections', None)
    if epilog_sections is not None:
        self.epilog_sections = epilog_sections
    epilog_formatter = kwargs.pop('epilog_formatter', None)
    if epilog_formatter is not None:
        self.epilog_formatter = epilog_formatter
    super(FuncArgParser, self).__init__(*args, **kwargs)
    # mapping from argument name to the add_argument keywords that will
    # be consumed by :meth:`create_arguments`
    self.unfinished_arguments = OrderedDict()
    # the functions that have been used to set up this parser
    self._used_functions = []
    # state used while grouping chained subcommands (see grouparg)
    self.__currentarg = None
    self._chain_subparsers = False
    self._setup_as = None
    self._epilog_formatters = {'heading': self.format_heading,
                               'bold': self.format_bold,
                               'rubric': self.format_rubric}
@staticmethod
def get_param_doc(doc, param):
    """Get the documentation and datatype for a parameter

    This function returns the documentation and the argument for a
    napoleon like structured docstring `doc`

    Parameters
    ----------
    doc: str
        The base docstring to use
    param: str
        The argument to use

    Returns
    -------
    str
        The documentation of the given `param`
    str
        The datatype of the given `param`"""
    arg_doc = docstrings.keep_params_s(doc, [param]) or \
        docstrings.keep_types_s(doc, [param])
    dtype = None
    if arg_doc:
        doc_lines = arg_doc.splitlines()
        # the first line may look like ``param: dtype``
        header_parts = doc_lines[0].split(':', 1)
        if len(header_parts) > 1:
            dtype = header_parts[1].strip()
        # the remaining lines are the parameter description
        arg_doc = dedents('\n' + '\n'.join(doc_lines[1:]))
    return arg_doc, dtype
@docstrings.get_sectionsf('FuncArgParser.setup_args',
                          sections=['Parameters', 'Returns'])
@docstrings.dedent
def setup_args(self, func=None, setup_as=None, insert_at=None,
               interprete=True, epilog_sections=None,
               overwrite=False, append_epilog=True):
    """
    Add the parameters from the given `func` to the parameter settings

    Parameters
    ----------
    func: function
        The function to use. If None, a function will be returned that can
        be used as a decorator
    setup_as: str
        The attribute that shall be assigned to the function in the
        resulting namespace. If specified, this function will be used when
        calling the :meth:`parse2func` method
    insert_at: int
        The position where the given `func` should be inserted. If None,
        it will be appended at the end and used when calling the
        :meth:`parse2func` method
    interprete: bool
        If True (default), the docstrings are interpreted and switches and
        lists are automatically inserted (see the
        [interpretation-docs]_
    epilog_sections: list of str
        The headers of the sections to extract. If None, the
        :attr:`epilog_sections` attribute is used
    overwrite: bool
        If True, overwrite the existing epilog and the existing
        description of the parser
    append_epilog: bool
        If True, append to the existing epilog

    Returns
    -------
    function
        Either the function that can be used as a decorator (if `func` is
        ``None``), or the given `func` itself.

    Examples
    --------
    Use this method as a decorator::

        >>> @parser.setup_args
        ... def do_something(a=1):
        ...     '''
        ...     Just an example
        ...
        ...     Parameters
        ...     ----------
        ...     a: int
        ...         A number to increment by one
        ...     '''
        ...     return a + 1

        >>> args = parser.parse_args('-a 2'.split())

    Or by specifying the setup_as function::

        >>> @parser.setup_args(setup_as='func')
        ... def do_something(a=1):
        ...     '''
        ...     Just an example
        ...
        ...     Parameters
        ...     ----------
        ...     a: int
        ...         A number to increment by one
        ...     '''
        ...     return a + 1

        >>> args = parser.parse_args('-a 2'.split())
        >>> args.func is do_something
        >>> parser.parse2func('-a 2'.split())
        3

    References
    ----------
    .. [interpretation-docs]
       http://funcargparse.readthedocs.io/en/latest/docstring_interpretation.html)
    """
    def setup(func):
        # insert the function
        if insert_at is None:
            self._used_functions.append(func)
        else:
            self._used_functions.insert(insert_at, func)
        args_dict = self.unfinished_arguments
        # save the function to use in parse2funcs
        if setup_as:
            args_dict[setup_as] = dict(
                long=setup_as, default=func, help=argparse.SUPPRESS)
        self._setup_as = setup_as
        # create arguments.
        # BUGFIX: inspect.getargspec was removed in Python 3.11, so use
        # getfullargspec where available. In both variants index 0 holds
        # the argument names and index 3 the default values.
        argspec = getattr(
            inspect, 'getfullargspec', inspect.getargspec)(func)
        args, defaults = argspec[0], argspec[3]
        full_doc = docstrings.dedents(inspect.getdoc(func))
        summary = docstrings.get_full_description(full_doc)
        if summary:
            if not self.description or overwrite:
                self.description = summary
            full_doc = docstrings._remove_summary(full_doc)
        self.extract_as_epilog(full_doc, epilog_sections, overwrite,
                               append_epilog)
        doc = docstrings._get_section(full_doc, 'Parameters') + '\n'
        doc += docstrings._get_section(full_doc, 'Other Parameters')
        doc = doc.rstrip()
        # index of the first argument that has a default value
        default_min = len(args or []) - len(defaults or [])
        for i, arg in enumerate(args):
            if arg == 'self' or arg in args_dict:
                continue
            arg_doc, dtype = self.get_param_doc(doc, arg)
            args_dict[arg] = d = {'dest': arg, 'short': arg.replace('_',
                                                                    '-'),
                                  'long': arg.replace('_', '-')}
            if arg_doc:
                d['help'] = arg_doc
            if i >= default_min:
                d['default'] = defaults[i - default_min]
            else:
                # arguments without a default value become positional
                d['positional'] = True
            if interprete and dtype == 'bool' and 'default' in d:
                # booleans with a default value become switches
                d['action'] = 'store_false' if d['default'] else \
                    'store_true'
            elif interprete and dtype:
                if dtype.startswith('list of'):
                    d['nargs'] = '+'
                    dtype = dtype[7:].strip()
                if dtype in ['str', 'string', 'strings']:
                    d['type'] = six.text_type
                    if dtype == 'strings':
                        dtype = 'string'
                else:
                    try:
                        d['type'] = getattr(builtins, dtype)
                    except AttributeError:
                        try:  # maybe the dtype has a final 's'
                            d['type'] = getattr(builtins, dtype[:-1])
                            dtype = dtype[:-1]
                        except AttributeError:
                            pass
                d['metavar'] = dtype
        return func
    if func is None:
        return setup
    else:
        return setup(func)
@docstrings.get_sectionsf('FuncArgParser.add_subparsers')
@docstrings.dedent
def add_subparsers(self, *args, **kwargs):
    """
    Add subparsers to this parser

    Parameters
    ----------
    ``*args, **kwargs``
        As specified by the original
        :meth:`argparse.ArgumentParser.add_subparsers` method
    chain: bool
        Default: False. If True, It is enabled to chain subparsers"""
    # `chain` is our own keyword -- strip it before delegating to argparse
    if kwargs.pop('chain', None):
        self._chain_subparsers = True
    action = super(FuncArgParser, self).add_subparsers(*args, **kwargs)
    # remember the action so that setup_subparser/get_subparser can use it
    self._subparsers_action = action
    return action
@docstrings.dedent
def setup_subparser(
        self, func=None, setup_as=None, insert_at=None, interprete=True,
        epilog_sections=None, overwrite=False, append_epilog=True,
        return_parser=False, name=None, **kwargs):
    """
    Create a subparser with the name of the given function

    Parameters are the same as for the :meth:`setup_args` function, other
    parameters are parsed to the :meth:`add_subparsers` method if (and only
    if) this method has not already been called.

    Parameters
    ----------
    %(FuncArgParser.setup_args.parameters)s
    return_parser: bool
        If True, the create parser is returned instead of the function
    name: str
        The name of the created parser. If None, the function name is used
        and underscores (``'_'``) are replaced by minus (``'-'``)
    ``**kwargs``
        Any other parameter that is passed to the add_parser method that
        creates the parser

    Returns
    -------
    FuncArgParser or %(FuncArgParser.setup_args.returns)s
        If return_parser is True, the created subparser is returned

    Examples
    --------
    Use this method as a decorator::

        >>> from funcargparser import FuncArgParser

        >>> parser = FuncArgParser()

        >>> @parser.setup_subparser
        ... def my_func(my_argument=None):
        ...     pass

        >>> args = parser.parse_args('my-func -my-argument 1'.split())
    """
    def setup(func):
        # a subparsers action must already exist (see add_subparsers)
        if self._subparsers_action is None:
            raise RuntimeError(
                "No subparsers have yet been created! Run the "
                "add_subparsers method first!")
        # replace underscore by '-'
        name2use = name
        if name2use is None:
            name2use = func.__name__.replace('_', '-')
        # default the one-line help to the summary of the docstring
        kwargs.setdefault('help', docstrings.get_summary(
            docstrings.dedents(inspect.getdoc(func))))
        parser = self._subparsers_action.add_parser(name2use, **kwargs)
        parser.setup_args(
            func, setup_as=setup_as, insert_at=insert_at,
            interprete=interprete, epilog_sections=epilog_sections,
            overwrite=overwrite, append_epilog=append_epilog)
        return func, parser
    if func is None:
        # used with keyword arguments as ``@parser.setup_subparser(...)``:
        # return a decorator that hands back the function only
        return lambda f: setup(f)[0]
    else:
        # used directly: return the function (0) or the new parser (1)
        return setup(func)[int(return_parser)]
@docstrings.get_sectionsf('FuncArgParser.update_arg')
@docstrings.dedent
def update_arg(self, arg, if_existent=None, **kwargs):
    """
    Update the `add_argument` data for the given parameter

    Parameters
    ----------
    arg: str
        The name of the function argument
    if_existent: bool or None
        If True, the argument is updated. If None (default), the argument
        is only updated, if it exists. Otherwise, if False, the given
        ``**kwargs`` are only used if the argument is not yet existing
    ``**kwargs``
        The keyword arguments any parameter for the
        :meth:`argparse.ArgumentParser.add_argument` method
    """
    # tri-state `if_existent`:
    #   True  -> update unconditionally (raises KeyError for an unknown
    #            argument)
    #   None  -> update only when the argument is already registered
    #   False -> only register defaults for a not-yet-existing argument
    if if_existent or (if_existent is None and
                       arg in self.unfinished_arguments):
        self.unfinished_arguments[arg].update(kwargs)
    elif not if_existent and if_existent is not None:
        self.unfinished_arguments.setdefault(arg, kwargs)
@docstrings.dedent
def update_argf(self, arg, **kwargs):
    """
    Update the arguments as a decorator

    Parameters
    ----------
    %(FuncArgParser.update_arg.parameters)s

    Examples
    --------
    Use this method as a decorator::

        >>> from funcargparser import FuncArgParser

        >>> parser = FuncArgParser()

        >>> @parser.update_argf('my_argument', type=int)
        ... def my_func(my_argument=None):
        ...     pass

        >>> args = parser.parse_args('my-func -my-argument 1'.split())

        >>> isinstance(args.my_argument, int)
        True

    See Also
    --------
    update_arg"""
    # delegate to ``update_arg`` on every parser set up by the decorated
    # function
    return self._as_decorator('update_arg', arg, **kwargs)
def _as_decorator(self, funcname, *args, **kwargs):
    """Return a decorator that calls ``funcname`` on every (sub)parser
    that was set up with the decorated function."""
    def decorate(func):
        matched = False
        for target in self._get_corresponding_parsers(func):
            getattr(target, funcname)(*args, **kwargs)
            matched = True
        if not matched:
            # the function was never used in a setup_args/setup_subparser
            raise ValueError(
                "Could not figure out to which this %s belongs" % func)
        return func
    return decorate
def _get_corresponding_parsers(self, func):
    """Yield this parser and every subparser that was set up by `func`"""
    if func in self._used_functions:
        yield self
    action = self._subparsers_action
    if action is not None:
        # recurse into all subparsers
        for subparser in action.choices.values():
            for match in subparser._get_corresponding_parsers(func):
                yield match
def pop_arg(self, *args, **kwargs):
    """Delete a previously defined argument from the parser"""
    pending = self.unfinished_arguments
    return pending.pop(*args, **kwargs)
def pop_argf(self, *args, **kwargs):
    """Delete a previously defined argument from the parser via decorators

    Same as :meth:`pop_arg` but it can be used as a decorator"""
    decorate = self._as_decorator('pop_arg', *args, **kwargs)
    return decorate
def pop_keyf(self, *args, **kwargs):
    """Delete a previously defined key for the `add_argument`

    Same as :meth:`pop_key` but it can be used as a decorator"""
    decorate = self._as_decorator('pop_key', *args, **kwargs)
    return decorate
def create_arguments(self, subparsers=False):
    """Create and add the arguments

    Turn every entry of :attr:`unfinished_arguments` into an actual
    ``add_argument`` call.  This may only be done once; afterwards the
    parser is marked as finalized.

    Parameters
    ----------
    subparsers: bool
        If True, the arguments of the subparsers are also created

    Returns
    -------
    list
        The actions returned by the ``add_argument`` calls

    Raises
    ------
    ValueError
        If this parser has already been finalized, or if an entry has
        neither a short nor a long argument name"""
    ret = []
    if not self._finalized:
        for arg, d in self.unfinished_arguments.items():
            try:
                # used as a dash multiplier below: 0 for positionals,
                # 1 for optionals
                not_positional = int(not d.pop('positional', False))
                short = d.pop('short', None)
                long_name = d.pop('long', None)
                if short is None and long_name is None:
                    raise ValueError(
                        "Either a short (-) or a long (--) argument must "
                        "be provided!")
                if not not_positional:
                    # positional arguments take the bare function
                    # argument name and must not have an alias or dest
                    short = arg
                    long_name = None
                    d.pop('dest', None)
                if short == long_name:
                    long_name = None
                args = []
                if short:
                    args.append('-' * not_positional + short)
                if long_name:
                    args.append('--' * not_positional + long_name)
                # the argument may be assigned to an argument group
                group = d.pop('group', self)
                if d.get('action') in ['store_true', 'store_false']:
                    # switches take no value, so a metavar is invalid
                    d.pop('metavar', None)
                ret.append(group.add_argument(*args, **d))
            except Exception:
                print('Error while creating argument %s' % arg)
                raise
    else:
        raise ValueError('Parser has already been finalized!')
    self._finalized = True
    if subparsers and self._subparsers_action is not None:
        for parser in self._subparsers_action.choices.values():
            parser.create_arguments(True)
    return ret
def append2help(self, arg, s):
    """Extend the help text of argument `arg` by the string `s`.

    Parameters
    ----------
    arg: str
        The function argument whose help shall be extended
    s: str
        The text appended to the current help message
    """
    entry = self.unfinished_arguments[arg]
    entry['help'] = entry['help'] + s
def append2helpf(self, arg, s):
    """Append the given string to the help of argument `arg`

    Same as :meth:`append2help` but usable as a decorator: the help text
    is extended on every parser that was set up from the decorated
    function.

    Parameters
    ----------
    arg: str
        The function argument
    s: str
        The string to append to the help"""
    return self._as_decorator('append2help', arg, s)
@staticmethod
def format_bold(section, text):
"""Make a bold formatting for the section header"""
return '**%s**\n\n%s' % (section, text)
@staticmethod
def format_rubric(section, text):
"""Make a bold formatting for the section header"""
return '.. rubric:: %s\n\n%s' % (section, text)
@staticmethod
def format_heading(section, text):
return '\n'.join([section, '-' * len(section), text])
def format_epilog_section(self, section, text):
    """Render one epilog section with the configured formatter.

    :attr:`epilog_formatter` may either be one of the keys registered in
    ``_epilog_formatters`` ('heading', 'bold', 'rubric') or a callable
    taking ``(section, text)`` and returning a string.
    """
    formatter = self.epilog_formatter
    try:
        render = self._epilog_formatters[formatter]
    except KeyError:
        # Not a registered name -- accept a user-supplied callable,
        # otherwise re-raise the KeyError for an unknown specification.
        if not callable(formatter):
            raise
        render = formatter
    return render(section, text)
def extract_as_epilog(self, text, sections=None, overwrite=False,
                      append=True):
    """Extract epilog sections from a docstring

    Parameters
    ----------
    text
        The docstring to use
    sections: list of str
        The headers of the sections to extract. If None, the
        :attr:`epilog_sections` attribute is used
    overwrite: bool
        If True, overwrite the existing epilog
    append: bool
        If True, append to the existing epilog"""
    if sections is None:
        sections = self.epilog_sections
    if ((not self.epilog or overwrite or append) and sections):
        epilog_parts = []
        for sec in sections:
            # BUGFIX: use a separate variable instead of rebinding
            # ``text``. Rebinding made every section after the first be
            # looked up inside the previous section's extracted content
            # rather than in the full docstring, so only the first
            # matching section ever made it into the epilog.
            sec_text = docstrings._get_section(text, sec).strip()
            if sec_text:
                epilog_parts.append(
                    self.format_epilog_section(sec, sec_text))
        if epilog_parts:
            epilog = '\n\n'.join(epilog_parts)
            if overwrite or not self.epilog:
                self.epilog = epilog
            else:
                self.epilog += '\n\n' + epilog
def grouparg(self, arg, my_arg=None, parent_cmds=[]):
    """
    Grouper function for chaining subcommands

    Parameters
    ----------
    arg: str
        The current command line argument that is parsed
    my_arg: str
        The name of this subparser. If None, this parser is the main
        parser and has no parent parser
    parent_cmds: list of str
        The available commands of the parent parsers

    Returns
    -------
    str or None
        The grouping key for the given `arg` or None if the key does
        not correspond to this parser or this parser is the main parser
        and does not have seen a subparser yet

    Notes
    -----
    Quite complicated, there is no real need to deal with this function
    """
    # NOTE(review): the mutable default ``parent_cmds=[]`` is safe here
    # because it is only read, never mutated.
    if self._subparsers_action is None:
        return None
    commands = self._subparsers_action.choices
    # ``__currentarg`` holds the subcommand we are currently grouping
    # into; it is (re)set by :meth:`parse_known_args`
    currentarg = self.__currentarg
    # the default return value is the current argument we are in or the
    # name of the subparser itself
    ret = currentarg or my_arg
    if currentarg is not None:
        # if we are already in a sub command, we use the sub parser
        sp_key = commands[currentarg].grouparg(arg, currentarg, chain(
            commands, parent_cmds))
        if sp_key is None and arg in commands:
            # if the subparser did not recognize the command, we use the
            # command the corresponds to this parser or (of this parser
            # is the parent parser) the current subparser
            self.__currentarg = currentarg = arg
            ret = my_arg or currentarg
        elif sp_key not in commands and arg in parent_cmds:
            # otherwise, if the subparser recognizes the commmand but it is
            # not in the known command of this parser, it must be another
            # command of the subparser and this parser can ignore it
            ret = None
        else:
            # otherwise the command belongs to this subparser (if this one
            # is not the subparser) or the current subparser
            ret = my_arg or currentarg
    elif arg in commands:
        # if the argument is a valid subparser, we return this one
        self.__currentarg = arg
        ret = arg
    elif arg in parent_cmds:
        # if the argument is not a valid subparser but in one of our
        # parents, we return None to signalize that we cannot categorize
        # it
        ret = None
    return ret
def parse_known_args(self, args=None, namespace=None):
    """Parse known arguments, optionally chaining subcommands.

    When subparser chaining is enabled (``add_subparsers(chain=True)``)
    the command line is split into one group per subcommand with
    :meth:`grouparg`; each group is parsed (together with the main
    arguments) through the inherited ``parse_known_args`` and the
    results are collected into a single namespace keyed by the
    (underscore-converted) subcommand names. Otherwise the default
    :class:`argparse.ArgumentParser` behaviour is used.

    NOTE(review): on the chained path the `namespace` parameter is
    silently ignored -- confirm this is intended.
    """
    if self._chain_subparsers:
        if args is None:
            args = sys.argv[1:]
        choices_d = OrderedDict()
        remainders = OrderedDict()
        main_args = []
        # get the first argument to make sure that everything works
        cmd = self.__currentarg = None
        for i, (cmd, subargs) in enumerate(groupby(args, self.grouparg)):
            if cmd is None:
                main_args += list(subargs)
            else:
                # replace '-' by underscore
                ns_cmd = cmd.replace('-', '_')
                choices_d[ns_cmd], remainders[ns_cmd] = super(
                    FuncArgParser, self).parse_known_args(
                        list(chain(main_args, subargs)))
        main_ns, remainders[None] = self.__parse_main(main_args)
        for key, val in vars(main_ns).items():
            choices_d[key] = val
        self.__currentarg = None
        # '__dummy' is the placeholder subcommand added by __parse_main
        # on Python 2; it must not leak into the result
        if '__dummy' in choices_d:
            del choices_d['__dummy']
        return Namespace(**choices_d), list(chain(*remainders.values()))
    # otherwise, use the default behaviour
    return super(FuncArgParser, self).parse_known_args(args, namespace)
def __parse_main(self, args):
    """Parse the main arguments only. This is a work around for python 2.7
    because argparse does not allow to parse arguments without subparsers
    """
    if six.PY2:
        # py2 argparse requires a subcommand once subparsers exist, so a
        # throw-away '__dummy' command is registered and appended; it is
        # filtered out again in :meth:`parse_known_args`.
        self._subparsers_action.add_parser("__dummy")
        return super(FuncArgParser, self).parse_known_args(
            list(args) + ['__dummy'])
    return super(FuncArgParser, self).parse_known_args(args)
@docstrings.get_sectionsf('FuncArgParser.update_short')
@docstrings.dedent
def update_short(self, **kwargs):
    """
    Update the short optional arguments (those with one leading '-')

    This method updates the short argument name for the specified function
    arguments as stored in :attr:`unfinished_arguments`

    Parameters
    ----------
    ``**kwargs``
        Keywords must be keys in the :attr:`unfinished_arguments`
        dictionary (i.e. keywords of the root functions), values the short
        argument names

    Examples
    --------
    Setting::

        >>> parser.update_short(something='s', something_else='se')

    is basically the same as::

        >>> parser.update_arg('something', short='s')
        >>> parser.update_arg('something_else', short='se')

    which in turn is basically comparable to::

        >>> parser.add_argument('-s', '--something', ...)
        >>> parser.add_argument('-se', '--something_else', ...)

    See Also
    --------
    update_shortf, update_long"""
    for key, val in six.iteritems(kwargs):
        self.update_arg(key, short=val)
@docstrings.dedent
def update_shortf(self, **kwargs):
    """
    Update the short optional arguments belonging to a function

    This method acts exactly like :meth:`update_short` but works as a
    decorator (see :meth:`update_arg` and :meth:`update_argf`)

    Parameters
    ----------
    %(FuncArgParser.update_short.parameters)s

    Returns
    -------
    function
        The function that can be used as a decorator

    Examples
    --------
    Use this method as a decorator::

        >>> @parser.update_shortf(something='s', something_else='se')
        ... def do_something(something=None, something_else=None):
        ...     ...

    See also the examples in :meth:`update_short`.

    See Also
    --------
    update_short, update_longf
    """
    return self._as_decorator('update_short', **kwargs)
@docstrings.get_sectionsf('FuncArgParser.update_long')
@docstrings.dedent
def update_long(self, **kwargs):
    """
    Update the long optional arguments (those with two leading '-')

    This method updates the long argument name for the specified function
    arguments as stored in :attr:`unfinished_arguments`

    Parameters
    ----------
    ``**kwargs``
        Keywords must be keys in the :attr:`unfinished_arguments`
        dictionary (i.e. keywords of the root functions), values the long
        argument names

    Examples
    --------
    Setting::

        >>> parser.update_long(something='s', something_else='se')

    is basically the same as::

        >>> parser.update_arg('something', long='s')
        >>> parser.update_arg('something_else', long='se')

    which in turn is basically comparable to::

        >>> parser.add_argument('--s', dest='something', ...)
        >>> parser.add_argument('--se', dest='something_else', ...)

    See Also
    --------
    update_short, update_longf"""
    for key, val in six.iteritems(kwargs):
        self.update_arg(key, long=val)
@docstrings.dedent
def update_longf(self, **kwargs):
    """
    Update the long optional arguments belonging to a function

    This method acts exactly like :meth:`update_long` but works as a
    decorator (see :meth:`update_arg` and :meth:`update_argf`)

    Parameters
    ----------
    %(FuncArgParser.update_long.parameters)s

    Returns
    -------
    function
        The function that can be used as a decorator

    Examples
    --------
    Use this method as a decorator::

        >>> @parser.update_longf(something='s', something_else='se')
        ... def do_something(something=None, something_else=None):
        ...     ...

    See also the examples in :meth:`update_long`.

    See Also
    --------
    update_long, update_shortf
    """
    return self._as_decorator('update_long', **kwargs)
def parse2func(self, args=None, func=None):
    """Parse the command line and feed the result into the setup function.

    The namespace obtained from :meth:`parse_args` is converted into
    keyword arguments and passed on to `func`.

    Parameters
    ----------
    args: list
        The list of command line arguments
    func: function
        An alternative function to use. If None, the function is taken
        from the parsed namespace (when a `setup_as` attribute was
        registered in :meth:`setup_args`) or defaults to the last
        function used for setting up this parser.

    Returns
    -------
    object
        Whatever is returned by the called function

    Note
    ----
    This method does not cover subparsers!"""
    kwargs = vars(self.parse_args(args))
    if func is None:
        attr = self._setup_as
        if attr:
            func = kwargs.pop(attr)
        else:
            func = self._used_functions[-1]
    return func(**kwargs)
def parse_known2func(self, args=None, func=None):
    """Parse the command line arguments to the setup function

    This method parses the given command line arguments to the function
    used in the :meth:`setup_args` method to setup up this parser

    Parameters
    ----------
    args: list
        The list of command line arguments
    func: function or str
        An alternative function to use. If None, the last function or the
        one specified through the `setup_as` parameter in the
        :meth:`setup_args` is used.

    Returns
    -------
    object
        What ever is returned by the called function
    list
        The remaining command line arguments that could not be interpreted

    Note
    ----
    This method does not cover subparsers!"""
    ns, remainder = self.parse_known_args(args)
    kws = vars(ns)
    if func is None:
        if self._setup_as:
            func = kws.pop(self._setup_as)
        else:
            # fall back to the last function that set up this parser
            func = self._used_functions[-1]
    return func(**kws), remainder
def parse_chained(self, args=None):
    """Parse the arguments and call the function of every chained command.

    The parsed namespace is handed to :meth:`_parse2subparser_funcs`,
    which calls the setup function of each chained subcommand.

    Parameters
    ----------
    args: list
        The arguments parsed to the :meth:`parse_args` function

    Returns
    -------
    argparse.Namespace
        The namespace with mapping from command name to the function
        return

    See also
    --------
    parse_known_chained
    """
    namespace = self.parse_args(args)
    return self._parse2subparser_funcs(vars(namespace))
def parse_known_chained(self, args=None):
    """
    Parse the argument directly to the function used for setup

    This function parses the command line arguments to the function that
    has been used for the :meth:`setup_args` method.

    Parameters
    ----------
    args: list
        The arguments parsed to the :meth:`parse_args` function

    Returns
    -------
    argparse.Namespace
        The namespace with mapping from command name to the function
        return
    list
        The remaining arguments that could not be interpreted

    See also
    --------
    parse_chained
    """
    ns, remainder = self.parse_known_args(args)
    kws = vars(ns)
    return self._parse2subparser_funcs(kws), remainder
def _parse2subparser_funcs(self, kws):
    """
    Recursive function to parse arguments to chained parsers

    `kws` is the ``vars()`` of a parsed namespace. If it contains
    subcommand entries, recurse into the corresponding subparsers;
    otherwise call the function this parser was set up from.
    """
    # map underscore-converted command names back to the (possibly
    # dash-containing) subparser names
    choices = getattr(self._subparsers_action, 'choices', {})
    replaced = {key.replace('-', '_'): key for key in choices}
    sp_commands = set(replaced).intersection(kws)
    if not sp_commands:
        # leaf parser: call the function this parser was set up from
        if self._setup_as is not None:
            func = kws.pop(self._setup_as)
        else:
            try:
                func = self._used_functions[-1]
            except IndexError:
                # this parser was never set up from a function
                return None
        return func(**{
            key: kws[key] for key in set(kws).difference(choices)})
    else:
        ret = {}
        for key in sp_commands:
            # recurse into each chained subcommand's sub-namespace
            ret[key.replace('-', '_')] = \
                choices[replaced[key]]._parse2subparser_funcs(
                    vars(kws[key]))
        return Namespace(**ret)
def get_subparser(self, name):
    """Return the subparser registered under the given `name`.

    Parameters
    ----------
    name: str
        The name of the subparser

    Returns
    -------
    FuncArgParser
        The subparser corresponding to `name`

    Raises
    ------
    ValueError
        If no subparsers have been created for this parser
    """
    action = self._subparsers_action
    if action is None:
        raise ValueError("%s has no subparsers defined!" % self)
    return action.choices[name]
|
Chilipp/funcargparse | funcargparse/__init__.py | FuncArgParser.create_arguments | python | def create_arguments(self, subparsers=False):
ret = []
if not self._finalized:
for arg, d in self.unfinished_arguments.items():
try:
not_positional = int(not d.pop('positional', False))
short = d.pop('short', None)
long_name = d.pop('long', None)
if short is None and long_name is None:
raise ValueError(
"Either a short (-) or a long (--) argument must "
"be provided!")
if not not_positional:
short = arg
long_name = None
d.pop('dest', None)
if short == long_name:
long_name = None
args = []
if short:
args.append('-' * not_positional + short)
if long_name:
args.append('--' * not_positional + long_name)
group = d.pop('group', self)
if d.get('action') in ['store_true', 'store_false']:
d.pop('metavar', None)
ret.append(group.add_argument(*args, **d))
except Exception:
print('Error while creating argument %s' % arg)
raise
else:
raise ValueError('Parser has already been finalized!')
self._finalized = True
if subparsers and self._subparsers_action is not None:
for parser in self._subparsers_action.choices.values():
parser.create_arguments(True)
return ret | Create and add the arguments
Parameters
----------
subparsers: bool
If True, the arguments of the subparsers are also created | train | https://github.com/Chilipp/funcargparse/blob/398ce8e7fa5aa35c465215446bda151cf1ecf7ad/funcargparse/__init__.py#L498-L540 | null | class FuncArgParser(ArgumentParser):
"""Subclass of an argument parser that gets parts of the information
from a given function"""

#: bool. True once :meth:`create_arguments` has been called
_finalized = False

#: The unfinished arguments after the setup
unfinished_arguments = {}

#: The sections to extract from a function docstring that should be used
#: in the epilog of this parser. See also the :meth:`setup_args` method
epilog_sections = ['Notes', 'References']

#: The formatter specification for the epilog. This can either be a string
#: out of 'header', 'bold', or
#: 'rubric' or a callable (i.e. function) that takes two arguments,
#: the section title and the section text, and returns a string.
#:
#: 'heading'
#:     Use section headers such as::
#:
#:         Notes
#:         -----
#: 'bold'
#:     Just make a bold header for the section, e.g. ``**Notes**``
#: 'rubric'
#:     Use a rubric rst directive, e.g. ``.. rubric:: Notes``
#:
#: .. warning::
#:
#:     When building a sphinx documentation using the sphinx-argparse
#:     module, this value should be set to ``'bold'`` or ``'rubric'``! Just
#:     add these two lines to your conf.py:
#:
#:     .. code-block:: python
#:
#:         import funcargparse
#:         funcargparse.FuncArgParser.epilog_formatter = 'rubric'
epilog_formatter = 'heading'
def __init__(self, *args, **kwargs):
    """
    Parameters
    ----------
    ``*args,**kwargs``
        These arguments are determined by the
        :class:`argparse.ArgumentParser` base class. Note that by default,
        we use a :class:`argparse.RawTextHelpFormatter` class for the
        `formatter_class` keyword, whereas the
        :class:`argparse.ArgumentParser` uses a
        :class:`argparse.HelpFormatter`

    Other Parameters
    ----------------
    epilog_sections: list of str
        The default sections to use for the epilog (see the
        :attr:`epilog_sections` attribute). They can also be specified
        each time the :meth:`setup_args` method is called
    epilog_formatter: {'header', 'bold', 'rubric'} or function
        Specify how the epilog sections should be formatted and defaults to
        the :attr:`epilog_formatter` attribute. This can either be a string
        out of 'header', 'bold', or 'rubric' or a callable (i.e. function)
        that takes two arguments, the section title and the section text,
        and returns a string.

        'heading'
            Use section headers such as::

                Notes
                -----
        'bold'
            Just make a bold header for the section, e.g. ``**Notes**``
        'rubric'
            Use a rubric rst directive, e.g. ``.. rubric:: Notes``
    """
    self._subparsers_action = None
    kwargs.setdefault('formatter_class', argparse.RawTextHelpFormatter)
    # the epilog keywords are popped before delegating to argparse,
    # which does not know them
    epilog_sections = kwargs.pop('epilog_sections', None)
    if epilog_sections is not None:
        self.epilog_sections = epilog_sections
    epilog_formatter = kwargs.pop('epilog_formatter', None)
    if epilog_formatter is not None:
        self.epilog_formatter = epilog_formatter
    super(FuncArgParser, self).__init__(*args, **kwargs)
    # per-instance state (shadows the class-level defaults)
    self.unfinished_arguments = OrderedDict()
    self._used_functions = []
    self.__currentarg = None
    self._chain_subparsers = False
    self._setup_as = None
    self._epilog_formatters = {'heading': self.format_heading,
                               'bold': self.format_bold,
                               'rubric': self.format_rubric}
@staticmethod
def get_param_doc(doc, param):
    """Get the documentation and datatype for a parameter

    This function returns the documentation and the argument for a
    napoleon like structured docstring `doc`

    Parameters
    ----------
    doc: str
        The base docstring to use
    param: str
        The argument to use

    Returns
    -------
    str
        The documentation of the given `param`
    str
        The datatype of the given `param`"""
    arg_doc = docstrings.keep_params_s(doc, [param]) or \
        docstrings.keep_types_s(doc, [param])
    dtype = None
    if arg_doc:
        lines = arg_doc.splitlines()
        # everything after the first line is the parameter description
        arg_doc = dedents('\n' + '\n'.join(lines[1:]))
        # a 'name: type' first line carries the declared datatype
        param_desc = lines[0].split(':', 1)
        if len(param_desc) > 1:
            dtype = param_desc[1].strip()
    return arg_doc, dtype
@docstrings.get_sectionsf('FuncArgParser.setup_args',
                          sections=['Parameters', 'Returns'])
@docstrings.dedent
def setup_args(self, func=None, setup_as=None, insert_at=None,
               interprete=True, epilog_sections=None,
               overwrite=False, append_epilog=True):
    """
    Add the parameters from the given `func` to the parameter settings

    Parameters
    ----------
    func: function
        The function to use. If None, a function will be returned that can
        be used as a decorator
    setup_as: str
        The attribute that shall be assigned to the function in the
        resulting namespace. If specified, this function will be used when
        calling the :meth:`parse2func` method
    insert_at: int
        The position where the given `func` should be inserted. If None,
        it will be appended at the end and used when calling the
        :meth:`parse2func` method
    interprete: bool
        If True (default), the docstrings are interpreted and switches and
        lists are automatically inserted (see the
        [interpretation-docs]_
    epilog_sections: list of str
        The headers of the sections to extract. If None, the
        :attr:`epilog_sections` attribute is used
    overwrite: bool
        If True, overwrite the existing epilog and the existing description
        of the parser
    append_epilog: bool
        If True, append to the existing epilog

    Returns
    -------
    function
        Either the function that can be used as a decorator (if `func` is
        ``None``), or the given `func` itself.

    Examples
    --------
    Use this method as a decorator::

        >>> @parser.setup_args
        ... def do_something(a=1):
        ...     '''
        ...     Just an example
        ...
        ...     Parameters
        ...     ----------
        ...     a: int
        ...         A number to increment by one
        ...     '''
        ...     return a + 1
        >>> args = parser.parse_args('-a 2'.split())

    Or by specifying the setup_as function::

        >>> @parser.setup_args(setup_as='func')
        ... def do_something(a=1):
        ...     '''
        ...     Just an example
        ...
        ...     Parameters
        ...     ----------
        ...     a: int
        ...         A number to increment by one
        ...     '''
        ...     return a + 1
        >>> args = parser.parse_args('-a 2'.split())
        >>> args.func is do_something
        >>> parser.parse2func('-a 2'.split())
        3

    References
    ----------
    .. [interpretation-docs]
        http://funcargparse.readthedocs.io/en/latest/docstring_interpretation.html)
    """
    def setup(func):
        # insert the function
        if insert_at is None:
            self._used_functions.append(func)
        else:
            self._used_functions.insert(insert_at, func)
        args_dict = self.unfinished_arguments
        # save the function to use in parse2func
        if setup_as:
            args_dict[setup_as] = dict(
                long=setup_as, default=func, help=argparse.SUPPRESS)
        self._setup_as = setup_as
        # create arguments
        # BUGFIX: ``inspect.getargspec`` was removed in Python 3.11. Use
        # ``getfullargspec`` where available -- its first four fields
        # match the old ``getargspec`` tuple -- and keep ``getargspec``
        # as fallback for Python 2.
        getspec = getattr(inspect, 'getfullargspec', None) or \
            inspect.getargspec
        spec = getspec(func)
        args, varargs, varkw, defaults = spec[:4]
        full_doc = docstrings.dedents(inspect.getdoc(func))
        summary = docstrings.get_full_description(full_doc)
        if summary:
            if not self.description or overwrite:
                self.description = summary
            full_doc = docstrings._remove_summary(full_doc)
        self.extract_as_epilog(full_doc, epilog_sections, overwrite,
                               append_epilog)
        doc = docstrings._get_section(full_doc, 'Parameters') + '\n'
        doc += docstrings._get_section(full_doc, 'Other Parameters')
        doc = doc.rstrip()
        # arguments before this index have no default and thus become
        # positional command line arguments
        default_min = len(args or []) - len(defaults or [])
        for i, arg in enumerate(args):
            if arg == 'self' or arg in args_dict:
                continue
            arg_doc, dtype = self.get_param_doc(doc, arg)
            args_dict[arg] = d = {'dest': arg, 'short': arg.replace('_',
                                                                    '-'),
                                  'long': arg.replace('_', '-')}
            if arg_doc:
                d['help'] = arg_doc
            if i >= default_min:
                d['default'] = defaults[i - default_min]
            else:
                d['positional'] = True
            if interprete and dtype == 'bool' and 'default' in d:
                # booleans with a default become on/off switches
                d['action'] = 'store_false' if d['default'] else \
                    'store_true'
            elif interprete and dtype:
                if dtype.startswith('list of'):
                    # 'list of X' documented types take multiple values
                    d['nargs'] = '+'
                    dtype = dtype[7:].strip()
                if dtype in ['str', 'string', 'strings']:
                    d['type'] = six.text_type
                    if dtype == 'strings':
                        dtype = 'string'
                else:
                    try:
                        d['type'] = getattr(builtins, dtype)
                    except AttributeError:
                        try:  # maybe the dtype has a final 's'
                            d['type'] = getattr(builtins, dtype[:-1])
                            dtype = dtype[:-1]
                        except AttributeError:
                            pass
                d['metavar'] = dtype
        return func
    if func is None:
        return setup
    else:
        return setup(func)
@docstrings.get_sectionsf('FuncArgParser.add_subparsers')
@docstrings.dedent
def add_subparsers(self, *args, **kwargs):
    """
    Add subparsers to this parser

    Parameters
    ----------
    ``*args, **kwargs``
        As specified by the original
        :meth:`argparse.ArgumentParser.add_subparsers` method
    chain: bool
        Default: False. If True, chaining of subparsers is enabled (see
        :meth:`parse_known_args` and :meth:`parse_chained`)"""
    # 'chain' is our own keyword and must not reach argparse
    chain = kwargs.pop('chain', None)
    ret = super(FuncArgParser, self).add_subparsers(*args, **kwargs)
    if chain:
        self._chain_subparsers = True
    self._subparsers_action = ret
    return ret
@docstrings.dedent
def setup_subparser(
        self, func=None, setup_as=None, insert_at=None, interprete=True,
        epilog_sections=None, overwrite=False, append_epilog=True,
        return_parser=False, name=None, **kwargs):
    """
    Create a subparser with the name of the given function

    Parameters are the same as for the :meth:`setup_args` function, other
    parameters are parsed to the :meth:`add_subparsers` method if (and only
    if) this method has not already been called.

    Parameters
    ----------
    %(FuncArgParser.setup_args.parameters)s
    return_parser: bool
        If True, the created parser is returned instead of the function
    name: str
        The name of the created parser. If None, the function name is used
        and underscores (``'_'``) are replaced by minus (``'-'``)
    ``**kwargs``
        Any other parameter that is passed to the add_parser method that
        creates the parser

    Returns
    -------
    FuncArgParser or %(FuncArgParser.setup_args.returns)s
        If return_parser is True, the created subparser is returned

    Examples
    --------
    Use this method as a decorator::

        >>> from funcargparser import FuncArgParser
        >>> parser = FuncArgParser()
        >>> @parser.setup_subparser
        ... def my_func(my_argument=None):
        ...     pass
        >>> args = parser.parse_args('my-func -my-argument 1'.split())
    """
    def setup(func):
        if self._subparsers_action is None:
            raise RuntimeError(
                "No subparsers have yet been created! Run the "
                "add_subparsers method first!")
        # replace underscore by '-'
        name2use = name
        if name2use is None:
            name2use = func.__name__.replace('_', '-')
        # the one-line summary of the function serves as subcommand help
        kwargs.setdefault('help', docstrings.get_summary(
            docstrings.dedents(inspect.getdoc(func))))
        parser = self._subparsers_action.add_parser(name2use, **kwargs)
        parser.setup_args(
            func, setup_as=setup_as, insert_at=insert_at,
            interprete=interprete, epilog_sections=epilog_sections,
            overwrite=overwrite, append_epilog=append_epilog)
        return func, parser
    if func is None:
        return lambda f: setup(f)[0]
    else:
        return setup(func)[int(return_parser)]
@docstrings.get_sectionsf('FuncArgParser.update_arg')
@docstrings.dedent
def update_arg(self, arg, if_existent=None, **kwargs):
    """
    Update the `add_argument` data for the given parameter

    Parameters
    ----------
    arg: str
        The name of the function argument
    if_existent: bool or None
        If True, the argument is updated. If None (default), the argument
        is only updated, if it exists. Otherwise, if False, the given
        ``**kwargs`` are only used if the argument is not yet existing
    ``**kwargs``
        The keyword arguments any parameter for the
        :meth:`argparse.ArgumentParser.add_argument` method
    """
    if if_existent or (if_existent is None and
                       arg in self.unfinished_arguments):
        self.unfinished_arguments[arg].update(kwargs)
    elif not if_existent and if_existent is not None:
        # if_existent is False: only register defaults for a new argument
        self.unfinished_arguments.setdefault(arg, kwargs)
@docstrings.dedent
def update_argf(self, arg, **kwargs):
    """
    Update the arguments as a decorator

    Decorator variant of :meth:`update_arg`: returns a decorator that
    applies the given ``add_argument`` keywords to every parser that was
    set up from the decorated function.

    Parameters
    ----------
    %(FuncArgParser.update_arg.parameters)s

    Examples
    --------
    Use this method as a decorator::

        >>> from funcargparser import FuncArgParser
        >>> parser = FuncArgParser()
        >>> @parser.update_argf('my_argument', type=int)
        ... def my_func(my_argument=None):
        ...     pass
        >>> args = parser.parse_args('my-func -my-argument 1'.split())
        >>> isinstance(args.my_argument, int)
        True

    See Also
    --------
    update_arg"""
    # Delegate to ``update_arg`` on every parser that was set up from the
    # decorated function (see :meth:`_as_decorator`).
    return self._as_decorator('update_arg', arg, **kwargs)
def _as_decorator(self, funcname, *args, **kwargs):
def func_decorator(func):
success = False
for parser in self._get_corresponding_parsers(func):
getattr(parser, funcname)(*args, **kwargs)
success = True
if not success:
raise ValueError(
"Could not figure out to which this %s belongs" % func)
return func
return func_decorator
def _get_corresponding_parsers(self, func):
"""Get the parser that has been set up by the given `function`"""
if func in self._used_functions:
yield self
if self._subparsers_action is not None:
for parser in self._subparsers_action.choices.values():
for sp in parser._get_corresponding_parsers(func):
yield sp
def pop_arg(self, *args, **kwargs):
    """Remove a previously registered argument from this parser.

    All positional and keyword arguments are forwarded to
    :meth:`dict.pop` on :attr:`unfinished_arguments`, so a default may
    be supplied as second positional argument.
    """
    pending = self.unfinished_arguments
    return pending.pop(*args, **kwargs)
def pop_argf(self, *args, **kwargs):
    """Delete a previously defined argument from the parser via decorators

    Same as :meth:`pop_arg` but it can be used as a decorator: the removal
    is applied to every parser that was set up from the decorated
    function."""
    return self._as_decorator('pop_arg', *args, **kwargs)
def pop_key(self, arg, key, *args, **kwargs):
    """Remove one ``add_argument`` keyword from the argument `arg`.

    Extra positional/keyword arguments are forwarded to
    :meth:`dict.pop`, so a default can be supplied for a missing `key`.
    """
    settings = self.unfinished_arguments[arg]
    return settings.pop(key, *args, **kwargs)
def pop_keyf(self, *args, **kwargs):
    """Delete a previously defined key for the `add_argument`

    Same as :meth:`pop_key` but it can be used as a decorator: the removal
    is applied to every parser that was set up from the decorated
    function."""
    return self._as_decorator('pop_key', *args, **kwargs)
def append2help(self, arg, s):
    """Extend the help text of argument `arg` by the string `s`.

    Parameters
    ----------
    arg: str
        The function argument whose help shall be extended
    s: str
        The text appended to the current help message
    """
    entry = self.unfinished_arguments[arg]
    entry['help'] = entry['help'] + s
def append2helpf(self, arg, s):
    """Append the given string to the help of argument `arg`

    Same as :meth:`append2help` but usable as a decorator: the help text
    is extended on every parser that was set up from the decorated
    function.

    Parameters
    ----------
    arg: str
        The function argument
    s: str
        The string to append to the help"""
    return self._as_decorator('append2help', arg, s)
@staticmethod
def format_bold(section, text):
"""Make a bold formatting for the section header"""
return '**%s**\n\n%s' % (section, text)
@staticmethod
def format_rubric(section, text):
"""Make a bold formatting for the section header"""
return '.. rubric:: %s\n\n%s' % (section, text)
@staticmethod
def format_heading(section, text):
return '\n'.join([section, '-' * len(section), text])
def format_epilog_section(self, section, text):
    """Render one epilog section with the configured formatter.

    :attr:`epilog_formatter` may either be one of the keys registered in
    ``_epilog_formatters`` ('heading', 'bold', 'rubric') or a callable
    taking ``(section, text)`` and returning a string.
    """
    formatter = self.epilog_formatter
    try:
        render = self._epilog_formatters[formatter]
    except KeyError:
        # Not a registered name -- accept a user-supplied callable,
        # otherwise re-raise the KeyError for an unknown specification.
        if not callable(formatter):
            raise
        render = formatter
    return render(section, text)
def extract_as_epilog(self, text, sections=None, overwrite=False,
                      append=True):
    """Extract epilog sections from a docstring

    Parameters
    ----------
    text
        The docstring to use
    sections: list of str
        The headers of the sections to extract. If None, the
        :attr:`epilog_sections` attribute is used
    overwrite: bool
        If True, overwrite the existing epilog
    append: bool
        If True, append to the existing epilog"""
    if sections is None:
        sections = self.epilog_sections
    if ((not self.epilog or overwrite or append) and sections):
        epilog_parts = []
        for sec in sections:
            # BUGFIX: use a separate variable instead of rebinding
            # ``text``. Rebinding made every section after the first be
            # looked up inside the previous section's extracted content
            # rather than in the full docstring, so only the first
            # matching section ever made it into the epilog.
            sec_text = docstrings._get_section(text, sec).strip()
            if sec_text:
                epilog_parts.append(
                    self.format_epilog_section(sec, sec_text))
        if epilog_parts:
            epilog = '\n\n'.join(epilog_parts)
            if overwrite or not self.epilog:
                self.epilog = epilog
            else:
                self.epilog += '\n\n' + epilog
def grouparg(self, arg, my_arg=None, parent_cmds=[]):
    """
    Grouper function for chaining subcommands

    Parameters
    ----------
    arg: str
        The current command line argument that is parsed
    my_arg: str
        The name of this subparser. If None, this parser is the main
        parser and has no parent parser
    parent_cmds: list of str
        The available commands of the parent parsers

    Returns
    -------
    str or None
        The grouping key for the given `arg` or None if the key does
        not correspond to this parser or this parser is the main parser
        and does not have seen a subparser yet

    Notes
    -----
    Quite complicated, there is no real need to deal with this function
    """
    # NOTE(review): the mutable default ``parent_cmds=[]`` is safe here
    # because it is only read, never mutated.
    if self._subparsers_action is None:
        return None
    commands = self._subparsers_action.choices
    # ``__currentarg`` holds the subcommand we are currently grouping
    # into; it is (re)set by :meth:`parse_known_args`
    currentarg = self.__currentarg
    # the default return value is the current argument we are in or the
    # name of the subparser itself
    ret = currentarg or my_arg
    if currentarg is not None:
        # if we are already in a sub command, we use the sub parser
        sp_key = commands[currentarg].grouparg(arg, currentarg, chain(
            commands, parent_cmds))
        if sp_key is None and arg in commands:
            # if the subparser did not recognize the command, we use the
            # command the corresponds to this parser or (of this parser
            # is the parent parser) the current subparser
            self.__currentarg = currentarg = arg
            ret = my_arg or currentarg
        elif sp_key not in commands and arg in parent_cmds:
            # otherwise, if the subparser recognizes the commmand but it is
            # not in the known command of this parser, it must be another
            # command of the subparser and this parser can ignore it
            ret = None
        else:
            # otherwise the command belongs to this subparser (if this one
            # is not the subparser) or the current subparser
            ret = my_arg or currentarg
    elif arg in commands:
        # if the argument is a valid subparser, we return this one
        self.__currentarg = arg
        ret = arg
    elif arg in parent_cmds:
        # if the argument is not a valid subparser but in one of our
        # parents, we return None to signalize that we cannot categorize
        # it
        ret = None
    return ret
def parse_known_args(self, args=None, namespace=None):
    """Parse known arguments, optionally dispatching chained subcommands.

    If subparsers were created with ``chain=True`` (see
    :meth:`add_subparsers`), the command line is split into one group of
    arguments per subcommand via :meth:`grouparg` and each group is
    parsed separately. The returned namespace then maps each subcommand
    name (with ``'-'`` replaced by ``'_'``) to the namespace of that
    subcommand. Otherwise the default :class:`argparse.ArgumentParser`
    behaviour is used.
    """
    if self._chain_subparsers:
        if args is None:
            args = sys.argv[1:]
        choices_d = OrderedDict()
        remainders = OrderedDict()
        main_args = []
        # reset the grouping state used by `grouparg` before grouping
        # (the previous code also bound an unused local ``cmd`` here and
        # an unused ``enumerate`` index in the loop below)
        self.__currentarg = None
        for cmd, subargs in groupby(args, self.grouparg):
            if cmd is None:
                main_args += list(subargs)
            else:
                # replace '-' by underscore for a valid namespace key
                ns_cmd = cmd.replace('-', '_')
                choices_d[ns_cmd], remainders[ns_cmd] = super(
                    FuncArgParser, self).parse_known_args(
                        list(chain(main_args, subargs)))
        main_ns, remainders[None] = self.__parse_main(main_args)
        for key, val in vars(main_ns).items():
            choices_d[key] = val
        self.__currentarg = None
        # '__dummy' is inserted by __parse_main as a python 2 work around
        if '__dummy' in choices_d:
            del choices_d['__dummy']
        return Namespace(**choices_d), list(chain(*remainders.values()))
    # otherwise, use the default behaviour
    return super(FuncArgParser, self).parse_known_args(args, namespace)
def __parse_main(self, args):
    """Parse only the main (non-subcommand) arguments.

    Work around for python 2.7 because argparse does not allow to parse
    arguments without subparsers there.
    """
    parse = super(FuncArgParser, self).parse_known_args
    if not six.PY2:
        return parse(args)
    # python 2 insists on a subcommand, so register and feed a throw-away
    # one; the resulting '__dummy' entry is removed by the caller
    self._subparsers_action.add_parser("__dummy")
    return parse(list(args) + ['__dummy'])
@docstrings.get_sectionsf('FuncArgParser.update_short')
@docstrings.dedent
def update_short(self, **kwargs):
    """
    Update the short optional arguments (those with one leading '-')

    This method updates the short argument name for the specified function
    arguments as stored in :attr:`unfinished_arguments`

    Parameters
    ----------
    ``**kwargs``
        Keywords must be keys in the :attr:`unfinished_arguments`
        dictionary (i.e. keywords of the root functions), values the short
        argument names

    Examples
    --------
    Setting::

        >>> parser.update_short(something='s', something_else='se')

    is basically the same as::

        >>> parser.update_arg('something', short='s')
        >>> parser.update_arg('something_else', short='se')

    which in turn is basically comparable to::

        >>> parser.add_argument('-s', '--something', ...)
        >>> parser.add_argument('-se', '--something_else', ...)

    See Also
    --------
    update_shortf, update_long"""
    # delegate each keyword to :meth:`update_arg` with the `short` key
    for key, val in six.iteritems(kwargs):
        self.update_arg(key, short=val)
@docstrings.dedent
def update_shortf(self, **kwargs):
    """
    Update the short optional arguments belonging to a function

    This method acts exactly like :meth:`update_short` but works as a
    decorator (see :meth:`update_arg` and :meth:`update_argf`)

    Parameters
    ----------
    %(FuncArgParser.update_short.parameters)s

    Returns
    -------
    function
        The function that can be used as a decorator

    Examples
    --------
    Use this method as a decorator::

        >>> @parser.update_shortf(something='s', something_else='se')
        ... def do_something(something=None, something_else=None):
        ...     ...

    See also the examples in :meth:`update_short`.

    See Also
    --------
    update_short, update_longf
    """
    # defer to the generic decorator factory
    return self._as_decorator('update_short', **kwargs)
@docstrings.get_sectionsf('FuncArgParser.update_long')
@docstrings.dedent
def update_long(self, **kwargs):
    """
    Update the long optional arguments (those with two leading '-')

    This method updates the long argument name for the specified function
    arguments as stored in :attr:`unfinished_arguments`

    Parameters
    ----------
    ``**kwargs``
        Keywords must be keys in the :attr:`unfinished_arguments`
        dictionary (i.e. keywords of the root functions), values the long
        argument names

    Examples
    --------
    Setting::

        >>> parser.update_long(something='s', something_else='se')

    is basically the same as::

        >>> parser.update_arg('something', long='s')
        >>> parser.update_arg('something_else', long='se')

    which in turn is basically comparable to::

        >>> parser.add_argument('--s', dest='something', ...)
        >>> parser.add_argument('--se', dest='something_else', ...)

    See Also
    --------
    update_short, update_longf"""
    # delegate each keyword to :meth:`update_arg` with the `long` key
    for key, val in six.iteritems(kwargs):
        self.update_arg(key, long=val)
@docstrings.dedent
def update_longf(self, **kwargs):
    """
    Update the long optional arguments belonging to a function

    This method acts exactly like :meth:`update_long` but works as a
    decorator (see :meth:`update_arg` and :meth:`update_argf`)

    Parameters
    ----------
    %(FuncArgParser.update_long.parameters)s

    Returns
    -------
    function
        The function that can be used as a decorator

    Examples
    --------
    Use this method as a decorator::

        >>> @parser.update_longf(something='s', something_else='se')
        ... def do_something(something=None, something_else=None):
        ...     ...

    See also the examples in :meth:`update_long`.

    See Also
    --------
    update_long, update_shortf
    """
    # defer to the generic decorator factory
    return self._as_decorator('update_long', **kwargs)
def parse2func(self, args=None, func=None):
    """Parse the command line arguments into a call of the setup function.

    The arguments are parsed via :meth:`parse_args` and the resulting
    namespace is unpacked as keyword arguments into `func`.

    Parameters
    ----------
    args: list
        The list of command line arguments
    func: function
        An alternative function to use. If None, the last function or the
        one specified through the `setup_as` parameter in the
        :meth:`setup_args` method is used.

    Returns
    -------
    object
        Whatever is returned by the called function

    Note
    ----
    This method does not cover subparsers!"""
    namespace = self.parse_args(args)
    kws = vars(namespace)
    if func is None:
        func = (kws.pop(self._setup_as) if self._setup_as
                else self._used_functions[-1])
    return func(**kws)
def parse_known2func(self, args=None, func=None):
    """Parse the known command line arguments into a call of the setup
    function.

    Like :meth:`parse2func` but based on :meth:`parse_known_args`, so
    unrecognized arguments are returned instead of raising an error.

    Parameters
    ----------
    args: list
        The list of command line arguments
    func: function or str
        An alternative function to use. If None, the last function or the
        one specified through the `setup_as` parameter in the
        :meth:`setup_args` method is used.

    Returns
    -------
    object
        Whatever is returned by the called function
    list
        The remaining command line arguments that could not be interpreted

    Note
    ----
    This method does not cover subparsers!"""
    namespace, remainder = self.parse_known_args(args)
    kws = vars(namespace)
    if func is None:
        func = (kws.pop(self._setup_as) if self._setup_as
                else self._used_functions[-1])
    return func(**kws), remainder
def parse_chained(self, args=None):
    """
    Parse the arguments directly to the functions used for setup

    The command line arguments are parsed and dispatched to the
    functions that the (sub)parsers have been set up with.

    Parameters
    ----------
    args: list
        The arguments parsed to the :meth:`parse_args` function

    Returns
    -------
    argparse.Namespace
        The namespace with mapping from command name to the function
        return

    See also
    --------
    parse_known_chained
    """
    namespace = self.parse_args(args)
    return self._parse2subparser_funcs(vars(namespace))
def parse_known_chained(self, args=None):
    """
    Parse the known arguments directly to the functions used for setup

    Like :meth:`parse_chained` but based on :meth:`parse_known_args`, so
    unrecognized arguments are returned instead of raising an error.

    Parameters
    ----------
    args: list
        The arguments parsed to the :meth:`parse_args` function

    Returns
    -------
    argparse.Namespace
        The namespace with mapping from command name to the function
        return
    list
        The remaining arguments that could not be interpreted

    See also
    --------
    parse_chained
    """
    namespace, remainder = self.parse_known_args(args)
    return self._parse2subparser_funcs(vars(namespace)), remainder
def _parse2subparser_funcs(self, kws):
    """
    Recursive function to parse arguments to chained parsers

    `kws` is the dict form of a parsed namespace. If it contains keys
    that correspond to subcommands of this parser, the call is delegated
    recursively to the matching subparsers; otherwise the function this
    parser was set up with is called with the remaining keywords.
    """
    # available subcommands of this parser ({} if there are none)
    choices = getattr(self._subparsers_action, 'choices', {})
    # map from namespace attribute name (with '_') back to the command
    # name (which may contain '-')
    replaced = {key.replace('-', '_'): key for key in choices}
    sp_commands = set(replaced).intersection(kws)
    if not sp_commands:
        # no subcommand used --> call this parser's own setup function
        if self._setup_as is not None:
            func = kws.pop(self._setup_as)
        else:
            try:
                func = self._used_functions[-1]
            except IndexError:
                # this parser was never set up with a function
                return None
        # NOTE(review): `choices` holds the command names containing
        # '-', whereas `kws` keys use '_' -- presumably this filter
        # should go through `replaced`; confirm against callers
        return func(**{
            key: kws[key] for key in set(kws).difference(choices)})
    else:
        ret = {}
        for key in sp_commands:
            # recurse into the subparser's namespace
            ret[key.replace('-', '_')] = \
                choices[replaced[key]]._parse2subparser_funcs(
                    vars(kws[key]))
        return Namespace(**ret)
def get_subparser(self, name):
    """
    Convenience method to get a certain subparser

    Parameters
    ----------
    name: str
        The name of the subparser

    Returns
    -------
    FuncArgParser
        The subparser corresponding to `name`

    Raises
    ------
    ValueError
        If no subparsers have been created for this parser
    """
    action = self._subparsers_action
    if action is None:
        raise ValueError("%s has no subparsers defined!" % self)
    return action.choices[name]
|
Chilipp/funcargparse | funcargparse/__init__.py | FuncArgParser.format_epilog_section | python | def format_epilog_section(self, section, text):
try:
func = self._epilog_formatters[self.epilog_formatter]
except KeyError:
if not callable(self.epilog_formatter):
raise
func = self.epilog_formatter
return func(section, text) | Format a section for the epilog by inserting a format | train | https://github.com/Chilipp/funcargparse/blob/398ce8e7fa5aa35c465215446bda151cf1ecf7ad/funcargparse/__init__.py#L578-L586 | null | class FuncArgParser(ArgumentParser):
"""Subclass of an argument parser that get's parts of the information
from a given function"""
_finalized = False
#: The unfinished arguments after the setup
unfinished_arguments = {}
#: The sections to extract from a function docstring that should be used
#: in the epilog of this parser. See also the :meth:`setup_args` method
epilog_sections = ['Notes', 'References']
#: The formatter specification for the epilog. This can either be a string
#: out of 'header', 'bold', or
#: 'rubric' or a callable (i.e. function) that takes two arguments,
#: the section title and the section text, and returns a string.
#:
#: 'heading'
#: Use section headers such as::
#:
#: Notes
#: -----
#: 'bold'
#: Just make a bold header for the section, e.g. ``**Notes**``
#: 'rubric'
#: Use a rubric rst directive, e.g. ``.. rubric:: Notes``
#:
#: .. warning::
#:
#: When building a sphinx documentation using the sphinx-argparse
#: module, this value should be set to ``'bold'`` or ``'rubric'``! Just
#: add this two lines to your conf.py:
#:
#: .. code-block:: python
#:
#: import funcargparse
#: funcargparse.FuncArgParser.epilog_formatter = 'rubric'
epilog_formatter = 'heading'
def __init__(self, *args, **kwargs):
    """
    Parameters
    ----------
    ``*args,**kwargs``
        Theses arguments are determined by the
        :class:`argparse.ArgumentParser` base class. Note that by default,
        we use a :class:`argparse.RawTextHelpFormatter` class for the
        `formatter_class` keyword, whereas the
        :class:`argparse.ArgumentParser` uses a
        :class:`argparse.HelpFormatter`

    Other Parameters
    ----------------
    epilog_sections: list of str
        The default sections to use for the epilog (see the
        :attr:`epilog_sections` attribute). They can also be specified
        each time the :meth:`setup_args` method is called
    epilog_formatter: {'header', 'bold', 'rubric'} or function
        Specify how the epilog sections should be formatted and defaults to
        the :attr:`epilog_formatter` attribute. This can either be a string
        out of 'header', 'bold', or 'rubric' or a callable (i.e. function)
        that takes two arguments, the section title and the section text,
        and returns a string.

        'heading'
            Use section headers such as::

                Notes
                -----
        'bold'
            Just make a bold header for the section, e.g. ``**Notes**``
        'rubric'
            Use a rubric rst directive, e.g. ``.. rubric:: Notes``
    """
    # the action created by :meth:`add_subparsers` (None until then)
    self._subparsers_action = None
    kwargs.setdefault('formatter_class', argparse.RawTextHelpFormatter)
    # the epilog keywords are our own and must not reach the base class
    epilog_sections = kwargs.pop('epilog_sections', None)
    if epilog_sections is not None:
        self.epilog_sections = epilog_sections
    epilog_formatter = kwargs.pop('epilog_formatter', None)
    if epilog_formatter is not None:
        self.epilog_formatter = epilog_formatter
    super(FuncArgParser, self).__init__(*args, **kwargs)
    # per-instance mapping (shadows the class attribute) of argument
    # name -> keywords for :meth:`argparse.ArgumentParser.add_argument`
    self.unfinished_arguments = OrderedDict()
    # the functions passed to :meth:`setup_args`, in order
    self._used_functions = []
    # state used by :meth:`grouparg` while grouping chained commands
    self.__currentarg = None
    self._chain_subparsers = False
    self._setup_as = None
    # mapping from formatter name to the bound formatter method
    self._epilog_formatters = {'heading': self.format_heading,
                               'bold': self.format_bold,
                               'rubric': self.format_rubric}
@staticmethod
def get_param_doc(doc, param):
    """Get the documentation and datatype for a parameter

    This function returns the documentation and the argument for a
    napoleon like structured docstring `doc`

    Parameters
    ----------
    doc: str
        The base docstring to use
    param: str
        The argument to use

    Returns
    -------
    str
        The documentation of the given `param`
    str
        The datatype of the given `param`"""
    dtype = None
    arg_doc = (docstrings.keep_params_s(doc, [param]) or
               docstrings.keep_types_s(doc, [param]))
    if not arg_doc:
        return arg_doc, dtype
    lines = arg_doc.splitlines()
    # everything after the first line is the description
    arg_doc = dedents('\n' + '\n'.join(lines[1:]))
    # the part after the colon on the first line is the data type
    if ':' in lines[0]:
        dtype = lines[0].split(':', 1)[1].strip()
    return arg_doc, dtype
@docstrings.get_sectionsf('FuncArgParser.setup_args',
                          sections=['Parameters', 'Returns'])
@docstrings.dedent
def setup_args(self, func=None, setup_as=None, insert_at=None,
               interprete=True, epilog_sections=None,
               overwrite=False, append_epilog=True):
    """
    Add the parameters from the given `func` to the parameter settings

    Parameters
    ----------
    func: function
        The function to use. If None, a function will be returned that can
        be used as a decorator
    setup_as: str
        The attribute that shall be assigned to the function in the
        resulting namespace. If specified, this function will be used when
        calling the :meth:`parse2func` method
    insert_at: int
        The position where the given `func` should be inserted. If None,
        it will be appended at the end and used when calling the
        :meth:`parse2func` method
    interprete: bool
        If True (default), the docstrings are interpreted and switches and
        lists are automatically inserted (see the
        [interpretation-docs]_)
    epilog_sections: list of str
        The headers of the sections to extract. If None, the
        :attr:`epilog_sections` attribute is used
    overwrite: bool
        If True, overwrite the existing epilog and the existing description
        of the parser
    append_epilog: bool
        If True, append to the existing epilog

    Returns
    -------
    function
        Either the function that can be used as a decorator (if `func` is
        ``None``), or the given `func` itself.

    Examples
    --------
    Use this method as a decorator::

        >>> @parser.setup_args
        ... def do_something(a=1):
        ...     '''
        ...     Just an example
        ...
        ...     Parameters
        ...     ----------
        ...     a: int
        ...         A number to increment by one
        ...     '''
        ...     return a + 1

        >>> args = parser.parse_args('-a 2'.split())

    Or by specifying the setup_as function::

        >>> @parser.setup_args(setup_as='func')
        ... def do_something(a=1):
        ...     '''
        ...     Just an example
        ...
        ...     Parameters
        ...     ----------
        ...     a: int
        ...         A number to increment by one
        ...     '''
        ...     return a + 1

        >>> args = parser.parse_args('-a 2'.split())
        >>> args.func is do_something

        >>> parser.parse2func('-a 2'.split())
        3

    References
    ----------
    .. [interpretation-docs]
        http://funcargparse.readthedocs.io/en/latest/docstring_interpretation.html
    """
    def setup(func):
        # insert the function
        if insert_at is None:
            self._used_functions.append(func)
        else:
            self._used_functions.insert(insert_at, func)
        args_dict = self.unfinished_arguments
        # save the function to use in parse2funcs
        if setup_as:
            args_dict[setup_as] = dict(
                long=setup_as, default=func, help=argparse.SUPPRESS)
            self._setup_as = setup_as
        # create arguments
        # NOTE(review): inspect.getargspec is deprecated and was removed
        # in Python 3.11; getfullargspec would be the py3 replacement
        # but is not available in python 2 -- confirm supported versions
        args, varargs, varkw, defaults = inspect.getargspec(func)
        full_doc = docstrings.dedents(inspect.getdoc(func))
        summary = docstrings.get_full_description(full_doc)
        if summary:
            if not self.description or overwrite:
                self.description = summary
            full_doc = docstrings._remove_summary(full_doc)
        self.extract_as_epilog(full_doc, epilog_sections, overwrite,
                               append_epilog)
        # combine 'Parameters' and 'Other Parameters' for the lookup
        doc = docstrings._get_section(full_doc, 'Parameters') + '\n'
        doc += docstrings._get_section(full_doc, 'Other Parameters')
        doc = doc.rstrip()
        # arguments with index >= default_min have a default value
        default_min = len(args or []) - len(defaults or [])
        for i, arg in enumerate(args):
            if arg == 'self' or arg in args_dict:
                continue
            arg_doc, dtype = self.get_param_doc(doc, arg)
            args_dict[arg] = d = {'dest': arg,
                                  'short': arg.replace('_', '-'),
                                  'long': arg.replace('_', '-')}
            if arg_doc:
                d['help'] = arg_doc
            if i >= default_min:
                d['default'] = defaults[i - default_min]
            else:
                # no default value --> positional argument
                d['positional'] = True
            if interprete and dtype == 'bool' and 'default' in d:
                # booleans with a default become simple switches
                d['action'] = 'store_false' if d['default'] else \
                    'store_true'
            elif interprete and dtype:
                if dtype.startswith('list of'):
                    # 'list of X' --> one or more arguments of type X
                    d['nargs'] = '+'
                    dtype = dtype[7:].strip()
                if dtype in ['str', 'string', 'strings']:
                    d['type'] = six.text_type
                    if dtype == 'strings':
                        dtype = 'string'
                else:
                    # map the documented type name to the builtin type
                    try:
                        d['type'] = getattr(builtins, dtype)
                    except AttributeError:
                        try:  # maybe the dtype has a final 's'
                            d['type'] = getattr(builtins, dtype[:-1])
                            dtype = dtype[:-1]
                        except AttributeError:
                            pass
                d['metavar'] = dtype
        return func
    if func is None:
        return setup
    else:
        return setup(func)
@docstrings.get_sectionsf('FuncArgParser.add_subparsers')
@docstrings.dedent
def add_subparsers(self, *args, **kwargs):
    """
    Add subparsers to this parser

    Parameters
    ----------
    ``*args, **kwargs``
        As specified by the original
        :meth:`argparse.ArgumentParser.add_subparsers` method
    chain: bool
        Default: False. If True, It is enabled to chain subparsers"""
    # `chain` is our own keyword and must not reach the base class
    chain = kwargs.pop('chain', None)
    ret = super(FuncArgParser, self).add_subparsers(*args, **kwargs)
    if chain:
        self._chain_subparsers = True
    # remember the action so that subparsers can be registered later on
    self._subparsers_action = ret
    return ret
@docstrings.dedent
def setup_subparser(
        self, func=None, setup_as=None, insert_at=None, interprete=True,
        epilog_sections=None, overwrite=False, append_epilog=True,
        return_parser=False, name=None, **kwargs):
    """
    Create a subparser with the name of the given function

    Parameters are the same as for the :meth:`setup_args` function, other
    parameters are parsed to the :meth:`add_subparsers` method if (and only
    if) this method has not already been called.

    Parameters
    ----------
    %(FuncArgParser.setup_args.parameters)s
    return_parser: bool
        If True, the create parser is returned instead of the function
    name: str
        The name of the created parser. If None, the function name is used
        and underscores (``'_'``) are replaced by minus (``'-'``)
    ``**kwargs``
        Any other parameter that is passed to the add_parser method that
        creates the parser

    Returns
    -------
    FuncArgParser or %(FuncArgParser.setup_args.returns)s
        If return_parser is True, the created subparser is returned

    Examples
    --------
    Use this method as a decorator::

        >>> from funcargparser import FuncArgParser

        >>> parser = FuncArgParser()

        >>> @parser.setup_subparser
        ... def my_func(my_argument=None):
        ...     pass

        >>> args = parser.parse_args('my-func -my-argument 1'.split())
    """
    def setup(func):
        if self._subparsers_action is None:
            raise RuntimeError(
                "No subparsers have yet been created! Run the "
                "add_subparsers method first!")
        # replace underscore by '-' for the command name
        name2use = name
        if name2use is None:
            name2use = func.__name__.replace('_', '-')
        # use the function summary as the subcommand help by default
        kwargs.setdefault('help', docstrings.get_summary(
            docstrings.dedents(inspect.getdoc(func))))
        parser = self._subparsers_action.add_parser(name2use, **kwargs)
        parser.setup_args(
            func, setup_as=setup_as, insert_at=insert_at,
            interprete=interprete, epilog_sections=epilog_sections,
            overwrite=overwrite, append_epilog=append_epilog)
        return func, parser
    if func is None:
        # decorator usage --> only hand back the function
        return lambda f: setup(f)[0]
    else:
        return setup(func)[int(return_parser)]
@docstrings.get_sectionsf('FuncArgParser.update_arg')
@docstrings.dedent
def update_arg(self, arg, if_existent=None, **kwargs):
    """
    Update the `add_argument` data for the given parameter

    Parameters
    ----------
    arg: str
        The name of the function argument
    if_existent: bool or None
        If True, the argument is updated. If None (default), the argument
        is only updated, if it exists. Otherwise, if False, the given
        ``**kwargs`` are only used if the argument is not yet existing
    ``**kwargs``
        The keyword arguments any parameter for the
        :meth:`argparse.ArgumentParser.add_argument` method
    """
    # update when forced (True) or when the argument is already known
    if if_existent or (if_existent is None and
                       arg in self.unfinished_arguments):
        self.unfinished_arguments[arg].update(kwargs)
    # if_existent is False --> only provide defaults for a new argument
    elif not if_existent and if_existent is not None:
        self.unfinished_arguments.setdefault(arg, kwargs)
@docstrings.dedent
def update_argf(self, arg, **kwargs):
    """
    Update the arguments as a decorator

    Parameters
    ----------
    %(FuncArgParser.update_arg.parameters)s

    Examples
    --------
    Use this method as a decorator::

        >>> from funcargparser import FuncArgParser

        >>> parser = FuncArgParser()

        >>> @parser.update_argf('my_argument', type=int)
        ... def my_func(my_argument=None):
        ...     pass

        >>> args = parser.parse_args('my-func -my-argument 1'.split())
        >>> isinstance(args.my_argument, int)
        True

    See Also
    --------
    update_arg"""
    # defer to the generic decorator factory
    return self._as_decorator('update_arg', arg, **kwargs)
def _as_decorator(self, funcname, *args, **kwargs):
    """Create a decorator that applies `funcname` on matching parsers.

    The returned decorator looks up every parser (this one or one of
    its subparsers) that has been set up with the decorated function
    and calls ``parser.<funcname>(*args, **kwargs)`` on each of them.
    """
    def func_decorator(func):
        parsers = list(self._get_corresponding_parsers(func))
        if not parsers:
            raise ValueError(
                "Could not figure out to which this %s belongs" % func)
        for parser in parsers:
            getattr(parser, funcname)(*args, **kwargs)
        return func
    return func_decorator
def _get_corresponding_parsers(self, func):
    """Yield every parser (this one or a subparser) set up by `func`."""
    if func in self._used_functions:
        yield self
    action = self._subparsers_action
    if action is not None:
        # recurse into all registered subparsers
        for subparser in action.choices.values():
            for match in subparser._get_corresponding_parsers(func):
                yield match
def pop_arg(self, *args, **kwargs):
    """Delete a previously defined argument from the parser

    The parameters are passed on to :meth:`dict.pop` of
    :attr:`unfinished_arguments` (i.e. the argument name and,
    optionally, a default value).
    """
    args_dict = self.unfinished_arguments
    return args_dict.pop(*args, **kwargs)
def pop_argf(self, *args, **kwargs):
    """Delete a previously defined argument via a decorator

    Same as :meth:`pop_arg` but it can be used as a decorator"""
    decorator = self._as_decorator('pop_arg', *args, **kwargs)
    return decorator
def pop_key(self, arg, key, *args, **kwargs):
    """Delete a previously defined key for the `add_argument`

    Removes `key` from the keywords stored for argument `arg`; any
    further parameters are passed on to :meth:`dict.pop` (e.g. a
    default value).
    """
    entry = self.unfinished_arguments[arg]
    return entry.pop(key, *args, **kwargs)
def pop_keyf(self, *args, **kwargs):
    """Delete a previously defined key for the `add_argument`

    Same as :meth:`pop_key` but it can be used as a decorator"""
    decorator = self._as_decorator('pop_key', *args, **kwargs)
    return decorator
def create_arguments(self, subparsers=False):
    """Create and add the arguments

    Parameters
    ----------
    subparsers: bool
        If True, the arguments of the subparsers are also created

    Raises
    ------
    ValueError
        If this method has already been called for this parser"""
    ret = []
    if not self._finalized:
        for arg, d in self.unfinished_arguments.items():
            try:
                # positional arguments get no leading '-'/'--'
                not_positional = int(not d.pop('positional', False))
                short = d.pop('short', None)
                long_name = d.pop('long', None)
                if short is None and long_name is None:
                    raise ValueError(
                        "Either a short (-) or a long (--) argument must "
                        "be provided!")
                if not not_positional:
                    # positionals are identified by the argument name
                    # and must not set `dest`
                    short = arg
                    long_name = None
                    d.pop('dest', None)
                if short == long_name:
                    long_name = None
                args = []
                if short:
                    args.append('-' * not_positional + short)
                if long_name:
                    args.append('--' * not_positional + long_name)
                # allow the argument to go into an argument group
                group = d.pop('group', self)
                if d.get('action') in ['store_true', 'store_false']:
                    # boolean switches do not accept a metavar
                    d.pop('metavar', None)
                ret.append(group.add_argument(*args, **d))
            except Exception:
                print('Error while creating argument %s' % arg)
                raise
    else:
        raise ValueError('Parser has already been finalized!')
    self._finalized = True
    if subparsers and self._subparsers_action is not None:
        # recurse into all subparsers
        for parser in self._subparsers_action.choices.values():
            parser.create_arguments(True)
    return ret
def append2help(self, arg, s):
    """Append the given string to the help of argument `arg`

    Parameters
    ----------
    arg: str
        The function argument
    s: str
        The string to append to the help"""
    entry = self.unfinished_arguments[arg]
    entry['help'] = entry['help'] + s
def append2helpf(self, arg, s):
    """Append the given string to the help of argument `arg` via a
    decorator

    Parameters
    ----------
    arg: str
        The function argument
    s: str
        The string to append to the help"""
    decorator = self._as_decorator('append2help', arg, s)
    return decorator
@staticmethod
def format_bold(section, text):
    """Format the section header as bold reST markup."""
    return '**{0}**\n\n{1}'.format(section, text)
@staticmethod
def format_rubric(section, text):
    """Format the section header as a reST rubric directive."""
    return '.. rubric:: {0}\n\n{1}'.format(section, text)
@staticmethod
def format_heading(section, text):
    """Format the section with an underlined (numpydoc style) heading."""
    underline = '-' * len(section)
    return section + '\n' + underline + '\n' + text
def extract_as_epilog(self, text, sections=None, overwrite=False,
                      append=True):
    """Extract epilog sections from a docstring

    Parameters
    ----------
    text: str
        The docstring to use
    sections: list of str
        The headers of the sections to extract. If None, the
        :attr:`epilog_sections` attribute is used
    overwrite: bool
        If True, overwrite the existing epilog
    append: bool
        If True, append to the existing epilog"""
    if sections is None:
        sections = self.epilog_sections
    if ((not self.epilog or overwrite or append) and sections):
        epilog_parts = []
        for sec in sections:
            # BUGFIX: use a separate variable so that `text` is not
            # clobbered -- previously every section after the first was
            # extracted from the previous section's text instead of the
            # original docstring
            sec_text = docstrings._get_section(text, sec).strip()
            if sec_text:
                epilog_parts.append(
                    self.format_epilog_section(sec, sec_text))
        if epilog_parts:
            epilog = '\n\n'.join(epilog_parts)
            if overwrite or not self.epilog:
                self.epilog = epilog
            else:
                self.epilog += '\n\n' + epilog
def grouparg(self, arg, my_arg=None, parent_cmds=()):
    """
    Grouper function for chaining subcommands

    Parameters
    ----------
    arg: str
        The current command line argument that is parsed
    my_arg: str
        The name of this subparser. If None, this parser is the main
        parser and has no parent parser
    parent_cmds: iterable of str
        The available commands of the parent parsers

    Returns
    -------
    str or None
        The grouping key for the given `arg` or None if the key does
        not correspond to this parser or this parser is the main parser
        and does not have seen a subparser yet

    Notes
    -----
    Quite complicated, there is no real need to deal with this function
    """
    # NOTE: the default for `parent_cmds` is an (immutable) empty tuple
    # instead of the mutable-default anti-pattern ``[]``; it is only
    # used for membership tests and iteration, never mutated.
    if self._subparsers_action is None:
        return None
    commands = self._subparsers_action.choices
    currentarg = self.__currentarg
    # the default return value is the current argument we are in or the
    # name of the subparser itself
    ret = currentarg or my_arg
    if currentarg is not None:
        # if we are already in a sub command, we use the sub parser
        sp_key = commands[currentarg].grouparg(arg, currentarg, chain(
            commands, parent_cmds))
        if sp_key is None and arg in commands:
            # if the subparser did not recognize the command, we use the
            # command that corresponds to this parser or (if this parser
            # is the parent parser) the current subparser
            self.__currentarg = currentarg = arg
            ret = my_arg or currentarg
        elif sp_key not in commands and arg in parent_cmds:
            # otherwise, if the subparser recognizes the command but it is
            # not in the known commands of this parser, it must be another
            # command of the subparser and this parser can ignore it
            ret = None
        else:
            # otherwise the command belongs to this subparser (if this one
            # is not the subparser) or the current subparser
            ret = my_arg or currentarg
    elif arg in commands:
        # if the argument is a valid subparser, we return this one
        self.__currentarg = arg
        ret = arg
    elif arg in parent_cmds:
        # if the argument is not a valid subparser but in one of our
        # parents, we return None to signalize that we cannot categorize
        # it
        ret = None
    return ret
def parse_known_args(self, args=None, namespace=None):
    """Parse known arguments, optionally dispatching chained subcommands.

    If subparsers were created with ``chain=True`` (see
    :meth:`add_subparsers`), the command line is split into one group of
    arguments per subcommand via :meth:`grouparg` and each group is
    parsed separately. The returned namespace then maps each subcommand
    name (with ``'-'`` replaced by ``'_'``) to the namespace of that
    subcommand. Otherwise the default :class:`argparse.ArgumentParser`
    behaviour is used.
    """
    if self._chain_subparsers:
        if args is None:
            args = sys.argv[1:]
        choices_d = OrderedDict()
        remainders = OrderedDict()
        main_args = []
        # reset the grouping state used by `grouparg` before grouping
        # (the previous code also bound an unused local ``cmd`` here and
        # an unused ``enumerate`` index in the loop below)
        self.__currentarg = None
        for cmd, subargs in groupby(args, self.grouparg):
            if cmd is None:
                main_args += list(subargs)
            else:
                # replace '-' by underscore for a valid namespace key
                ns_cmd = cmd.replace('-', '_')
                choices_d[ns_cmd], remainders[ns_cmd] = super(
                    FuncArgParser, self).parse_known_args(
                        list(chain(main_args, subargs)))
        main_ns, remainders[None] = self.__parse_main(main_args)
        for key, val in vars(main_ns).items():
            choices_d[key] = val
        self.__currentarg = None
        # '__dummy' is inserted by __parse_main as a python 2 work around
        if '__dummy' in choices_d:
            del choices_d['__dummy']
        return Namespace(**choices_d), list(chain(*remainders.values()))
    # otherwise, use the default behaviour
    return super(FuncArgParser, self).parse_known_args(args, namespace)
def __parse_main(self, args):
    """Parse only the main (non-subcommand) arguments.

    Work around for python 2.7 because argparse does not allow to parse
    arguments without subparsers there.
    """
    parse = super(FuncArgParser, self).parse_known_args
    if not six.PY2:
        return parse(args)
    # python 2 insists on a subcommand, so register and feed a throw-away
    # one; the resulting '__dummy' entry is removed by the caller
    self._subparsers_action.add_parser("__dummy")
    return parse(list(args) + ['__dummy'])
@docstrings.get_sectionsf('FuncArgParser.update_short')
@docstrings.dedent
def update_short(self, **kwargs):
    """
    Update the short optional arguments (those with one leading '-')

    This method updates the short argument name for the specified function
    arguments as stored in :attr:`unfinished_arguments`

    Parameters
    ----------
    ``**kwargs``
        Keywords must be keys in the :attr:`unfinished_arguments`
        dictionary (i.e. keywords of the root functions), values the short
        argument names

    Examples
    --------
    Setting::

        >>> parser.update_short(something='s', something_else='se')

    is basically the same as::

        >>> parser.update_arg('something', short='s')
        >>> parser.update_arg('something_else', short='se')

    which in turn is basically comparable to::

        >>> parser.add_argument('-s', '--something', ...)
        >>> parser.add_argument('-se', '--something_else', ...)

    See Also
    --------
    update_shortf, update_long"""
    # delegate each keyword to :meth:`update_arg` with the `short` key
    for key, val in six.iteritems(kwargs):
        self.update_arg(key, short=val)
@docstrings.dedent
def update_shortf(self, **kwargs):
    """
    Update the short optional arguments belonging to a function

    This method acts exactly like :meth:`update_short` but works as a
    decorator (see :meth:`update_arg` and :meth:`update_argf`)

    Parameters
    ----------
    %(FuncArgParser.update_short.parameters)s

    Returns
    -------
    function
        The function that can be used as a decorator

    Examples
    --------
    Use this method as a decorator::

        >>> @parser.update_shortf(something='s', something_else='se')
        ... def do_something(something=None, something_else=None):
        ...     ...

    See also the examples in :meth:`update_short`.

    See Also
    --------
    update_short, update_longf
    """
    # defer to the generic decorator factory
    return self._as_decorator('update_short', **kwargs)
@docstrings.get_sectionsf('FuncArgParser.update_long')
@docstrings.dedent
def update_long(self, **kwargs):
    """
    Update the long optional arguments (those with two leading '-')

    This method updates the long argument name for the specified function
    arguments as stored in :attr:`unfinished_arguments`

    Parameters
    ----------
    ``**kwargs``
        Keywords must be keys in the :attr:`unfinished_arguments`
        dictionary (i.e. keywords of the root functions), values the long
        argument names

    Examples
    --------
    Setting::

        >>> parser.update_long(something='s', something_else='se')

    is basically the same as::

        >>> parser.update_arg('something', long='s')
        >>> parser.update_arg('something_else', long='se')

    which in turn is basically comparable to::

        >>> parser.add_argument('--s', dest='something', ...)
        >>> parser.add_argument('--se', dest='something_else', ...)

    See Also
    --------
    update_short, update_longf"""
    # delegate each keyword to :meth:`update_arg` with the `long` key
    for key, val in six.iteritems(kwargs):
        self.update_arg(key, long=val)
@docstrings.dedent
def update_longf(self, **kwargs):
    """
    Update the long optional arguments belonging to a function

    This method acts exactly like :meth:`update_long` but works as a
    decorator (see :meth:`update_arg` and :meth:`update_argf`)

    Parameters
    ----------
    %(FuncArgParser.update_long.parameters)s

    Returns
    -------
    function
        The function that can be used as a decorator

    Examples
    --------
    Use this method as a decorator::

        >>> @parser.update_longf(something='s', something_else='se')
        ... def do_something(something=None, something_else=None):
        ...     ...

    See also the examples in :meth:`update_long`.

    See Also
    --------
    update_long, update_shortf
    """
    # defer to the generic decorator factory
    return self._as_decorator('update_long', **kwargs)
def parse2func(self, args=None, func=None):
    """Parse the command line arguments into a call of the setup function.

    The arguments are parsed via :meth:`parse_args` and the resulting
    namespace is unpacked as keyword arguments into `func`.

    Parameters
    ----------
    args: list
        The list of command line arguments
    func: function
        An alternative function to use. If None, the last function or the
        one specified through the `setup_as` parameter in the
        :meth:`setup_args` method is used.

    Returns
    -------
    object
        Whatever is returned by the called function

    Note
    ----
    This method does not cover subparsers!"""
    namespace = self.parse_args(args)
    kws = vars(namespace)
    if func is None:
        func = (kws.pop(self._setup_as) if self._setup_as
                else self._used_functions[-1])
    return func(**kws)
def parse_known2func(self, args=None, func=None):
"""Parse the command line arguments to the setup function
This method parses the given command line arguments to the function
used in the :meth:`setup_args` method to setup up this parser
Parameters
----------
args: list
The list of command line arguments
func: function or str
An alternative function to use. If None, the last function or the
one specified through the `setup_as` parameter in the
:meth:`setup_args` is used.
Returns
-------
object
What ever is returned by the called function
list
The remaining command line arguments that could not be interpreted
Note
----
This method does not cover subparsers!"""
ns, remainder = self.parse_known_args(args)
kws = vars(ns)
if func is None:
if self._setup_as:
func = kws.pop(self._setup_as)
else:
func = self._used_functions[-1]
return func(**kws), remainder
def parse_chained(self, args=None):
"""
Parse the argument directly to the function used for setup
This function parses the command line arguments to the function that
has been used for the :meth:`setup_args`.
Parameters
----------
args: list
The arguments parsed to the :meth:`parse_args` function
Returns
-------
argparse.Namespace
The namespace with mapping from command name to the function
return
See also
--------
parse_known_chained
"""
kws = vars(self.parse_args(args))
return self._parse2subparser_funcs(kws)
def parse_known_chained(self, args=None):
"""
Parse the argument directly to the function used for setup
This function parses the command line arguments to the function that
has been used for the :meth:`setup_args` method.
Parameters
----------
args: list
The arguments parsed to the :meth:`parse_args` function
Returns
-------
argparse.Namespace
The namespace with mapping from command name to the function
return
list
The remaining arguments that could not be interpreted
See also
--------
parse_known
"""
ns, remainder = self.parse_known_args(args)
kws = vars(ns)
return self._parse2subparser_funcs(kws), remainder
def _parse2subparser_funcs(self, kws):
"""
Recursive function to parse arguments to chained parsers
"""
choices = getattr(self._subparsers_action, 'choices', {})
replaced = {key.replace('-', '_'): key for key in choices}
sp_commands = set(replaced).intersection(kws)
if not sp_commands:
if self._setup_as is not None:
func = kws.pop(self._setup_as)
else:
try:
func = self._used_functions[-1]
except IndexError:
return None
return func(**{
key: kws[key] for key in set(kws).difference(choices)})
else:
ret = {}
for key in sp_commands:
ret[key.replace('-', '_')] = \
choices[replaced[key]]._parse2subparser_funcs(
vars(kws[key]))
return Namespace(**ret)
def get_subparser(self, name):
"""
Convenience method to get a certain subparser
Parameters
----------
name: str
The name of the subparser
Returns
-------
FuncArgParser
The subparsers corresponding to `name`
"""
if self._subparsers_action is None:
raise ValueError("%s has no subparsers defined!" % self)
return self._subparsers_action.choices[name]
|
Chilipp/funcargparse | funcargparse/__init__.py | FuncArgParser.extract_as_epilog | python | def extract_as_epilog(self, text, sections=None, overwrite=False,
append=True):
if sections is None:
sections = self.epilog_sections
if ((not self.epilog or overwrite or append) and sections):
epilog_parts = []
for sec in sections:
text = docstrings._get_section(text, sec).strip()
if text:
epilog_parts.append(
self.format_epilog_section(sec, text))
if epilog_parts:
epilog = '\n\n'.join(epilog_parts)
if overwrite or not self.epilog:
self.epilog = epilog
else:
self.epilog += '\n\n' + epilog | Extract epilog sections from the a docstring
Parameters
----------
text
The docstring to use
sections: list of str
The headers of the sections to extract. If None, the
:attr:`epilog_sections` attribute is used
overwrite: bool
If True, overwrite the existing epilog
append: bool
If True, append to the existing epilog | train | https://github.com/Chilipp/funcargparse/blob/398ce8e7fa5aa35c465215446bda151cf1ecf7ad/funcargparse/__init__.py#L588-L617 | [
"def format_epilog_section(self, section, text):\n \"\"\"Format a section for the epilog by inserting a format\"\"\"\n try:\n func = self._epilog_formatters[self.epilog_formatter]\n except KeyError:\n if not callable(self.epilog_formatter):\n raise\n func = self.epilog_formatter\n return func(section, text)\n"
] | class FuncArgParser(ArgumentParser):
"""Subclass of an argument parser that get's parts of the information
from a given function"""
_finalized = False
#: The unfinished arguments after the setup
unfinished_arguments = {}
#: The sections to extract from a function docstring that should be used
#: in the epilog of this parser. See also the :meth:`setup_args` method
epilog_sections = ['Notes', 'References']
#: The formatter specification for the epilog. This can either be a string
#: out of 'header', 'bold', or
#: 'rubric' or a callable (i.e. function) that takes two arguments,
#: the section title and the section text, and returns a string.
#:
#: 'heading'
#: Use section headers such as::
#:
#: Notes
#: -----
#: 'bold'
#: Just make a bold header for the section, e.g. ``**Notes**``
#: 'rubric'
#: Use a rubric rst directive, e.g. ``.. rubric:: Notes``
#:
#: .. warning::
#:
#: When building a sphinx documentation using the sphinx-argparse
#: module, this value should be set to ``'bold'`` or ``'rubric'``! Just
#: add this two lines to your conf.py:
#:
#: .. code-block:: python
#:
#: import funcargparse
#: funcargparse.FuncArgParser.epilog_formatter = 'rubric'
epilog_formatter = 'heading'
def __init__(self, *args, **kwargs):
"""
Parameters
----------
``*args,**kwargs``
Theses arguments are determined by the
:class:`argparse.ArgumentParser` base class. Note that by default,
we use a :class:`argparse.RawTextHelpFormatter` class for the
`formatter_class` keyword, whereas the
:class:`argparse.ArgumentParser` uses a
:class:`argparse.HelpFormatter`
Other Parameters
----------------
epilog_sections: list of str
The default sections to use for the epilog (see the
:attr:`epilog_sections` attribute). They can also be specified
each time the :meth:`setup_args` method is called
epilog_formatter: {'header', 'bold', 'rubric'} or function
Specify how the epilog sections should be formatted and defaults to
the :attr:`epilog_formatter` attribute. This can either be a string
out of 'header', 'bold', or 'rubric' or a callable (i.e. function)
that takes two arguments, the section title and the section text,
and returns a string.
'heading'
Use section headers such as::
Notes
-----
'bold'
Just make a bold header for the section, e.g. ``**Notes**``
'rubric'
Use a rubric rst directive, e.g. ``.. rubric:: Notes``
"""
self._subparsers_action = None
kwargs.setdefault('formatter_class', argparse.RawTextHelpFormatter)
epilog_sections = kwargs.pop('epilog_sections', None)
if epilog_sections is not None:
self.epilog_sections = epilog_sections
epilog_formatter = kwargs.pop('epilog_formatter', None)
if epilog_formatter is not None:
self.epilog_formatter = epilog_formatter
super(FuncArgParser, self).__init__(*args, **kwargs)
self.unfinished_arguments = OrderedDict()
self._used_functions = []
self.__currentarg = None
self._chain_subparsers = False
self._setup_as = None
self._epilog_formatters = {'heading': self.format_heading,
'bold': self.format_bold,
'rubric': self.format_rubric}
@staticmethod
def get_param_doc(doc, param):
"""Get the documentation and datatype for a parameter
This function returns the documentation and the argument for a
napoleon like structured docstring `doc`
Parameters
----------
doc: str
The base docstring to use
param: str
The argument to use
Returns
-------
str
The documentation of the given `param`
str
The datatype of the given `param`"""
arg_doc = docstrings.keep_params_s(doc, [param]) or \
docstrings.keep_types_s(doc, [param])
dtype = None
if arg_doc:
lines = arg_doc.splitlines()
arg_doc = dedents('\n' + '\n'.join(lines[1:]))
param_desc = lines[0].split(':', 1)
if len(param_desc) > 1:
dtype = param_desc[1].strip()
return arg_doc, dtype
@docstrings.get_sectionsf('FuncArgParser.setup_args',
sections=['Parameters', 'Returns'])
@docstrings.dedent
def setup_args(self, func=None, setup_as=None, insert_at=None,
interprete=True, epilog_sections=None,
overwrite=False, append_epilog=True):
"""
Add the parameters from the given `func` to the parameter settings
Parameters
----------
func: function
The function to use. If None, a function will be returned that can
be used as a decorator
setup_as: str
The attribute that shall be assigned to the function in the
resulting namespace. If specified, this function will be used when
calling the :meth:`parse2func` method
insert_at: int
The position where the given `func` should be inserted. If None,
it will be appended at the end and used when calling the
:meth:`parse2func` method
interprete: bool
If True (default), the docstrings are interpreted and switches and
lists are automatically inserted (see the
[interpretation-docs]_
epilog_sections: list of str
The headers of the sections to extract. If None, the
:attr:`epilog_sections` attribute is used
overwrite: bool
If True, overwrite the existing epilog and the existing description
of the parser
append_epilog: bool
If True, append to the existing epilog
Returns
-------
function
Either the function that can be used as a decorator (if `func` is
``None``), or the given `func` itself.
Examples
--------
Use this method as a decorator::
>>> @parser.setup_args
... def do_something(a=1):
'''
Just an example
Parameters
----------
a: int
A number to increment by one
'''
return a + 1
>>> args = parser.parse_args('-a 2'.split())
Or by specifying the setup_as function::
>>> @parser.setup_args(setup_as='func')
... def do_something(a=1):
'''
Just an example
Parameters
----------
a: int
A number to increment by one
'''
return a + 1
>>> args = parser.parse_args('-a 2'.split())
>>> args.func is do_something
>>> parser.parse2func('-a 2'.split())
3
References
----------
.. [interpretation-docs]
http://funcargparse.readthedocs.io/en/latest/docstring_interpretation.html)
"""
def setup(func):
# insert the function
if insert_at is None:
self._used_functions.append(func)
else:
self._used_functions.insert(insert_at, func)
args_dict = self.unfinished_arguments
# save the function to use in parse2funcs
if setup_as:
args_dict[setup_as] = dict(
long=setup_as, default=func, help=argparse.SUPPRESS)
self._setup_as = setup_as
# create arguments
args, varargs, varkw, defaults = inspect.getargspec(func)
full_doc = docstrings.dedents(inspect.getdoc(func))
summary = docstrings.get_full_description(full_doc)
if summary:
if not self.description or overwrite:
self.description = summary
full_doc = docstrings._remove_summary(full_doc)
self.extract_as_epilog(full_doc, epilog_sections, overwrite,
append_epilog)
doc = docstrings._get_section(full_doc, 'Parameters') + '\n'
doc += docstrings._get_section(full_doc, 'Other Parameters')
doc = doc.rstrip()
default_min = len(args or []) - len(defaults or [])
for i, arg in enumerate(args):
if arg == 'self' or arg in args_dict:
continue
arg_doc, dtype = self.get_param_doc(doc, arg)
args_dict[arg] = d = {'dest': arg, 'short': arg.replace('_',
'-'),
'long': arg.replace('_', '-')}
if arg_doc:
d['help'] = arg_doc
if i >= default_min:
d['default'] = defaults[i - default_min]
else:
d['positional'] = True
if interprete and dtype == 'bool' and 'default' in d:
d['action'] = 'store_false' if d['default'] else \
'store_true'
elif interprete and dtype:
if dtype.startswith('list of'):
d['nargs'] = '+'
dtype = dtype[7:].strip()
if dtype in ['str', 'string', 'strings']:
d['type'] = six.text_type
if dtype == 'strings':
dtype = 'string'
else:
try:
d['type'] = getattr(builtins, dtype)
except AttributeError:
try: # maybe the dtype has a final 's'
d['type'] = getattr(builtins, dtype[:-1])
dtype = dtype[:-1]
except AttributeError:
pass
d['metavar'] = dtype
return func
if func is None:
return setup
else:
return setup(func)
@docstrings.get_sectionsf('FuncArgParser.add_subparsers')
@docstrings.dedent
def add_subparsers(self, *args, **kwargs):
"""
Add subparsers to this parser
Parameters
----------
``*args, **kwargs``
As specified by the original
:meth:`argparse.ArgumentParser.add_subparsers` method
chain: bool
Default: False. If True, It is enabled to chain subparsers"""
chain = kwargs.pop('chain', None)
ret = super(FuncArgParser, self).add_subparsers(*args, **kwargs)
if chain:
self._chain_subparsers = True
self._subparsers_action = ret
return ret
@docstrings.dedent
def setup_subparser(
self, func=None, setup_as=None, insert_at=None, interprete=True,
epilog_sections=None, overwrite=False, append_epilog=True,
return_parser=False, name=None, **kwargs):
"""
Create a subparser with the name of the given function
Parameters are the same as for the :meth:`setup_args` function, other
parameters are parsed to the :meth:`add_subparsers` method if (and only
if) this method has not already been called.
Parameters
----------
%(FuncArgParser.setup_args.parameters)s
return_parser: bool
If True, the create parser is returned instead of the function
name: str
The name of the created parser. If None, the function name is used
and underscores (``'_'``) are replaced by minus (``'-'``)
``**kwargs``
Any other parameter that is passed to the add_parser method that
creates the parser
Other Parameters
----------------
Returns
-------
FuncArgParser or %(FuncArgParser.setup_args.returns)s
If return_parser is True, the created subparser is returned
Examples
--------
Use this method as a decorator::
>>> from funcargparser import FuncArgParser
>>> parser = FuncArgParser()
>>> @parser.setup_subparser
... def my_func(my_argument=None):
... pass
>>> args = parser.parse_args('my-func -my-argument 1'.split())
"""
def setup(func):
if self._subparsers_action is None:
raise RuntimeError(
"No subparsers have yet been created! Run the "
"add_subparsers method first!")
# replace underscore by '-'
name2use = name
if name2use is None:
name2use = func.__name__.replace('_', '-')
kwargs.setdefault('help', docstrings.get_summary(
docstrings.dedents(inspect.getdoc(func))))
parser = self._subparsers_action.add_parser(name2use, **kwargs)
parser.setup_args(
func, setup_as=setup_as, insert_at=insert_at,
interprete=interprete, epilog_sections=epilog_sections,
overwrite=overwrite, append_epilog=append_epilog)
return func, parser
if func is None:
return lambda f: setup(f)[0]
else:
return setup(func)[int(return_parser)]
@docstrings.get_sectionsf('FuncArgParser.update_arg')
@docstrings.dedent
def update_arg(self, arg, if_existent=None, **kwargs):
"""
Update the `add_argument` data for the given parameter
Parameters
----------
arg: str
The name of the function argument
if_existent: bool or None
If True, the argument is updated. If None (default), the argument
is only updated, if it exists. Otherwise, if False, the given
``**kwargs`` are only used if the argument is not yet existing
``**kwargs``
The keyword arguments any parameter for the
:meth:`argparse.ArgumentParser.add_argument` method
"""
if if_existent or (if_existent is None and
arg in self.unfinished_arguments):
self.unfinished_arguments[arg].update(kwargs)
elif not if_existent and if_existent is not None:
self.unfinished_arguments.setdefault(arg, kwargs)
@docstrings.dedent
def update_argf(self, arg, **kwargs):
"""
Update the arguments as a decorator
Parameters
---------
%(FuncArgParser.update_arg.parameters)s
Examples
--------
Use this method as a decorator::
>>> from funcargparser import FuncArgParser
>>> parser = FuncArgParser()
>>> @parser.update_argf('my_argument', type=int)
... def my_func(my_argument=None):
... pass
>>> args = parser.parse_args('my-func -my-argument 1'.split())
>>> isinstance(args.my_argument, int)
True
See Also
--------
update_arg"""
return self._as_decorator('update_arg', arg, **kwargs)
def _as_decorator(self, funcname, *args, **kwargs):
def func_decorator(func):
success = False
for parser in self._get_corresponding_parsers(func):
getattr(parser, funcname)(*args, **kwargs)
success = True
if not success:
raise ValueError(
"Could not figure out to which this %s belongs" % func)
return func
return func_decorator
def _get_corresponding_parsers(self, func):
"""Get the parser that has been set up by the given `function`"""
if func in self._used_functions:
yield self
if self._subparsers_action is not None:
for parser in self._subparsers_action.choices.values():
for sp in parser._get_corresponding_parsers(func):
yield sp
def pop_arg(self, *args, **kwargs):
"""Delete a previously defined argument from the parser
"""
return self.unfinished_arguments.pop(*args, **kwargs)
def pop_argf(self, *args, **kwargs):
"""Delete a previously defined argument from the parser via decorators
Same as :meth:`pop_arg` but it can be used as a decorator"""
return self._as_decorator('pop_arg', *args, **kwargs)
def pop_key(self, arg, key, *args, **kwargs):
"""Delete a previously defined key for the `add_argument`
"""
return self.unfinished_arguments[arg].pop(key, *args, **kwargs)
def pop_keyf(self, *args, **kwargs):
"""Delete a previously defined key for the `add_argument`
Same as :meth:`pop_key` but it can be used as a decorator"""
return self._as_decorator('pop_key', *args, **kwargs)
def create_arguments(self, subparsers=False):
"""Create and add the arguments
Parameters
----------
subparsers: bool
If True, the arguments of the subparsers are also created"""
ret = []
if not self._finalized:
for arg, d in self.unfinished_arguments.items():
try:
not_positional = int(not d.pop('positional', False))
short = d.pop('short', None)
long_name = d.pop('long', None)
if short is None and long_name is None:
raise ValueError(
"Either a short (-) or a long (--) argument must "
"be provided!")
if not not_positional:
short = arg
long_name = None
d.pop('dest', None)
if short == long_name:
long_name = None
args = []
if short:
args.append('-' * not_positional + short)
if long_name:
args.append('--' * not_positional + long_name)
group = d.pop('group', self)
if d.get('action') in ['store_true', 'store_false']:
d.pop('metavar', None)
ret.append(group.add_argument(*args, **d))
except Exception:
print('Error while creating argument %s' % arg)
raise
else:
raise ValueError('Parser has already been finalized!')
self._finalized = True
if subparsers and self._subparsers_action is not None:
for parser in self._subparsers_action.choices.values():
parser.create_arguments(True)
return ret
def append2help(self, arg, s):
"""Append the given string to the help of argument `arg`
Parameters
----------
arg: str
The function argument
s: str
The string to append to the help"""
self.unfinished_arguments[arg]['help'] += s
def append2helpf(self, arg, s):
"""Append the given string to the help of argument `arg`
Parameters
----------
arg: str
The function argument
s: str
The string to append to the help"""
return self._as_decorator('append2help', arg, s)
@staticmethod
def format_bold(section, text):
"""Make a bold formatting for the section header"""
return '**%s**\n\n%s' % (section, text)
@staticmethod
def format_rubric(section, text):
"""Make a bold formatting for the section header"""
return '.. rubric:: %s\n\n%s' % (section, text)
@staticmethod
def format_heading(section, text):
return '\n'.join([section, '-' * len(section), text])
def format_epilog_section(self, section, text):
"""Format a section for the epilog by inserting a format"""
try:
func = self._epilog_formatters[self.epilog_formatter]
except KeyError:
if not callable(self.epilog_formatter):
raise
func = self.epilog_formatter
return func(section, text)
def grouparg(self, arg, my_arg=None, parent_cmds=[]):
"""
Grouper function for chaining subcommands
Parameters
----------
arg: str
The current command line argument that is parsed
my_arg: str
The name of this subparser. If None, this parser is the main
parser and has no parent parser
parent_cmds: list of str
The available commands of the parent parsers
Returns
-------
str or None
The grouping key for the given `arg` or None if the key does
not correspond to this parser or this parser is the main parser
and does not have seen a subparser yet
Notes
-----
Quite complicated, there is no real need to deal with this function
"""
if self._subparsers_action is None:
return None
commands = self._subparsers_action.choices
currentarg = self.__currentarg
# the default return value is the current argument we are in or the
# name of the subparser itself
ret = currentarg or my_arg
if currentarg is not None:
# if we are already in a sub command, we use the sub parser
sp_key = commands[currentarg].grouparg(arg, currentarg, chain(
commands, parent_cmds))
if sp_key is None and arg in commands:
# if the subparser did not recognize the command, we use the
# command the corresponds to this parser or (of this parser
# is the parent parser) the current subparser
self.__currentarg = currentarg = arg
ret = my_arg or currentarg
elif sp_key not in commands and arg in parent_cmds:
# otherwise, if the subparser recognizes the commmand but it is
# not in the known command of this parser, it must be another
# command of the subparser and this parser can ignore it
ret = None
else:
# otherwise the command belongs to this subparser (if this one
# is not the subparser) or the current subparser
ret = my_arg or currentarg
elif arg in commands:
# if the argument is a valid subparser, we return this one
self.__currentarg = arg
ret = arg
elif arg in parent_cmds:
# if the argument is not a valid subparser but in one of our
# parents, we return None to signalize that we cannot categorize
# it
ret = None
return ret
def parse_known_args(self, args=None, namespace=None):
if self._chain_subparsers:
if args is None:
args = sys.argv[1:]
choices_d = OrderedDict()
remainders = OrderedDict()
main_args = []
# get the first argument to make sure that everything works
cmd = self.__currentarg = None
for i, (cmd, subargs) in enumerate(groupby(args, self.grouparg)):
if cmd is None:
main_args += list(subargs)
else:
# replace '-' by underscore
ns_cmd = cmd.replace('-', '_')
choices_d[ns_cmd], remainders[ns_cmd] = super(
FuncArgParser, self).parse_known_args(
list(chain(main_args, subargs)))
main_ns, remainders[None] = self.__parse_main(main_args)
for key, val in vars(main_ns).items():
choices_d[key] = val
self.__currentarg = None
if '__dummy' in choices_d:
del choices_d['__dummy']
return Namespace(**choices_d), list(chain(*remainders.values()))
# otherwise, use the default behaviour
return super(FuncArgParser, self).parse_known_args(args, namespace)
def __parse_main(self, args):
"""Parse the main arguments only. This is a work around for python 2.7
because argparse does not allow to parse arguments without subparsers
"""
if six.PY2:
self._subparsers_action.add_parser("__dummy")
return super(FuncArgParser, self).parse_known_args(
list(args) + ['__dummy'])
return super(FuncArgParser, self).parse_known_args(args)
@docstrings.get_sectionsf('FuncArgParser.update_short')
@docstrings.dedent
def update_short(self, **kwargs):
"""
Update the short optional arguments (those with one leading '-')
This method updates the short argument name for the specified function
arguments as stored in :attr:`unfinished_arguments`
Parameters
----------
``**kwargs``
Keywords must be keys in the :attr:`unfinished_arguments`
dictionary (i.e. keywords of the root functions), values the short
argument names
Examples
--------
Setting::
>>> parser.update_short(something='s', something_else='se')
is basically the same as::
>>> parser.update_arg('something', short='s')
>>> parser.update_arg('something_else', short='se')
which in turn is basically comparable to::
>>> parser.add_argument('-s', '--something', ...)
>>> parser.add_argument('-se', '--something_else', ...)
See Also
--------
update_shortf, update_long"""
for key, val in six.iteritems(kwargs):
self.update_arg(key, short=val)
@docstrings.dedent
def update_shortf(self, **kwargs):
"""
Update the short optional arguments belonging to a function
This method acts exactly like :meth:`update_short` but works as a
decorator (see :meth:`update_arg` and :meth:`update_argf`)
Parameters
----------
%(FuncArgParser.update_short.parameters)s
Returns
-------
function
The function that can be used as a decorator
Examples
--------
Use this method as a decorator::
>>> @parser.update_shortf(something='s', something_else='se')
... def do_something(something=None, something_else=None):
... ...
See also the examples in :meth:`update_short`.
See Also
--------
update_short, update_longf
"""
return self._as_decorator('update_short', **kwargs)
@docstrings.get_sectionsf('FuncArgParser.update_long')
@docstrings.dedent
def update_long(self, **kwargs):
"""
Update the long optional arguments (those with two leading '-')
This method updates the short argument name for the specified function
arguments as stored in :attr:`unfinished_arguments`
Parameters
----------
``**kwargs``
Keywords must be keys in the :attr:`unfinished_arguments`
dictionary (i.e. keywords of the root functions), values the long
argument names
Examples
--------
Setting::
>>> parser.update_long(something='s', something_else='se')
is basically the same as::
>>> parser.update_arg('something', long='s')
>>> parser.update_arg('something_else', long='se')
which in turn is basically comparable to::
>>> parser.add_argument('--s', dest='something', ...)
>>> parser.add_argument('--se', dest='something_else', ...)
See Also
--------
update_short, update_longf"""
for key, val in six.iteritems(kwargs):
self.update_arg(key, long=val)
@docstrings.dedent
def update_longf(self, **kwargs):
"""
Update the long optional arguments belonging to a function
This method acts exactly like :meth:`update_long` but works as a
decorator (see :meth:`update_arg` and :meth:`update_argf`)
Parameters
----------
%(FuncArgParser.update_long.parameters)s
Returns
-------
function
The function that can be used as a decorator
Examples
--------
Use this method as a decorator::
>>> @parser.update_shortf(something='s', something_else='se')
... def do_something(something=None, something_else=None):
... ...
See also the examples in :meth:`update_long`.
See Also
--------
update_short, update_longf
"""
return self._as_decorator('update_long', **kwargs)
def parse2func(self, args=None, func=None):
"""Parse the command line arguments to the setup function
This method parses the given command line arguments to the function
used in the :meth:`setup_args` method to setup up this parser
Parameters
----------
args: list
The list of command line arguments
func: function
An alternative function to use. If None, the last function or the
one specified through the `setup_as` parameter in the
:meth:`setup_args` is used.
Returns
-------
object
What ever is returned by the called function
Note
----
This method does not cover subparsers!"""
kws = vars(self.parse_args(args))
if func is None:
if self._setup_as:
func = kws.pop(self._setup_as)
else:
func = self._used_functions[-1]
return func(**kws)
def parse_known2func(self, args=None, func=None):
"""Parse the command line arguments to the setup function
This method parses the given command line arguments to the function
used in the :meth:`setup_args` method to setup up this parser
Parameters
----------
args: list
The list of command line arguments
func: function or str
An alternative function to use. If None, the last function or the
one specified through the `setup_as` parameter in the
:meth:`setup_args` is used.
Returns
-------
object
What ever is returned by the called function
list
The remaining command line arguments that could not be interpreted
Note
----
This method does not cover subparsers!"""
ns, remainder = self.parse_known_args(args)
kws = vars(ns)
if func is None:
if self._setup_as:
func = kws.pop(self._setup_as)
else:
func = self._used_functions[-1]
return func(**kws), remainder
def parse_chained(self, args=None):
"""
Parse the argument directly to the function used for setup
This function parses the command line arguments to the function that
has been used for the :meth:`setup_args`.
Parameters
----------
args: list
The arguments parsed to the :meth:`parse_args` function
Returns
-------
argparse.Namespace
The namespace with mapping from command name to the function
return
See also
--------
parse_known_chained
"""
kws = vars(self.parse_args(args))
return self._parse2subparser_funcs(kws)
def parse_known_chained(self, args=None):
"""
Parse the argument directly to the function used for setup
This function parses the command line arguments to the function that
has been used for the :meth:`setup_args` method.
Parameters
----------
args: list
The arguments parsed to the :meth:`parse_args` function
Returns
-------
argparse.Namespace
The namespace with mapping from command name to the function
return
list
The remaining arguments that could not be interpreted
See also
--------
parse_known
"""
ns, remainder = self.parse_known_args(args)
kws = vars(ns)
return self._parse2subparser_funcs(kws), remainder
def _parse2subparser_funcs(self, kws):
"""
Recursive function to parse arguments to chained parsers
"""
choices = getattr(self._subparsers_action, 'choices', {})
replaced = {key.replace('-', '_'): key for key in choices}
sp_commands = set(replaced).intersection(kws)
if not sp_commands:
if self._setup_as is not None:
func = kws.pop(self._setup_as)
else:
try:
func = self._used_functions[-1]
except IndexError:
return None
return func(**{
key: kws[key] for key in set(kws).difference(choices)})
else:
ret = {}
for key in sp_commands:
ret[key.replace('-', '_')] = \
choices[replaced[key]]._parse2subparser_funcs(
vars(kws[key]))
return Namespace(**ret)
def get_subparser(self, name):
"""
Convenience method to get a certain subparser
Parameters
----------
name: str
The name of the subparser
Returns
-------
FuncArgParser
The subparsers corresponding to `name`
"""
if self._subparsers_action is None:
raise ValueError("%s has no subparsers defined!" % self)
return self._subparsers_action.choices[name]
|
Chilipp/funcargparse | funcargparse/__init__.py | FuncArgParser.grouparg | python | def grouparg(self, arg, my_arg=None, parent_cmds=[]):
if self._subparsers_action is None:
return None
commands = self._subparsers_action.choices
currentarg = self.__currentarg
# the default return value is the current argument we are in or the
# name of the subparser itself
ret = currentarg or my_arg
if currentarg is not None:
# if we are already in a sub command, we use the sub parser
sp_key = commands[currentarg].grouparg(arg, currentarg, chain(
commands, parent_cmds))
if sp_key is None and arg in commands:
# if the subparser did not recognize the command, we use the
# command the corresponds to this parser or (of this parser
# is the parent parser) the current subparser
self.__currentarg = currentarg = arg
ret = my_arg or currentarg
elif sp_key not in commands and arg in parent_cmds:
# otherwise, if the subparser recognizes the commmand but it is
# not in the known command of this parser, it must be another
# command of the subparser and this parser can ignore it
ret = None
else:
# otherwise the command belongs to this subparser (if this one
# is not the subparser) or the current subparser
ret = my_arg or currentarg
elif arg in commands:
# if the argument is a valid subparser, we return this one
self.__currentarg = arg
ret = arg
elif arg in parent_cmds:
# if the argument is not a valid subparser but in one of our
# parents, we return None to signalize that we cannot categorize
# it
ret = None
return ret | Grouper function for chaining subcommands
Parameters
----------
arg: str
The current command line argument that is parsed
my_arg: str
The name of this subparser. If None, this parser is the main
parser and has no parent parser
parent_cmds: list of str
The available commands of the parent parsers
Returns
-------
str or None
The grouping key for the given `arg` or None if the key does
not correspond to this parser or this parser is the main parser
and does not have seen a subparser yet
Notes
-----
Quite complicated, there is no real need to deal with this function | train | https://github.com/Chilipp/funcargparse/blob/398ce8e7fa5aa35c465215446bda151cf1ecf7ad/funcargparse/__init__.py#L619-L679 | null | class FuncArgParser(ArgumentParser):
"""Subclass of an argument parser that get's parts of the information
from a given function"""
_finalized = False
#: The unfinished arguments after the setup
unfinished_arguments = {}
#: The sections to extract from a function docstring that should be used
#: in the epilog of this parser. See also the :meth:`setup_args` method
epilog_sections = ['Notes', 'References']
#: The formatter specification for the epilog. This can either be a string
#: out of 'header', 'bold', or
#: 'rubric' or a callable (i.e. function) that takes two arguments,
#: the section title and the section text, and returns a string.
#:
#: 'heading'
#: Use section headers such as::
#:
#: Notes
#: -----
#: 'bold'
#: Just make a bold header for the section, e.g. ``**Notes**``
#: 'rubric'
#: Use a rubric rst directive, e.g. ``.. rubric:: Notes``
#:
#: .. warning::
#:
#: When building a sphinx documentation using the sphinx-argparse
#: module, this value should be set to ``'bold'`` or ``'rubric'``! Just
#: add this two lines to your conf.py:
#:
#: .. code-block:: python
#:
#: import funcargparse
#: funcargparse.FuncArgParser.epilog_formatter = 'rubric'
epilog_formatter = 'heading'
def __init__(self, *args, **kwargs):
"""
Parameters
----------
``*args,**kwargs``
Theses arguments are determined by the
:class:`argparse.ArgumentParser` base class. Note that by default,
we use a :class:`argparse.RawTextHelpFormatter` class for the
`formatter_class` keyword, whereas the
:class:`argparse.ArgumentParser` uses a
:class:`argparse.HelpFormatter`
Other Parameters
----------------
epilog_sections: list of str
The default sections to use for the epilog (see the
:attr:`epilog_sections` attribute). They can also be specified
each time the :meth:`setup_args` method is called
epilog_formatter: {'header', 'bold', 'rubric'} or function
Specify how the epilog sections should be formatted and defaults to
the :attr:`epilog_formatter` attribute. This can either be a string
out of 'header', 'bold', or 'rubric' or a callable (i.e. function)
that takes two arguments, the section title and the section text,
and returns a string.
'heading'
Use section headers such as::
Notes
-----
'bold'
Just make a bold header for the section, e.g. ``**Notes**``
'rubric'
Use a rubric rst directive, e.g. ``.. rubric:: Notes``
"""
self._subparsers_action = None
kwargs.setdefault('formatter_class', argparse.RawTextHelpFormatter)
epilog_sections = kwargs.pop('epilog_sections', None)
if epilog_sections is not None:
self.epilog_sections = epilog_sections
epilog_formatter = kwargs.pop('epilog_formatter', None)
if epilog_formatter is not None:
self.epilog_formatter = epilog_formatter
super(FuncArgParser, self).__init__(*args, **kwargs)
self.unfinished_arguments = OrderedDict()
self._used_functions = []
self.__currentarg = None
self._chain_subparsers = False
self._setup_as = None
self._epilog_formatters = {'heading': self.format_heading,
'bold': self.format_bold,
'rubric': self.format_rubric}
@staticmethod
def get_param_doc(doc, param):
"""Get the documentation and datatype for a parameter
This function returns the documentation and the argument for a
napoleon like structured docstring `doc`
Parameters
----------
doc: str
The base docstring to use
param: str
The argument to use
Returns
-------
str
The documentation of the given `param`
str
The datatype of the given `param`"""
arg_doc = docstrings.keep_params_s(doc, [param]) or \
docstrings.keep_types_s(doc, [param])
dtype = None
if arg_doc:
lines = arg_doc.splitlines()
arg_doc = dedents('\n' + '\n'.join(lines[1:]))
param_desc = lines[0].split(':', 1)
if len(param_desc) > 1:
dtype = param_desc[1].strip()
return arg_doc, dtype
@docstrings.get_sectionsf('FuncArgParser.setup_args',
sections=['Parameters', 'Returns'])
@docstrings.dedent
def setup_args(self, func=None, setup_as=None, insert_at=None,
interprete=True, epilog_sections=None,
overwrite=False, append_epilog=True):
"""
Add the parameters from the given `func` to the parameter settings
Parameters
----------
func: function
The function to use. If None, a function will be returned that can
be used as a decorator
setup_as: str
The attribute that shall be assigned to the function in the
resulting namespace. If specified, this function will be used when
calling the :meth:`parse2func` method
insert_at: int
The position where the given `func` should be inserted. If None,
it will be appended at the end and used when calling the
:meth:`parse2func` method
interprete: bool
If True (default), the docstrings are interpreted and switches and
lists are automatically inserted (see the
[interpretation-docs]_
epilog_sections: list of str
The headers of the sections to extract. If None, the
:attr:`epilog_sections` attribute is used
overwrite: bool
If True, overwrite the existing epilog and the existing description
of the parser
append_epilog: bool
If True, append to the existing epilog
Returns
-------
function
Either the function that can be used as a decorator (if `func` is
``None``), or the given `func` itself.
Examples
--------
Use this method as a decorator::
>>> @parser.setup_args
... def do_something(a=1):
'''
Just an example
Parameters
----------
a: int
A number to increment by one
'''
return a + 1
>>> args = parser.parse_args('-a 2'.split())
Or by specifying the setup_as function::
>>> @parser.setup_args(setup_as='func')
... def do_something(a=1):
'''
Just an example
Parameters
----------
a: int
A number to increment by one
'''
return a + 1
>>> args = parser.parse_args('-a 2'.split())
>>> args.func is do_something
>>> parser.parse2func('-a 2'.split())
3
References
----------
.. [interpretation-docs]
http://funcargparse.readthedocs.io/en/latest/docstring_interpretation.html)
"""
def setup(func):
# insert the function
if insert_at is None:
self._used_functions.append(func)
else:
self._used_functions.insert(insert_at, func)
args_dict = self.unfinished_arguments
# save the function to use in parse2funcs
if setup_as:
args_dict[setup_as] = dict(
long=setup_as, default=func, help=argparse.SUPPRESS)
self._setup_as = setup_as
# create arguments
args, varargs, varkw, defaults = inspect.getargspec(func)
full_doc = docstrings.dedents(inspect.getdoc(func))
summary = docstrings.get_full_description(full_doc)
if summary:
if not self.description or overwrite:
self.description = summary
full_doc = docstrings._remove_summary(full_doc)
self.extract_as_epilog(full_doc, epilog_sections, overwrite,
append_epilog)
doc = docstrings._get_section(full_doc, 'Parameters') + '\n'
doc += docstrings._get_section(full_doc, 'Other Parameters')
doc = doc.rstrip()
default_min = len(args or []) - len(defaults or [])
for i, arg in enumerate(args):
if arg == 'self' or arg in args_dict:
continue
arg_doc, dtype = self.get_param_doc(doc, arg)
args_dict[arg] = d = {'dest': arg, 'short': arg.replace('_',
'-'),
'long': arg.replace('_', '-')}
if arg_doc:
d['help'] = arg_doc
if i >= default_min:
d['default'] = defaults[i - default_min]
else:
d['positional'] = True
if interprete and dtype == 'bool' and 'default' in d:
d['action'] = 'store_false' if d['default'] else \
'store_true'
elif interprete and dtype:
if dtype.startswith('list of'):
d['nargs'] = '+'
dtype = dtype[7:].strip()
if dtype in ['str', 'string', 'strings']:
d['type'] = six.text_type
if dtype == 'strings':
dtype = 'string'
else:
try:
d['type'] = getattr(builtins, dtype)
except AttributeError:
try: # maybe the dtype has a final 's'
d['type'] = getattr(builtins, dtype[:-1])
dtype = dtype[:-1]
except AttributeError:
pass
d['metavar'] = dtype
return func
if func is None:
return setup
else:
return setup(func)
@docstrings.get_sectionsf('FuncArgParser.add_subparsers')
@docstrings.dedent
def add_subparsers(self, *args, **kwargs):
"""
Add subparsers to this parser
Parameters
----------
``*args, **kwargs``
As specified by the original
:meth:`argparse.ArgumentParser.add_subparsers` method
chain: bool
Default: False. If True, It is enabled to chain subparsers"""
chain = kwargs.pop('chain', None)
ret = super(FuncArgParser, self).add_subparsers(*args, **kwargs)
if chain:
self._chain_subparsers = True
self._subparsers_action = ret
return ret
@docstrings.dedent
def setup_subparser(
self, func=None, setup_as=None, insert_at=None, interprete=True,
epilog_sections=None, overwrite=False, append_epilog=True,
return_parser=False, name=None, **kwargs):
"""
Create a subparser with the name of the given function
Parameters are the same as for the :meth:`setup_args` function, other
parameters are parsed to the :meth:`add_subparsers` method if (and only
if) this method has not already been called.
Parameters
----------
%(FuncArgParser.setup_args.parameters)s
return_parser: bool
If True, the create parser is returned instead of the function
name: str
The name of the created parser. If None, the function name is used
and underscores (``'_'``) are replaced by minus (``'-'``)
``**kwargs``
Any other parameter that is passed to the add_parser method that
creates the parser
Other Parameters
----------------
Returns
-------
FuncArgParser or %(FuncArgParser.setup_args.returns)s
If return_parser is True, the created subparser is returned
Examples
--------
Use this method as a decorator::
>>> from funcargparser import FuncArgParser
>>> parser = FuncArgParser()
>>> @parser.setup_subparser
... def my_func(my_argument=None):
... pass
>>> args = parser.parse_args('my-func -my-argument 1'.split())
"""
def setup(func):
if self._subparsers_action is None:
raise RuntimeError(
"No subparsers have yet been created! Run the "
"add_subparsers method first!")
# replace underscore by '-'
name2use = name
if name2use is None:
name2use = func.__name__.replace('_', '-')
kwargs.setdefault('help', docstrings.get_summary(
docstrings.dedents(inspect.getdoc(func))))
parser = self._subparsers_action.add_parser(name2use, **kwargs)
parser.setup_args(
func, setup_as=setup_as, insert_at=insert_at,
interprete=interprete, epilog_sections=epilog_sections,
overwrite=overwrite, append_epilog=append_epilog)
return func, parser
if func is None:
return lambda f: setup(f)[0]
else:
return setup(func)[int(return_parser)]
@docstrings.get_sectionsf('FuncArgParser.update_arg')
@docstrings.dedent
def update_arg(self, arg, if_existent=None, **kwargs):
"""
Update the `add_argument` data for the given parameter
Parameters
----------
arg: str
The name of the function argument
if_existent: bool or None
If True, the argument is updated. If None (default), the argument
is only updated, if it exists. Otherwise, if False, the given
``**kwargs`` are only used if the argument is not yet existing
``**kwargs``
The keyword arguments any parameter for the
:meth:`argparse.ArgumentParser.add_argument` method
"""
if if_existent or (if_existent is None and
arg in self.unfinished_arguments):
self.unfinished_arguments[arg].update(kwargs)
elif not if_existent and if_existent is not None:
self.unfinished_arguments.setdefault(arg, kwargs)
@docstrings.dedent
def update_argf(self, arg, **kwargs):
"""
Update the arguments as a decorator
Parameters
---------
%(FuncArgParser.update_arg.parameters)s
Examples
--------
Use this method as a decorator::
>>> from funcargparser import FuncArgParser
>>> parser = FuncArgParser()
>>> @parser.update_argf('my_argument', type=int)
... def my_func(my_argument=None):
... pass
>>> args = parser.parse_args('my-func -my-argument 1'.split())
>>> isinstance(args.my_argument, int)
True
See Also
--------
update_arg"""
return self._as_decorator('update_arg', arg, **kwargs)
def _as_decorator(self, funcname, *args, **kwargs):
def func_decorator(func):
success = False
for parser in self._get_corresponding_parsers(func):
getattr(parser, funcname)(*args, **kwargs)
success = True
if not success:
raise ValueError(
"Could not figure out to which this %s belongs" % func)
return func
return func_decorator
def _get_corresponding_parsers(self, func):
"""Get the parser that has been set up by the given `function`"""
if func in self._used_functions:
yield self
if self._subparsers_action is not None:
for parser in self._subparsers_action.choices.values():
for sp in parser._get_corresponding_parsers(func):
yield sp
def pop_arg(self, *args, **kwargs):
"""Delete a previously defined argument from the parser
"""
return self.unfinished_arguments.pop(*args, **kwargs)
def pop_argf(self, *args, **kwargs):
"""Delete a previously defined argument from the parser via decorators
Same as :meth:`pop_arg` but it can be used as a decorator"""
return self._as_decorator('pop_arg', *args, **kwargs)
def pop_key(self, arg, key, *args, **kwargs):
"""Delete a previously defined key for the `add_argument`
"""
return self.unfinished_arguments[arg].pop(key, *args, **kwargs)
def pop_keyf(self, *args, **kwargs):
"""Delete a previously defined key for the `add_argument`
Same as :meth:`pop_key` but it can be used as a decorator"""
return self._as_decorator('pop_key', *args, **kwargs)
def create_arguments(self, subparsers=False):
"""Create and add the arguments
Parameters
----------
subparsers: bool
If True, the arguments of the subparsers are also created"""
ret = []
if not self._finalized:
for arg, d in self.unfinished_arguments.items():
try:
not_positional = int(not d.pop('positional', False))
short = d.pop('short', None)
long_name = d.pop('long', None)
if short is None and long_name is None:
raise ValueError(
"Either a short (-) or a long (--) argument must "
"be provided!")
if not not_positional:
short = arg
long_name = None
d.pop('dest', None)
if short == long_name:
long_name = None
args = []
if short:
args.append('-' * not_positional + short)
if long_name:
args.append('--' * not_positional + long_name)
group = d.pop('group', self)
if d.get('action') in ['store_true', 'store_false']:
d.pop('metavar', None)
ret.append(group.add_argument(*args, **d))
except Exception:
print('Error while creating argument %s' % arg)
raise
else:
raise ValueError('Parser has already been finalized!')
self._finalized = True
if subparsers and self._subparsers_action is not None:
for parser in self._subparsers_action.choices.values():
parser.create_arguments(True)
return ret
def append2help(self, arg, s):
"""Append the given string to the help of argument `arg`
Parameters
----------
arg: str
The function argument
s: str
The string to append to the help"""
self.unfinished_arguments[arg]['help'] += s
def append2helpf(self, arg, s):
"""Append the given string to the help of argument `arg`
Parameters
----------
arg: str
The function argument
s: str
The string to append to the help"""
return self._as_decorator('append2help', arg, s)
@staticmethod
def format_bold(section, text):
"""Make a bold formatting for the section header"""
return '**%s**\n\n%s' % (section, text)
@staticmethod
def format_rubric(section, text):
"""Make a bold formatting for the section header"""
return '.. rubric:: %s\n\n%s' % (section, text)
@staticmethod
def format_heading(section, text):
return '\n'.join([section, '-' * len(section), text])
def format_epilog_section(self, section, text):
"""Format a section for the epilog by inserting a format"""
try:
func = self._epilog_formatters[self.epilog_formatter]
except KeyError:
if not callable(self.epilog_formatter):
raise
func = self.epilog_formatter
return func(section, text)
def extract_as_epilog(self, text, sections=None, overwrite=False,
append=True):
"""Extract epilog sections from the a docstring
Parameters
----------
text
The docstring to use
sections: list of str
The headers of the sections to extract. If None, the
:attr:`epilog_sections` attribute is used
overwrite: bool
If True, overwrite the existing epilog
append: bool
If True, append to the existing epilog"""
if sections is None:
sections = self.epilog_sections
if ((not self.epilog or overwrite or append) and sections):
epilog_parts = []
for sec in sections:
text = docstrings._get_section(text, sec).strip()
if text:
epilog_parts.append(
self.format_epilog_section(sec, text))
if epilog_parts:
epilog = '\n\n'.join(epilog_parts)
if overwrite or not self.epilog:
self.epilog = epilog
else:
self.epilog += '\n\n' + epilog
def parse_known_args(self, args=None, namespace=None):
if self._chain_subparsers:
if args is None:
args = sys.argv[1:]
choices_d = OrderedDict()
remainders = OrderedDict()
main_args = []
# get the first argument to make sure that everything works
cmd = self.__currentarg = None
for i, (cmd, subargs) in enumerate(groupby(args, self.grouparg)):
if cmd is None:
main_args += list(subargs)
else:
# replace '-' by underscore
ns_cmd = cmd.replace('-', '_')
choices_d[ns_cmd], remainders[ns_cmd] = super(
FuncArgParser, self).parse_known_args(
list(chain(main_args, subargs)))
main_ns, remainders[None] = self.__parse_main(main_args)
for key, val in vars(main_ns).items():
choices_d[key] = val
self.__currentarg = None
if '__dummy' in choices_d:
del choices_d['__dummy']
return Namespace(**choices_d), list(chain(*remainders.values()))
# otherwise, use the default behaviour
return super(FuncArgParser, self).parse_known_args(args, namespace)
def __parse_main(self, args):
"""Parse the main arguments only. This is a work around for python 2.7
because argparse does not allow to parse arguments without subparsers
"""
if six.PY2:
self._subparsers_action.add_parser("__dummy")
return super(FuncArgParser, self).parse_known_args(
list(args) + ['__dummy'])
return super(FuncArgParser, self).parse_known_args(args)
@docstrings.get_sectionsf('FuncArgParser.update_short')
@docstrings.dedent
def update_short(self, **kwargs):
"""
Update the short optional arguments (those with one leading '-')
This method updates the short argument name for the specified function
arguments as stored in :attr:`unfinished_arguments`
Parameters
----------
``**kwargs``
Keywords must be keys in the :attr:`unfinished_arguments`
dictionary (i.e. keywords of the root functions), values the short
argument names
Examples
--------
Setting::
>>> parser.update_short(something='s', something_else='se')
is basically the same as::
>>> parser.update_arg('something', short='s')
>>> parser.update_arg('something_else', short='se')
which in turn is basically comparable to::
>>> parser.add_argument('-s', '--something', ...)
>>> parser.add_argument('-se', '--something_else', ...)
See Also
--------
update_shortf, update_long"""
for key, val in six.iteritems(kwargs):
self.update_arg(key, short=val)
@docstrings.dedent
def update_shortf(self, **kwargs):
"""
Update the short optional arguments belonging to a function
This method acts exactly like :meth:`update_short` but works as a
decorator (see :meth:`update_arg` and :meth:`update_argf`)
Parameters
----------
%(FuncArgParser.update_short.parameters)s
Returns
-------
function
The function that can be used as a decorator
Examples
--------
Use this method as a decorator::
>>> @parser.update_shortf(something='s', something_else='se')
... def do_something(something=None, something_else=None):
... ...
See also the examples in :meth:`update_short`.
See Also
--------
update_short, update_longf
"""
return self._as_decorator('update_short', **kwargs)
@docstrings.get_sectionsf('FuncArgParser.update_long')
@docstrings.dedent
def update_long(self, **kwargs):
"""
Update the long optional arguments (those with two leading '-')
This method updates the short argument name for the specified function
arguments as stored in :attr:`unfinished_arguments`
Parameters
----------
``**kwargs``
Keywords must be keys in the :attr:`unfinished_arguments`
dictionary (i.e. keywords of the root functions), values the long
argument names
Examples
--------
Setting::
>>> parser.update_long(something='s', something_else='se')
is basically the same as::
>>> parser.update_arg('something', long='s')
>>> parser.update_arg('something_else', long='se')
which in turn is basically comparable to::
>>> parser.add_argument('--s', dest='something', ...)
>>> parser.add_argument('--se', dest='something_else', ...)
See Also
--------
update_short, update_longf"""
for key, val in six.iteritems(kwargs):
self.update_arg(key, long=val)
@docstrings.dedent
def update_longf(self, **kwargs):
"""
Update the long optional arguments belonging to a function
This method acts exactly like :meth:`update_long` but works as a
decorator (see :meth:`update_arg` and :meth:`update_argf`)
Parameters
----------
%(FuncArgParser.update_long.parameters)s
Returns
-------
function
The function that can be used as a decorator
Examples
--------
Use this method as a decorator::
>>> @parser.update_shortf(something='s', something_else='se')
... def do_something(something=None, something_else=None):
... ...
See also the examples in :meth:`update_long`.
See Also
--------
update_short, update_longf
"""
return self._as_decorator('update_long', **kwargs)
def parse2func(self, args=None, func=None):
"""Parse the command line arguments to the setup function
This method parses the given command line arguments to the function
used in the :meth:`setup_args` method to setup up this parser
Parameters
----------
args: list
The list of command line arguments
func: function
An alternative function to use. If None, the last function or the
one specified through the `setup_as` parameter in the
:meth:`setup_args` is used.
Returns
-------
object
What ever is returned by the called function
Note
----
This method does not cover subparsers!"""
kws = vars(self.parse_args(args))
if func is None:
if self._setup_as:
func = kws.pop(self._setup_as)
else:
func = self._used_functions[-1]
return func(**kws)
def parse_known2func(self, args=None, func=None):
"""Parse the command line arguments to the setup function
This method parses the given command line arguments to the function
used in the :meth:`setup_args` method to setup up this parser
Parameters
----------
args: list
The list of command line arguments
func: function or str
An alternative function to use. If None, the last function or the
one specified through the `setup_as` parameter in the
:meth:`setup_args` is used.
Returns
-------
object
What ever is returned by the called function
list
The remaining command line arguments that could not be interpreted
Note
----
This method does not cover subparsers!"""
ns, remainder = self.parse_known_args(args)
kws = vars(ns)
if func is None:
if self._setup_as:
func = kws.pop(self._setup_as)
else:
func = self._used_functions[-1]
return func(**kws), remainder
def parse_chained(self, args=None):
"""
Parse the argument directly to the function used for setup
This function parses the command line arguments to the function that
has been used for the :meth:`setup_args`.
Parameters
----------
args: list
The arguments parsed to the :meth:`parse_args` function
Returns
-------
argparse.Namespace
The namespace with mapping from command name to the function
return
See also
--------
parse_known_chained
"""
kws = vars(self.parse_args(args))
return self._parse2subparser_funcs(kws)
def parse_known_chained(self, args=None):
"""
Parse the argument directly to the function used for setup
This function parses the command line arguments to the function that
has been used for the :meth:`setup_args` method.
Parameters
----------
args: list
The arguments parsed to the :meth:`parse_args` function
Returns
-------
argparse.Namespace
The namespace with mapping from command name to the function
return
list
The remaining arguments that could not be interpreted
See also
--------
parse_known
"""
ns, remainder = self.parse_known_args(args)
kws = vars(ns)
return self._parse2subparser_funcs(kws), remainder
def _parse2subparser_funcs(self, kws):
"""
Recursive function to parse arguments to chained parsers
"""
choices = getattr(self._subparsers_action, 'choices', {})
replaced = {key.replace('-', '_'): key for key in choices}
sp_commands = set(replaced).intersection(kws)
if not sp_commands:
if self._setup_as is not None:
func = kws.pop(self._setup_as)
else:
try:
func = self._used_functions[-1]
except IndexError:
return None
return func(**{
key: kws[key] for key in set(kws).difference(choices)})
else:
ret = {}
for key in sp_commands:
ret[key.replace('-', '_')] = \
choices[replaced[key]]._parse2subparser_funcs(
vars(kws[key]))
return Namespace(**ret)
def get_subparser(self, name):
"""
Convenience method to get a certain subparser
Parameters
----------
name: str
The name of the subparser
Returns
-------
FuncArgParser
The subparsers corresponding to `name`
"""
if self._subparsers_action is None:
raise ValueError("%s has no subparsers defined!" % self)
return self._subparsers_action.choices[name]
|
Chilipp/funcargparse | funcargparse/__init__.py | FuncArgParser.__parse_main | python | def __parse_main(self, args):
if six.PY2:
self._subparsers_action.add_parser("__dummy")
return super(FuncArgParser, self).parse_known_args(
list(args) + ['__dummy'])
return super(FuncArgParser, self).parse_known_args(args) | Parse the main arguments only. This is a work around for python 2.7
because argparse does not allow to parse arguments without subparsers | train | https://github.com/Chilipp/funcargparse/blob/398ce8e7fa5aa35c465215446bda151cf1ecf7ad/funcargparse/__init__.py#L709-L717 | null | class FuncArgParser(ArgumentParser):
"""Subclass of an argument parser that get's parts of the information
from a given function"""
_finalized = False
#: The unfinished arguments after the setup
unfinished_arguments = {}
#: The sections to extract from a function docstring that should be used
#: in the epilog of this parser. See also the :meth:`setup_args` method
epilog_sections = ['Notes', 'References']
#: The formatter specification for the epilog. This can either be a string
#: out of 'header', 'bold', or
#: 'rubric' or a callable (i.e. function) that takes two arguments,
#: the section title and the section text, and returns a string.
#:
#: 'heading'
#: Use section headers such as::
#:
#: Notes
#: -----
#: 'bold'
#: Just make a bold header for the section, e.g. ``**Notes**``
#: 'rubric'
#: Use a rubric rst directive, e.g. ``.. rubric:: Notes``
#:
#: .. warning::
#:
#: When building a sphinx documentation using the sphinx-argparse
#: module, this value should be set to ``'bold'`` or ``'rubric'``! Just
#: add this two lines to your conf.py:
#:
#: .. code-block:: python
#:
#: import funcargparse
#: funcargparse.FuncArgParser.epilog_formatter = 'rubric'
epilog_formatter = 'heading'
def __init__(self, *args, **kwargs):
"""
Parameters
----------
``*args,**kwargs``
Theses arguments are determined by the
:class:`argparse.ArgumentParser` base class. Note that by default,
we use a :class:`argparse.RawTextHelpFormatter` class for the
`formatter_class` keyword, whereas the
:class:`argparse.ArgumentParser` uses a
:class:`argparse.HelpFormatter`
Other Parameters
----------------
epilog_sections: list of str
The default sections to use for the epilog (see the
:attr:`epilog_sections` attribute). They can also be specified
each time the :meth:`setup_args` method is called
epilog_formatter: {'header', 'bold', 'rubric'} or function
Specify how the epilog sections should be formatted and defaults to
the :attr:`epilog_formatter` attribute. This can either be a string
out of 'header', 'bold', or 'rubric' or a callable (i.e. function)
that takes two arguments, the section title and the section text,
and returns a string.
'heading'
Use section headers such as::
Notes
-----
'bold'
Just make a bold header for the section, e.g. ``**Notes**``
'rubric'
Use a rubric rst directive, e.g. ``.. rubric:: Notes``
"""
self._subparsers_action = None
kwargs.setdefault('formatter_class', argparse.RawTextHelpFormatter)
epilog_sections = kwargs.pop('epilog_sections', None)
if epilog_sections is not None:
self.epilog_sections = epilog_sections
epilog_formatter = kwargs.pop('epilog_formatter', None)
if epilog_formatter is not None:
self.epilog_formatter = epilog_formatter
super(FuncArgParser, self).__init__(*args, **kwargs)
self.unfinished_arguments = OrderedDict()
self._used_functions = []
self.__currentarg = None
self._chain_subparsers = False
self._setup_as = None
self._epilog_formatters = {'heading': self.format_heading,
'bold': self.format_bold,
'rubric': self.format_rubric}
@staticmethod
def get_param_doc(doc, param):
"""Get the documentation and datatype for a parameter
This function returns the documentation and the argument for a
napoleon like structured docstring `doc`
Parameters
----------
doc: str
The base docstring to use
param: str
The argument to use
Returns
-------
str
The documentation of the given `param`
str
The datatype of the given `param`"""
arg_doc = docstrings.keep_params_s(doc, [param]) or \
docstrings.keep_types_s(doc, [param])
dtype = None
if arg_doc:
lines = arg_doc.splitlines()
arg_doc = dedents('\n' + '\n'.join(lines[1:]))
param_desc = lines[0].split(':', 1)
if len(param_desc) > 1:
dtype = param_desc[1].strip()
return arg_doc, dtype
@docstrings.get_sectionsf('FuncArgParser.setup_args',
sections=['Parameters', 'Returns'])
@docstrings.dedent
def setup_args(self, func=None, setup_as=None, insert_at=None,
interprete=True, epilog_sections=None,
overwrite=False, append_epilog=True):
"""
Add the parameters from the given `func` to the parameter settings
Parameters
----------
func: function
The function to use. If None, a function will be returned that can
be used as a decorator
setup_as: str
The attribute that shall be assigned to the function in the
resulting namespace. If specified, this function will be used when
calling the :meth:`parse2func` method
insert_at: int
The position where the given `func` should be inserted. If None,
it will be appended at the end and used when calling the
:meth:`parse2func` method
interprete: bool
If True (default), the docstrings are interpreted and switches and
lists are automatically inserted (see the
[interpretation-docs]_
epilog_sections: list of str
The headers of the sections to extract. If None, the
:attr:`epilog_sections` attribute is used
overwrite: bool
If True, overwrite the existing epilog and the existing description
of the parser
append_epilog: bool
If True, append to the existing epilog
Returns
-------
function
Either the function that can be used as a decorator (if `func` is
``None``), or the given `func` itself.
Examples
--------
Use this method as a decorator::
>>> @parser.setup_args
... def do_something(a=1):
'''
Just an example
Parameters
----------
a: int
A number to increment by one
'''
return a + 1
>>> args = parser.parse_args('-a 2'.split())
Or by specifying the setup_as function::
>>> @parser.setup_args(setup_as='func')
... def do_something(a=1):
'''
Just an example
Parameters
----------
a: int
A number to increment by one
'''
return a + 1
>>> args = parser.parse_args('-a 2'.split())
>>> args.func is do_something
>>> parser.parse2func('-a 2'.split())
3
References
----------
.. [interpretation-docs]
http://funcargparse.readthedocs.io/en/latest/docstring_interpretation.html)
"""
def setup(func):
# insert the function
if insert_at is None:
self._used_functions.append(func)
else:
self._used_functions.insert(insert_at, func)
args_dict = self.unfinished_arguments
# save the function to use in parse2funcs
if setup_as:
args_dict[setup_as] = dict(
long=setup_as, default=func, help=argparse.SUPPRESS)
self._setup_as = setup_as
# create arguments
args, varargs, varkw, defaults = inspect.getargspec(func)
full_doc = docstrings.dedents(inspect.getdoc(func))
summary = docstrings.get_full_description(full_doc)
if summary:
if not self.description or overwrite:
self.description = summary
full_doc = docstrings._remove_summary(full_doc)
self.extract_as_epilog(full_doc, epilog_sections, overwrite,
append_epilog)
doc = docstrings._get_section(full_doc, 'Parameters') + '\n'
doc += docstrings._get_section(full_doc, 'Other Parameters')
doc = doc.rstrip()
default_min = len(args or []) - len(defaults or [])
for i, arg in enumerate(args):
if arg == 'self' or arg in args_dict:
continue
arg_doc, dtype = self.get_param_doc(doc, arg)
args_dict[arg] = d = {'dest': arg, 'short': arg.replace('_',
'-'),
'long': arg.replace('_', '-')}
if arg_doc:
d['help'] = arg_doc
if i >= default_min:
d['default'] = defaults[i - default_min]
else:
d['positional'] = True
if interprete and dtype == 'bool' and 'default' in d:
d['action'] = 'store_false' if d['default'] else \
'store_true'
elif interprete and dtype:
if dtype.startswith('list of'):
d['nargs'] = '+'
dtype = dtype[7:].strip()
if dtype in ['str', 'string', 'strings']:
d['type'] = six.text_type
if dtype == 'strings':
dtype = 'string'
else:
try:
d['type'] = getattr(builtins, dtype)
except AttributeError:
try: # maybe the dtype has a final 's'
d['type'] = getattr(builtins, dtype[:-1])
dtype = dtype[:-1]
except AttributeError:
pass
d['metavar'] = dtype
return func
if func is None:
return setup
else:
return setup(func)
    @docstrings.get_sectionsf('FuncArgParser.add_subparsers')
    @docstrings.dedent
    def add_subparsers(self, *args, **kwargs):
        """
        Add subparsers to this parser

        Parameters
        ----------
        ``*args, **kwargs``
            As specified by the original
            :meth:`argparse.ArgumentParser.add_subparsers` method
        chain: bool
            Default: False. If True, It is enabled to chain subparsers
        """
        # `chain` is our own keyword; strip it before delegating to argparse
        chain = kwargs.pop('chain', None)
        ret = super(FuncArgParser, self).add_subparsers(*args, **kwargs)
        if chain:
            self._chain_subparsers = True
        # remember the action so that setup_subparser/grouparg/get_subparser
        # can access the subcommand choices later on
        self._subparsers_action = ret
        return ret
    @docstrings.dedent
    def setup_subparser(
            self, func=None, setup_as=None, insert_at=None, interprete=True,
            epilog_sections=None, overwrite=False, append_epilog=True,
            return_parser=False, name=None, **kwargs):
        """
        Create a subparser with the name of the given function

        Parameters are the same as for the :meth:`setup_args` function, other
        parameters are parsed to the :meth:`add_subparsers` method if (and only
        if) this method has not already been called.

        Parameters
        ----------
        %(FuncArgParser.setup_args.parameters)s
        return_parser: bool
            If True, the create parser is returned instead of the function
        name: str
            The name of the created parser. If None, the function name is used
            and underscores (``'_'``) are replaced by minus (``'-'``)
        ``**kwargs``
            Any other parameter that is passed to the add_parser method that
            creates the parser

        Returns
        -------
        FuncArgParser or %(FuncArgParser.setup_args.returns)s
            If return_parser is True, the created subparser is returned

        Raises
        ------
        RuntimeError
            If :meth:`add_subparsers` has not been called yet

        Examples
        --------
        Use this method as a decorator::

            >>> from funcargparser import FuncArgParser
            >>> parser = FuncArgParser()
            >>> @parser.setup_subparser
            ... def my_func(my_argument=None):
            ...     pass
            >>> args = parser.parse_args('my-func -my-argument 1'.split())
        """
        def setup(func):
            if self._subparsers_action is None:
                raise RuntimeError(
                    "No subparsers have yet been created! Run the "
                    "add_subparsers method first!")
            # replace underscore by '-'
            name2use = name
            if name2use is None:
                name2use = func.__name__.replace('_', '-')
            # default the subcommand help to the function's summary line
            kwargs.setdefault('help', docstrings.get_summary(
                docstrings.dedents(inspect.getdoc(func))))
            parser = self._subparsers_action.add_parser(name2use, **kwargs)
            # fill the new subparser from the function signature and docstring
            parser.setup_args(
                func, setup_as=setup_as, insert_at=insert_at,
                interprete=interprete, epilog_sections=epilog_sections,
                overwrite=overwrite, append_epilog=append_epilog)
            return func, parser
        # without `func` we act as a decorator factory returning the function;
        # with `func` we optionally return the created parser instead
        if func is None:
            return lambda f: setup(f)[0]
        else:
            return setup(func)[int(return_parser)]
@docstrings.get_sectionsf('FuncArgParser.update_arg')
@docstrings.dedent
def update_arg(self, arg, if_existent=None, **kwargs):
"""
Update the `add_argument` data for the given parameter
Parameters
----------
arg: str
The name of the function argument
if_existent: bool or None
If True, the argument is updated. If None (default), the argument
is only updated, if it exists. Otherwise, if False, the given
``**kwargs`` are only used if the argument is not yet existing
``**kwargs``
The keyword arguments any parameter for the
:meth:`argparse.ArgumentParser.add_argument` method
"""
if if_existent or (if_existent is None and
arg in self.unfinished_arguments):
self.unfinished_arguments[arg].update(kwargs)
elif not if_existent and if_existent is not None:
self.unfinished_arguments.setdefault(arg, kwargs)
    @docstrings.dedent
    def update_argf(self, arg, **kwargs):
        """
        Update the arguments as a decorator

        Parameters
        ----------
        %(FuncArgParser.update_arg.parameters)s

        Examples
        --------
        Use this method as a decorator::

            >>> from funcargparser import FuncArgParser
            >>> parser = FuncArgParser()
            >>> @parser.update_argf('my_argument', type=int)
            ... def my_func(my_argument=None):
            ...     pass
            >>> args = parser.parse_args('my-func -my-argument 1'.split())
            >>> isinstance(args.my_argument, int)
            True

        See Also
        --------
        update_arg
        """
        return self._as_decorator('update_arg', arg, **kwargs)
def _as_decorator(self, funcname, *args, **kwargs):
def func_decorator(func):
success = False
for parser in self._get_corresponding_parsers(func):
getattr(parser, funcname)(*args, **kwargs)
success = True
if not success:
raise ValueError(
"Could not figure out to which this %s belongs" % func)
return func
return func_decorator
def _get_corresponding_parsers(self, func):
"""Get the parser that has been set up by the given `function`"""
if func in self._used_functions:
yield self
if self._subparsers_action is not None:
for parser in self._subparsers_action.choices.values():
for sp in parser._get_corresponding_parsers(func):
yield sp
def pop_arg(self, *args, **kwargs):
"""Delete a previously defined argument from the parser
"""
return self.unfinished_arguments.pop(*args, **kwargs)
def pop_argf(self, *args, **kwargs):
"""Delete a previously defined argument from the parser via decorators
Same as :meth:`pop_arg` but it can be used as a decorator"""
return self._as_decorator('pop_arg', *args, **kwargs)
def pop_key(self, arg, key, *args, **kwargs):
"""Delete a previously defined key for the `add_argument`
"""
return self.unfinished_arguments[arg].pop(key, *args, **kwargs)
def pop_keyf(self, *args, **kwargs):
"""Delete a previously defined key for the `add_argument`
Same as :meth:`pop_key` but it can be used as a decorator"""
return self._as_decorator('pop_key', *args, **kwargs)
    def create_arguments(self, subparsers=False):
        """Create and add the arguments

        Turns every entry in :attr:`unfinished_arguments` into an actual
        ``add_argument`` call. A parser can only be finalized once.

        Parameters
        ----------
        subparsers: bool
            If True, the arguments of the subparsers are also created

        Returns
        -------
        list
            The actions returned by the ``add_argument`` calls

        Raises
        ------
        ValueError
            If this parser has already been finalized, or if an argument
            defines neither a short nor a long name
        """
        ret = []
        if not self._finalized:
            for arg, d in self.unfinished_arguments.items():
                try:
                    # 1 for optionals (leading dashes), 0 for positionals
                    not_positional = int(not d.pop('positional', False))
                    short = d.pop('short', None)
                    long_name = d.pop('long', None)
                    if short is None and long_name is None:
                        raise ValueError(
                            "Either a short (-) or a long (--) argument must "
                            "be provided!")
                    if not not_positional:
                        # positionals get exactly one name (the function
                        # argument) and must not carry an explicit `dest`
                        short = arg
                        long_name = None
                        d.pop('dest', None)
                    if short == long_name:
                        long_name = None
                    args = []
                    if short:
                        args.append('-' * not_positional + short)
                    if long_name:
                        args.append('--' * not_positional + long_name)
                    # the argument may be assigned to an argument group
                    group = d.pop('group', self)
                    if d.get('action') in ['store_true', 'store_false']:
                        # switches must not define a metavar
                        d.pop('metavar', None)
                    ret.append(group.add_argument(*args, **d))
                except Exception:
                    print('Error while creating argument %s' % arg)
                    raise
        else:
            raise ValueError('Parser has already been finalized!')
        self._finalized = True
        if subparsers and self._subparsers_action is not None:
            for parser in self._subparsers_action.choices.values():
                parser.create_arguments(True)
        return ret
def append2help(self, arg, s):
"""Append the given string to the help of argument `arg`
Parameters
----------
arg: str
The function argument
s: str
The string to append to the help"""
self.unfinished_arguments[arg]['help'] += s
def append2helpf(self, arg, s):
"""Append the given string to the help of argument `arg`
Parameters
----------
arg: str
The function argument
s: str
The string to append to the help"""
return self._as_decorator('append2help', arg, s)
@staticmethod
def format_bold(section, text):
"""Make a bold formatting for the section header"""
return '**%s**\n\n%s' % (section, text)
    @staticmethod
    def format_rubric(section, text):
        """Format `section` as a reST ``.. rubric::`` epilog heading."""
        return '.. rubric:: %s\n\n%s' % (section, text)
    @staticmethod
    def format_heading(section, text):
        """Format `section` as an underlined (``---``) section heading."""
        return '\n'.join([section, '-' * len(section), text])
def format_epilog_section(self, section, text):
"""Format a section for the epilog by inserting a format"""
try:
func = self._epilog_formatters[self.epilog_formatter]
except KeyError:
if not callable(self.epilog_formatter):
raise
func = self.epilog_formatter
return func(section, text)
def extract_as_epilog(self, text, sections=None, overwrite=False,
append=True):
"""Extract epilog sections from the a docstring
Parameters
----------
text
The docstring to use
sections: list of str
The headers of the sections to extract. If None, the
:attr:`epilog_sections` attribute is used
overwrite: bool
If True, overwrite the existing epilog
append: bool
If True, append to the existing epilog"""
if sections is None:
sections = self.epilog_sections
if ((not self.epilog or overwrite or append) and sections):
epilog_parts = []
for sec in sections:
text = docstrings._get_section(text, sec).strip()
if text:
epilog_parts.append(
self.format_epilog_section(sec, text))
if epilog_parts:
epilog = '\n\n'.join(epilog_parts)
if overwrite or not self.epilog:
self.epilog = epilog
else:
self.epilog += '\n\n' + epilog
def grouparg(self, arg, my_arg=None, parent_cmds=[]):
"""
Grouper function for chaining subcommands
Parameters
----------
arg: str
The current command line argument that is parsed
my_arg: str
The name of this subparser. If None, this parser is the main
parser and has no parent parser
parent_cmds: list of str
The available commands of the parent parsers
Returns
-------
str or None
The grouping key for the given `arg` or None if the key does
not correspond to this parser or this parser is the main parser
and does not have seen a subparser yet
Notes
-----
Quite complicated, there is no real need to deal with this function
"""
if self._subparsers_action is None:
return None
commands = self._subparsers_action.choices
currentarg = self.__currentarg
# the default return value is the current argument we are in or the
# name of the subparser itself
ret = currentarg or my_arg
if currentarg is not None:
# if we are already in a sub command, we use the sub parser
sp_key = commands[currentarg].grouparg(arg, currentarg, chain(
commands, parent_cmds))
if sp_key is None and arg in commands:
# if the subparser did not recognize the command, we use the
# command the corresponds to this parser or (of this parser
# is the parent parser) the current subparser
self.__currentarg = currentarg = arg
ret = my_arg or currentarg
elif sp_key not in commands and arg in parent_cmds:
# otherwise, if the subparser recognizes the commmand but it is
# not in the known command of this parser, it must be another
# command of the subparser and this parser can ignore it
ret = None
else:
# otherwise the command belongs to this subparser (if this one
# is not the subparser) or the current subparser
ret = my_arg or currentarg
elif arg in commands:
# if the argument is a valid subparser, we return this one
self.__currentarg = arg
ret = arg
elif arg in parent_cmds:
# if the argument is not a valid subparser but in one of our
# parents, we return None to signalize that we cannot categorize
# it
ret = None
return ret
    def parse_known_args(self, args=None, namespace=None):
        """Parse known arguments, optionally chaining subcommands.

        When :attr:`_chain_subparsers` is set (via ``add_subparsers`` with
        ``chain=True``), the command line is grouped by subcommand using
        :meth:`grouparg`, every group is parsed separately and the results
        are collected in one namespace keyed by command name. Otherwise the
        default :meth:`argparse.ArgumentParser.parse_known_args` behaviour
        applies.
        """
        if self._chain_subparsers:
            if args is None:
                args = sys.argv[1:]
            choices_d = OrderedDict()
            remainders = OrderedDict()
            main_args = []
            # get the first argument to make sure that everything works
            cmd = self.__currentarg = None
            for i, (cmd, subargs) in enumerate(groupby(args, self.grouparg)):
                if cmd is None:
                    # arguments before any subcommand belong to this parser
                    main_args += list(subargs)
                else:
                    # replace '-' by underscore
                    ns_cmd = cmd.replace('-', '_')
                    # parse the main arguments together with this group
                    choices_d[ns_cmd], remainders[ns_cmd] = super(
                        FuncArgParser, self).parse_known_args(
                            list(chain(main_args, subargs)))
            main_ns, remainders[None] = self.__parse_main(main_args)
            for key, val in vars(main_ns).items():
                choices_d[key] = val
            # reset the grouping state for the next parse run
            self.__currentarg = None
            if '__dummy' in choices_d:
                del choices_d['__dummy']
            return Namespace(**choices_d), list(chain(*remainders.values()))
        # otherwise, use the default behaviour
        return super(FuncArgParser, self).parse_known_args(args, namespace)
@docstrings.get_sectionsf('FuncArgParser.update_short')
@docstrings.dedent
def update_short(self, **kwargs):
"""
Update the short optional arguments (those with one leading '-')
This method updates the short argument name for the specified function
arguments as stored in :attr:`unfinished_arguments`
Parameters
----------
``**kwargs``
Keywords must be keys in the :attr:`unfinished_arguments`
dictionary (i.e. keywords of the root functions), values the short
argument names
Examples
--------
Setting::
>>> parser.update_short(something='s', something_else='se')
is basically the same as::
>>> parser.update_arg('something', short='s')
>>> parser.update_arg('something_else', short='se')
which in turn is basically comparable to::
>>> parser.add_argument('-s', '--something', ...)
>>> parser.add_argument('-se', '--something_else', ...)
See Also
--------
update_shortf, update_long"""
for key, val in six.iteritems(kwargs):
self.update_arg(key, short=val)
    @docstrings.dedent
    def update_shortf(self, **kwargs):
        """
        Update the short optional arguments belonging to a function

        This method acts exactly like :meth:`update_short` but works as a
        decorator (see :meth:`update_arg` and :meth:`update_argf`)

        Parameters
        ----------
        %(FuncArgParser.update_short.parameters)s

        Returns
        -------
        function
            The function that can be used as a decorator

        Examples
        --------
        Use this method as a decorator::

            >>> @parser.update_shortf(something='s', something_else='se')
            ... def do_something(something=None, something_else=None):
            ...     ...

        See also the examples in :meth:`update_short`.

        See Also
        --------
        update_short, update_longf
        """
        return self._as_decorator('update_short', **kwargs)
@docstrings.get_sectionsf('FuncArgParser.update_long')
@docstrings.dedent
def update_long(self, **kwargs):
"""
Update the long optional arguments (those with two leading '-')
This method updates the short argument name for the specified function
arguments as stored in :attr:`unfinished_arguments`
Parameters
----------
``**kwargs``
Keywords must be keys in the :attr:`unfinished_arguments`
dictionary (i.e. keywords of the root functions), values the long
argument names
Examples
--------
Setting::
>>> parser.update_long(something='s', something_else='se')
is basically the same as::
>>> parser.update_arg('something', long='s')
>>> parser.update_arg('something_else', long='se')
which in turn is basically comparable to::
>>> parser.add_argument('--s', dest='something', ...)
>>> parser.add_argument('--se', dest='something_else', ...)
See Also
--------
update_short, update_longf"""
for key, val in six.iteritems(kwargs):
self.update_arg(key, long=val)
    @docstrings.dedent
    def update_longf(self, **kwargs):
        """
        Update the long optional arguments belonging to a function

        This method acts exactly like :meth:`update_long` but works as a
        decorator (see :meth:`update_arg` and :meth:`update_argf`)

        Parameters
        ----------
        %(FuncArgParser.update_long.parameters)s

        Returns
        -------
        function
            The function that can be used as a decorator

        Examples
        --------
        Use this method as a decorator::

            >>> @parser.update_longf(something='s', something_else='se')
            ... def do_something(something=None, something_else=None):
            ...     ...

        See also the examples in :meth:`update_long`.

        See Also
        --------
        update_long, update_shortf
        """
        return self._as_decorator('update_long', **kwargs)
def parse2func(self, args=None, func=None):
"""Parse the command line arguments to the setup function
This method parses the given command line arguments to the function
used in the :meth:`setup_args` method to setup up this parser
Parameters
----------
args: list
The list of command line arguments
func: function
An alternative function to use. If None, the last function or the
one specified through the `setup_as` parameter in the
:meth:`setup_args` is used.
Returns
-------
object
What ever is returned by the called function
Note
----
This method does not cover subparsers!"""
kws = vars(self.parse_args(args))
if func is None:
if self._setup_as:
func = kws.pop(self._setup_as)
else:
func = self._used_functions[-1]
return func(**kws)
def parse_known2func(self, args=None, func=None):
"""Parse the command line arguments to the setup function
This method parses the given command line arguments to the function
used in the :meth:`setup_args` method to setup up this parser
Parameters
----------
args: list
The list of command line arguments
func: function or str
An alternative function to use. If None, the last function or the
one specified through the `setup_as` parameter in the
:meth:`setup_args` is used.
Returns
-------
object
What ever is returned by the called function
list
The remaining command line arguments that could not be interpreted
Note
----
This method does not cover subparsers!"""
ns, remainder = self.parse_known_args(args)
kws = vars(ns)
if func is None:
if self._setup_as:
func = kws.pop(self._setup_as)
else:
func = self._used_functions[-1]
return func(**kws), remainder
def parse_chained(self, args=None):
"""
Parse the argument directly to the function used for setup
This function parses the command line arguments to the function that
has been used for the :meth:`setup_args`.
Parameters
----------
args: list
The arguments parsed to the :meth:`parse_args` function
Returns
-------
argparse.Namespace
The namespace with mapping from command name to the function
return
See also
--------
parse_known_chained
"""
kws = vars(self.parse_args(args))
return self._parse2subparser_funcs(kws)
def parse_known_chained(self, args=None):
"""
Parse the argument directly to the function used for setup
This function parses the command line arguments to the function that
has been used for the :meth:`setup_args` method.
Parameters
----------
args: list
The arguments parsed to the :meth:`parse_args` function
Returns
-------
argparse.Namespace
The namespace with mapping from command name to the function
return
list
The remaining arguments that could not be interpreted
See also
--------
parse_known
"""
ns, remainder = self.parse_known_args(args)
kws = vars(ns)
return self._parse2subparser_funcs(kws), remainder
    def _parse2subparser_funcs(self, kws):
        """
        Recursive function to parse arguments to chained parsers

        Parameters
        ----------
        kws: dict
            The ``vars()`` of the namespace produced by argument parsing

        Returns
        -------
        object, argparse.Namespace or None
            The return value of this parser's setup function if no
            subcommand was used, a namespace mapping each used subcommand
            to its subparser's result, or None if no function was set up
        """
        # map from namespace-safe name ('_') back to the subcommand name
        choices = getattr(self._subparsers_action, 'choices', {})
        replaced = {key.replace('-', '_'): key for key in choices}
        sp_commands = set(replaced).intersection(kws)
        if not sp_commands:
            # no subcommand used: call this parser's own setup function
            if self._setup_as is not None:
                func = kws.pop(self._setup_as)
            else:
                try:
                    func = self._used_functions[-1]
                except IndexError:
                    # nothing was ever set up for this parser
                    return None
            # drop the subcommand keys before calling the function
            return func(**{
                key: kws[key] for key in set(kws).difference(choices)})
        else:
            # delegate every used subcommand to the corresponding subparser
            ret = {}
            for key in sp_commands:
                ret[key.replace('-', '_')] = \
                    choices[replaced[key]]._parse2subparser_funcs(
                        vars(kws[key]))
            return Namespace(**ret)
def get_subparser(self, name):
"""
Convenience method to get a certain subparser
Parameters
----------
name: str
The name of the subparser
Returns
-------
FuncArgParser
The subparsers corresponding to `name`
"""
if self._subparsers_action is None:
raise ValueError("%s has no subparsers defined!" % self)
return self._subparsers_action.choices[name]
|
Chilipp/funcargparse | funcargparse/__init__.py | FuncArgParser.update_short | python | def update_short(self, **kwargs):
for key, val in six.iteritems(kwargs):
self.update_arg(key, short=val) | Update the short optional arguments (those with one leading '-')
This method updates the short argument name for the specified function
arguments as stored in :attr:`unfinished_arguments`
Parameters
----------
``**kwargs``
Keywords must be keys in the :attr:`unfinished_arguments`
dictionary (i.e. keywords of the root functions), values the short
argument names
Examples
--------
Setting::
>>> parser.update_short(something='s', something_else='se')
is basically the same as::
>>> parser.update_arg('something', short='s')
>>> parser.update_arg('something_else', short='se')
which in turn is basically comparable to::
>>> parser.add_argument('-s', '--something', ...)
>>> parser.add_argument('-se', '--something_else', ...)
See Also
--------
update_shortf, update_long | train | https://github.com/Chilipp/funcargparse/blob/398ce8e7fa5aa35c465215446bda151cf1ecf7ad/funcargparse/__init__.py#L721-L755 | null | class FuncArgParser(ArgumentParser):
"""Subclass of an argument parser that get's parts of the information
from a given function"""
_finalized = False
#: The unfinished arguments after the setup
unfinished_arguments = {}
#: The sections to extract from a function docstring that should be used
#: in the epilog of this parser. See also the :meth:`setup_args` method
epilog_sections = ['Notes', 'References']
#: The formatter specification for the epilog. This can either be a string
#: out of 'header', 'bold', or
#: 'rubric' or a callable (i.e. function) that takes two arguments,
#: the section title and the section text, and returns a string.
#:
#: 'heading'
#: Use section headers such as::
#:
#: Notes
#: -----
#: 'bold'
#: Just make a bold header for the section, e.g. ``**Notes**``
#: 'rubric'
#: Use a rubric rst directive, e.g. ``.. rubric:: Notes``
#:
#: .. warning::
#:
#: When building a sphinx documentation using the sphinx-argparse
#: module, this value should be set to ``'bold'`` or ``'rubric'``! Just
#: add this two lines to your conf.py:
#:
#: .. code-block:: python
#:
#: import funcargparse
#: funcargparse.FuncArgParser.epilog_formatter = 'rubric'
epilog_formatter = 'heading'
def __init__(self, *args, **kwargs):
"""
Parameters
----------
``*args,**kwargs``
Theses arguments are determined by the
:class:`argparse.ArgumentParser` base class. Note that by default,
we use a :class:`argparse.RawTextHelpFormatter` class for the
`formatter_class` keyword, whereas the
:class:`argparse.ArgumentParser` uses a
:class:`argparse.HelpFormatter`
Other Parameters
----------------
epilog_sections: list of str
The default sections to use for the epilog (see the
:attr:`epilog_sections` attribute). They can also be specified
each time the :meth:`setup_args` method is called
epilog_formatter: {'header', 'bold', 'rubric'} or function
Specify how the epilog sections should be formatted and defaults to
the :attr:`epilog_formatter` attribute. This can either be a string
out of 'header', 'bold', or 'rubric' or a callable (i.e. function)
that takes two arguments, the section title and the section text,
and returns a string.
'heading'
Use section headers such as::
Notes
-----
'bold'
Just make a bold header for the section, e.g. ``**Notes**``
'rubric'
Use a rubric rst directive, e.g. ``.. rubric:: Notes``
"""
self._subparsers_action = None
kwargs.setdefault('formatter_class', argparse.RawTextHelpFormatter)
epilog_sections = kwargs.pop('epilog_sections', None)
if epilog_sections is not None:
self.epilog_sections = epilog_sections
epilog_formatter = kwargs.pop('epilog_formatter', None)
if epilog_formatter is not None:
self.epilog_formatter = epilog_formatter
super(FuncArgParser, self).__init__(*args, **kwargs)
self.unfinished_arguments = OrderedDict()
self._used_functions = []
self.__currentarg = None
self._chain_subparsers = False
self._setup_as = None
self._epilog_formatters = {'heading': self.format_heading,
'bold': self.format_bold,
'rubric': self.format_rubric}
@staticmethod
def get_param_doc(doc, param):
"""Get the documentation and datatype for a parameter
This function returns the documentation and the argument for a
napoleon like structured docstring `doc`
Parameters
----------
doc: str
The base docstring to use
param: str
The argument to use
Returns
-------
str
The documentation of the given `param`
str
The datatype of the given `param`"""
arg_doc = docstrings.keep_params_s(doc, [param]) or \
docstrings.keep_types_s(doc, [param])
dtype = None
if arg_doc:
lines = arg_doc.splitlines()
arg_doc = dedents('\n' + '\n'.join(lines[1:]))
param_desc = lines[0].split(':', 1)
if len(param_desc) > 1:
dtype = param_desc[1].strip()
return arg_doc, dtype
@docstrings.get_sectionsf('FuncArgParser.setup_args',
sections=['Parameters', 'Returns'])
@docstrings.dedent
def setup_args(self, func=None, setup_as=None, insert_at=None,
interprete=True, epilog_sections=None,
overwrite=False, append_epilog=True):
"""
Add the parameters from the given `func` to the parameter settings
Parameters
----------
func: function
The function to use. If None, a function will be returned that can
be used as a decorator
setup_as: str
The attribute that shall be assigned to the function in the
resulting namespace. If specified, this function will be used when
calling the :meth:`parse2func` method
insert_at: int
The position where the given `func` should be inserted. If None,
it will be appended at the end and used when calling the
:meth:`parse2func` method
interprete: bool
If True (default), the docstrings are interpreted and switches and
lists are automatically inserted (see the
[interpretation-docs]_
epilog_sections: list of str
The headers of the sections to extract. If None, the
:attr:`epilog_sections` attribute is used
overwrite: bool
If True, overwrite the existing epilog and the existing description
of the parser
append_epilog: bool
If True, append to the existing epilog
Returns
-------
function
Either the function that can be used as a decorator (if `func` is
``None``), or the given `func` itself.
Examples
--------
Use this method as a decorator::
>>> @parser.setup_args
... def do_something(a=1):
'''
Just an example
Parameters
----------
a: int
A number to increment by one
'''
return a + 1
>>> args = parser.parse_args('-a 2'.split())
Or by specifying the setup_as function::
>>> @parser.setup_args(setup_as='func')
... def do_something(a=1):
'''
Just an example
Parameters
----------
a: int
A number to increment by one
'''
return a + 1
>>> args = parser.parse_args('-a 2'.split())
>>> args.func is do_something
>>> parser.parse2func('-a 2'.split())
3
References
----------
.. [interpretation-docs]
http://funcargparse.readthedocs.io/en/latest/docstring_interpretation.html)
"""
def setup(func):
# insert the function
if insert_at is None:
self._used_functions.append(func)
else:
self._used_functions.insert(insert_at, func)
args_dict = self.unfinished_arguments
# save the function to use in parse2funcs
if setup_as:
args_dict[setup_as] = dict(
long=setup_as, default=func, help=argparse.SUPPRESS)
self._setup_as = setup_as
# create arguments
args, varargs, varkw, defaults = inspect.getargspec(func)
full_doc = docstrings.dedents(inspect.getdoc(func))
summary = docstrings.get_full_description(full_doc)
if summary:
if not self.description or overwrite:
self.description = summary
full_doc = docstrings._remove_summary(full_doc)
self.extract_as_epilog(full_doc, epilog_sections, overwrite,
append_epilog)
doc = docstrings._get_section(full_doc, 'Parameters') + '\n'
doc += docstrings._get_section(full_doc, 'Other Parameters')
doc = doc.rstrip()
default_min = len(args or []) - len(defaults or [])
for i, arg in enumerate(args):
if arg == 'self' or arg in args_dict:
continue
arg_doc, dtype = self.get_param_doc(doc, arg)
args_dict[arg] = d = {'dest': arg, 'short': arg.replace('_',
'-'),
'long': arg.replace('_', '-')}
if arg_doc:
d['help'] = arg_doc
if i >= default_min:
d['default'] = defaults[i - default_min]
else:
d['positional'] = True
if interprete and dtype == 'bool' and 'default' in d:
d['action'] = 'store_false' if d['default'] else \
'store_true'
elif interprete and dtype:
if dtype.startswith('list of'):
d['nargs'] = '+'
dtype = dtype[7:].strip()
if dtype in ['str', 'string', 'strings']:
d['type'] = six.text_type
if dtype == 'strings':
dtype = 'string'
else:
try:
d['type'] = getattr(builtins, dtype)
except AttributeError:
try: # maybe the dtype has a final 's'
d['type'] = getattr(builtins, dtype[:-1])
dtype = dtype[:-1]
except AttributeError:
pass
d['metavar'] = dtype
return func
if func is None:
return setup
else:
return setup(func)
@docstrings.get_sectionsf('FuncArgParser.add_subparsers')
@docstrings.dedent
def add_subparsers(self, *args, **kwargs):
"""
Add subparsers to this parser
Parameters
----------
``*args, **kwargs``
As specified by the original
:meth:`argparse.ArgumentParser.add_subparsers` method
chain: bool
Default: False. If True, It is enabled to chain subparsers"""
chain = kwargs.pop('chain', None)
ret = super(FuncArgParser, self).add_subparsers(*args, **kwargs)
if chain:
self._chain_subparsers = True
self._subparsers_action = ret
return ret
@docstrings.dedent
def setup_subparser(
self, func=None, setup_as=None, insert_at=None, interprete=True,
epilog_sections=None, overwrite=False, append_epilog=True,
return_parser=False, name=None, **kwargs):
"""
Create a subparser with the name of the given function
Parameters are the same as for the :meth:`setup_args` function, other
parameters are parsed to the :meth:`add_subparsers` method if (and only
if) this method has not already been called.
Parameters
----------
%(FuncArgParser.setup_args.parameters)s
return_parser: bool
If True, the create parser is returned instead of the function
name: str
The name of the created parser. If None, the function name is used
and underscores (``'_'``) are replaced by minus (``'-'``)
``**kwargs``
Any other parameter that is passed to the add_parser method that
creates the parser
Other Parameters
----------------
Returns
-------
FuncArgParser or %(FuncArgParser.setup_args.returns)s
If return_parser is True, the created subparser is returned
Examples
--------
Use this method as a decorator::
>>> from funcargparser import FuncArgParser
>>> parser = FuncArgParser()
>>> @parser.setup_subparser
... def my_func(my_argument=None):
... pass
>>> args = parser.parse_args('my-func -my-argument 1'.split())
"""
def setup(func):
if self._subparsers_action is None:
raise RuntimeError(
"No subparsers have yet been created! Run the "
"add_subparsers method first!")
# replace underscore by '-'
name2use = name
if name2use is None:
name2use = func.__name__.replace('_', '-')
kwargs.setdefault('help', docstrings.get_summary(
docstrings.dedents(inspect.getdoc(func))))
parser = self._subparsers_action.add_parser(name2use, **kwargs)
parser.setup_args(
func, setup_as=setup_as, insert_at=insert_at,
interprete=interprete, epilog_sections=epilog_sections,
overwrite=overwrite, append_epilog=append_epilog)
return func, parser
if func is None:
return lambda f: setup(f)[0]
else:
return setup(func)[int(return_parser)]
    @docstrings.get_sectionsf('FuncArgParser.update_arg')
    @docstrings.dedent
    def update_arg(self, arg, if_existent=None, **kwargs):
        """
        Update the `add_argument` data for the given parameter

        Parameters
        ----------
        arg: str
            The name of the function argument
        if_existent: bool or None
            If True, the argument is updated. If None (default), the argument
            is only updated, if it exists. Otherwise, if False, the given
            ``**kwargs`` are only used if the argument is not yet existing
        ``**kwargs``
            The keyword arguments any parameter for the
            :meth:`argparse.ArgumentParser.add_argument` method
        """
        # True -> always update; None -> update only existing entries
        if if_existent or (if_existent is None and
                           arg in self.unfinished_arguments):
            self.unfinished_arguments[arg].update(kwargs)
        # False (or another falsy non-None value) -> register if missing
        elif not if_existent and if_existent is not None:
            self.unfinished_arguments.setdefault(arg, kwargs)
@docstrings.dedent
def update_argf(self, arg, **kwargs):
"""
Update the arguments as a decorator
Parameters
---------
%(FuncArgParser.update_arg.parameters)s
Examples
--------
Use this method as a decorator::
>>> from funcargparser import FuncArgParser
>>> parser = FuncArgParser()
>>> @parser.update_argf('my_argument', type=int)
... def my_func(my_argument=None):
... pass
>>> args = parser.parse_args('my-func -my-argument 1'.split())
>>> isinstance(args.my_argument, int)
True
See Also
--------
update_arg"""
return self._as_decorator('update_arg', arg, **kwargs)
def _as_decorator(self, funcname, *args, **kwargs):
def func_decorator(func):
success = False
for parser in self._get_corresponding_parsers(func):
getattr(parser, funcname)(*args, **kwargs)
success = True
if not success:
raise ValueError(
"Could not figure out to which this %s belongs" % func)
return func
return func_decorator
def _get_corresponding_parsers(self, func):
"""Get the parser that has been set up by the given `function`"""
if func in self._used_functions:
yield self
if self._subparsers_action is not None:
for parser in self._subparsers_action.choices.values():
for sp in parser._get_corresponding_parsers(func):
yield sp
def pop_arg(self, *args, **kwargs):
"""Delete a previously defined argument from the parser
"""
return self.unfinished_arguments.pop(*args, **kwargs)
def pop_argf(self, *args, **kwargs):
"""Delete a previously defined argument from the parser via decorators
Same as :meth:`pop_arg` but it can be used as a decorator"""
return self._as_decorator('pop_arg', *args, **kwargs)
def pop_key(self, arg, key, *args, **kwargs):
    """Delete one previously defined ``add_argument`` keyword.

    Removes `key` from the settings dictionary stored for `arg` in
    :attr:`unfinished_arguments`; extra arguments are forwarded to
    :meth:`dict.pop` (e.g. a default value). Returns the popped value."""
    settings = self.unfinished_arguments[arg]
    return settings.pop(key, *args, **kwargs)
def pop_keyf(self, *args, **kwargs):
    """Delete a previously defined key for the `add_argument`

    Same as :meth:`pop_key` but it can be used as a decorator"""
    return self._as_decorator('pop_key', *args, **kwargs)
def create_arguments(self, subparsers=False):
    """Create and add the arguments

    Parameters
    ----------
    subparsers: bool
        If True, the arguments of the subparsers are also created

    Returns
    -------
    list
        The actions returned by ``add_argument`` for every created
        argument

    Raises
    ------
    ValueError
        If this parser has already been finalized, or if an argument
        defines neither a short nor a long name
    """
    ret = []
    if not self._finalized:
        for arg, d in self.unfinished_arguments.items():
            try:
                # NOTE: the pops below mutate the stored settings dicts,
                # i.e. this loop is destructive. The _finalized flag
                # guards against running it a second time.
                not_positional = int(not d.pop('positional', False))
                short = d.pop('short', None)
                long_name = d.pop('long', None)
                if short is None and long_name is None:
                    raise ValueError(
                        "Either a short (-) or a long (--) argument must "
                        "be provided!")
                if not not_positional:
                    # positional arguments get exactly one plain name and
                    # may not define a dest
                    short = arg
                    long_name = None
                    d.pop('dest', None)
                if short == long_name:
                    long_name = None
                args = []
                if short:
                    args.append('-' * not_positional + short)
                if long_name:
                    args.append('--' * not_positional + long_name)
                # an argument may be routed to an argument group instead
                # of the parser itself
                group = d.pop('group', self)
                if d.get('action') in ['store_true', 'store_false']:
                    # flag actions do not accept a metavar
                    d.pop('metavar', None)
                ret.append(group.add_argument(*args, **d))
            except Exception:
                print('Error while creating argument %s' % arg)
                raise
    else:
        raise ValueError('Parser has already been finalized!')
    self._finalized = True
    if subparsers and self._subparsers_action is not None:
        for parser in self._subparsers_action.choices.values():
            parser.create_arguments(True)
    return ret
def append2help(self, arg, s):
    """Append the given string to the help of argument `arg`

    Parameters
    ----------
    arg: str
        The function argument
    s: str
        The string to append to the help"""
    entry = self.unfinished_arguments[arg]
    entry['help'] = entry['help'] + s
def append2helpf(self, arg, s):
    """Append the given string to the help of argument `arg`

    Same as :meth:`append2help` but it can be used as a decorator.

    Parameters
    ----------
    arg: str
        The function argument
    s: str
        The string to append to the help"""
    return self._as_decorator('append2help', arg, s)
@staticmethod
def format_bold(section, text):
"""Make a bold formatting for the section header"""
return '**%s**\n\n%s' % (section, text)
@staticmethod
def format_rubric(section, text):
    """Format the `section` header as an rst ``.. rubric::`` directive
    above `text`."""
    return '.. rubric:: %s\n\n%s' % (section, text)
@staticmethod
def format_heading(section, text):
    """Format the `section` header as an underlined rst heading above
    `text`."""
    return '\n'.join([section, '-' * len(section), text])
def format_epilog_section(self, section, text):
    """Format one epilog section using the configured formatter.

    The :attr:`epilog_formatter` attribute is either a key into
    :attr:`_epilog_formatters` or a callable taking ``(section, text)``;
    an unknown, non-callable formatter raises a :class:`KeyError`."""
    choice = self.epilog_formatter
    if choice in self._epilog_formatters:
        formatter = self._epilog_formatters[choice]
    elif callable(choice):
        formatter = choice
    else:
        raise KeyError(choice)
    return formatter(section, text)
def extract_as_epilog(self, text, sections=None, overwrite=False,
                      append=True):
    """Extract epilog sections from a docstring

    Parameters
    ----------
    text: str
        The docstring to use
    sections: list of str
        The headers of the sections to extract. If None, the
        :attr:`epilog_sections` attribute is used
    overwrite: bool
        If True, overwrite the existing epilog
    append: bool
        If True, append to the existing epilog"""
    if sections is None:
        sections = self.epilog_sections
    if ((not self.epilog or overwrite or append) and sections):
        epilog_parts = []
        for sec in sections:
            # BUGFIX: this used to reassign ``text`` itself, so every
            # section after the first was searched for *inside* the
            # previously extracted section's text and was silently lost.
            sec_text = docstrings._get_section(text, sec).strip()
            if sec_text:
                epilog_parts.append(
                    self.format_epilog_section(sec, sec_text))
        if epilog_parts:
            epilog = '\n\n'.join(epilog_parts)
            if overwrite or not self.epilog:
                self.epilog = epilog
            else:
                self.epilog += '\n\n' + epilog
def grouparg(self, arg, my_arg=None, parent_cmds=[]):
    """
    Grouper function for chaining subcommands

    Used as the key function of :func:`itertools.groupby` in
    :meth:`parse_known_args`; it is stateful via the private
    ``__currentarg`` attribute, which remembers the subcommand currently
    being parsed.

    Parameters
    ----------
    arg: str
        The current command line argument that is parsed
    my_arg: str
        The name of this subparser. If None, this parser is the main
        parser and has no parent parser
    parent_cmds: list of str
        The available commands of the parent parsers

    Returns
    -------
    str or None
        The grouping key for the given `arg` or None if the key does
        not correspond to this parser or this parser is the main parser
        and does not have seen a subparser yet

    Notes
    -----
    Quite complicated, there is no real need to deal with this function
    """
    if self._subparsers_action is None:
        return None
    commands = self._subparsers_action.choices
    currentarg = self.__currentarg
    # the default return value is the current argument we are in or the
    # name of the subparser itself
    ret = currentarg or my_arg
    if currentarg is not None:
        # if we are already in a sub command, we use the sub parser
        sp_key = commands[currentarg].grouparg(arg, currentarg, chain(
            commands, parent_cmds))
        if sp_key is None and arg in commands:
            # if the subparser did not recognize the command, we use the
            # command the corresponds to this parser or (of this parser
            # is the parent parser) the current subparser
            self.__currentarg = currentarg = arg
            ret = my_arg or currentarg
        elif sp_key not in commands and arg in parent_cmds:
            # otherwise, if the subparser recognizes the commmand but it is
            # not in the known command of this parser, it must be another
            # command of the subparser and this parser can ignore it
            ret = None
        else:
            # otherwise the command belongs to this subparser (if this one
            # is not the subparser) or the current subparser
            ret = my_arg or currentarg
    elif arg in commands:
        # if the argument is a valid subparser, we return this one
        self.__currentarg = arg
        ret = arg
    elif arg in parent_cmds:
        # if the argument is not a valid subparser but in one of our
        # parents, we return None to signalize that we cannot categorize
        # it
        ret = None
    return ret
def parse_known_args(self, args=None, namespace=None):
    """Parse known arguments, with support for chained subcommands.

    If subparsers were added with ``chain=True`` (see
    :meth:`add_subparsers`), the command line is split into per-command
    groups via the stateful :meth:`grouparg` key function and each group
    is parsed separately. The resulting namespace maps each command name
    (with ``-`` replaced by ``_``) to its own sub-namespace. Otherwise the
    default :class:`argparse.ArgumentParser` behaviour is used.
    """
    if self._chain_subparsers:
        if args is None:
            args = sys.argv[1:]
        choices_d = OrderedDict()
        remainders = OrderedDict()
        main_args = []
        # get the first argument to make sure that everything works
        # (also resets the grouping state used by grouparg)
        cmd = self.__currentarg = None
        for i, (cmd, subargs) in enumerate(groupby(args, self.grouparg)):
            if cmd is None:
                # arguments before any subcommand belong to this parser
                main_args += list(subargs)
            else:
                # replace '-' by underscore
                ns_cmd = cmd.replace('-', '_')
                # main arguments are re-parsed together with every
                # subcommand group so shared options stay available
                choices_d[ns_cmd], remainders[ns_cmd] = super(
                    FuncArgParser, self).parse_known_args(
                        list(chain(main_args, subargs)))
        main_ns, remainders[None] = self.__parse_main(main_args)
        for key, val in vars(main_ns).items():
            choices_d[key] = val
        self.__currentarg = None
        if '__dummy' in choices_d:
            # drop the placeholder subcommand added by __parse_main (py2)
            del choices_d['__dummy']
        return Namespace(**choices_d), list(chain(*remainders.values()))
    # otherwise, use the default behaviour
    return super(FuncArgParser, self).parse_known_args(args, namespace)
def __parse_main(self, args):
    """Parse the main arguments only. This is a work around for python 2.7
    because argparse does not allow to parse arguments without subparsers
    """
    if six.PY2:
        # register a throw-away subcommand so argparse 2.7 accepts a
        # command line; it is removed again in parse_known_args
        self._subparsers_action.add_parser("__dummy")
        return super(FuncArgParser, self).parse_known_args(
            list(args) + ['__dummy'])
    return super(FuncArgParser, self).parse_known_args(args)
# NOTE(review): the ``get_sectionsf('FuncArgParser.update_short')``
# decorator and one of the two consecutive ``dedent`` decorators below
# appear to belong to an ``update_short`` definition that is missing here
# (likely lost during extraction) — confirm against the upstream source.
@docstrings.get_sectionsf('FuncArgParser.update_short')
@docstrings.dedent
@docstrings.dedent
def update_shortf(self, **kwargs):
    """
    Update the short optional arguments belonging to a function

    This method acts exactly like :meth:`update_short` but works as a
    decorator (see :meth:`update_arg` and :meth:`update_argf`)

    Parameters
    ----------
    %(FuncArgParser.update_short.parameters)s

    Returns
    -------
    function
        The function that can be used as a decorator

    Examples
    --------
    Use this method as a decorator::

        >>> @parser.update_shortf(something='s', something_else='se')
        ... def do_something(something=None, something_else=None):
        ...     ...

    See also the examples in :meth:`update_short`.

    See Also
    --------
    update_short, update_longf
    """
    return self._as_decorator('update_short', **kwargs)
@docstrings.get_sectionsf('FuncArgParser.update_long')
@docstrings.dedent
def update_long(self, **kwargs):
    """
    Update the long optional arguments (those with two leading '-')

    This method updates the long argument names for the specified function
    arguments as stored in :attr:`unfinished_arguments`

    Parameters
    ----------
    ``**kwargs``
        Keywords must be keys in the :attr:`unfinished_arguments`
        dictionary (i.e. keywords of the root functions), values the long
        argument names

    Examples
    --------
    Setting::

        >>> parser.update_long(something='s', something_else='se')

    is basically the same as::

        >>> parser.update_arg('something', long='s')
        >>> parser.update_arg('something_else', long='se')

    which in turn is basically comparable to::

        >>> parser.add_argument('--s', dest='something', ...)
        >>> parser.add_argument('--se', dest='something_else', ...)

    See Also
    --------
    update_short, update_longf"""
    for key, val in six.iteritems(kwargs):
        self.update_arg(key, long=val)
@docstrings.dedent
def update_longf(self, **kwargs):
    """
    Update the long optional arguments belonging to a function

    This method acts exactly like :meth:`update_long` but works as a
    decorator (see :meth:`update_arg` and :meth:`update_argf`)

    Parameters
    ----------
    %(FuncArgParser.update_long.parameters)s

    Returns
    -------
    function
        The function that can be used as a decorator

    Examples
    --------
    Use this method as a decorator::

        >>> @parser.update_longf(something='s', something_else='se')
        ... def do_something(something=None, something_else=None):
        ...     ...

    See also the examples in :meth:`update_long`.

    See Also
    --------
    update_long, update_shortf
    """
    return self._as_decorator('update_long', **kwargs)
def parse2func(self, args=None, func=None):
    """Parse the command line arguments to the setup function

    This method parses the given command line arguments to the function
    used in the :meth:`setup_args` method to setup up this parser

    Parameters
    ----------
    args: list
        The list of command line arguments
    func: function
        An alternative function to use. If None, the last function or the
        one specified through the `setup_as` parameter in the
        :meth:`setup_args` is used.

    Returns
    -------
    object
        What ever is returned by the called function

    Note
    ----
    This method does not cover subparsers!"""
    kws = vars(self.parse_args(args))
    if func is None:
        if self._setup_as:
            # the function was stored in the namespace under _setup_as;
            # pop it so it is not passed as a keyword to itself
            func = kws.pop(self._setup_as)
        else:
            func = self._used_functions[-1]
    return func(**kws)
def parse_known2func(self, args=None, func=None):
    """Parse the known command line arguments to the setup function

    Like :meth:`parse2func` but based on :meth:`parse_known_args`, so
    unrecognized arguments are returned instead of raising an error.

    Parameters
    ----------
    args: list
        The list of command line arguments
    func: function or str
        An alternative function to use. If None, the last function or the
        one specified through the `setup_as` parameter in the
        :meth:`setup_args` is used.

    Returns
    -------
    object
        What ever is returned by the called function
    list
        The remaining command line arguments that could not be interpreted

    Note
    ----
    This method does not cover subparsers!"""
    ns, remainder = self.parse_known_args(args)
    kws = vars(ns)
    if func is None:
        if self._setup_as:
            # pop the stored function so it is not passed back to itself
            func = kws.pop(self._setup_as)
        else:
            func = self._used_functions[-1]
    return func(**kws), remainder
def parse_chained(self, args=None):
    """
    Parse the arguments directly to the functions used for setup

    The command line arguments are parsed and then dispatched to the
    function(s) that were registered via :meth:`setup_args`.

    Parameters
    ----------
    args: list
        The arguments parsed to the :meth:`parse_args` function

    Returns
    -------
    argparse.Namespace
        The namespace with mapping from command name to the function
        return

    See also
    --------
    parse_known_chained
    """
    namespace = self.parse_args(args)
    return self._parse2subparser_funcs(vars(namespace))
def parse_known_chained(self, args=None):
    """
    Parse the known arguments directly to the functions used for setup

    Like :meth:`parse_chained` but based on :meth:`parse_known_args`, so
    unrecognized arguments are returned instead of raising an error.

    Parameters
    ----------
    args: list
        The arguments parsed to the :meth:`parse_args` function

    Returns
    -------
    argparse.Namespace
        The namespace with mapping from command name to the function
        return
    list
        The remaining arguments that could not be interpreted

    See also
    --------
    parse_chained
    """
    ns, remainder = self.parse_known_args(args)
    kws = vars(ns)
    return self._parse2subparser_funcs(kws), remainder
def _parse2subparser_funcs(self, kws):
    """
    Recursive function to parse arguments to chained parsers

    If no subcommand key is present in `kws`, the setup function of this
    parser is called with the remaining keywords; otherwise the call is
    delegated to the matching subparsers and a namespace mapping command
    names to their return values is returned.
    """
    choices = getattr(self._subparsers_action, 'choices', {})
    # map underscored command names (as found in the parsed namespace)
    # back to the original (possibly dashed) subparser names
    replaced = {key.replace('-', '_'): key for key in choices}
    sp_commands = set(replaced).intersection(kws)
    if not sp_commands:
        if self._setup_as is not None:
            func = kws.pop(self._setup_as)
        else:
            try:
                func = self._used_functions[-1]
            except IndexError:
                # no function was registered via setup_args
                return None
        # NOTE(review): ``difference(choices)`` filters the original
        # (dashed) command names, while ``kws`` keys are underscored —
        # verify this is intended for dashed subcommand names.
        return func(**{
            key: kws[key] for key in set(kws).difference(choices)})
    else:
        ret = {}
        for key in sp_commands:
            # NOTE(review): ``key`` comes from ``replaced`` and is already
            # underscored, so this replace looks like a no-op — confirm.
            ret[key.replace('-', '_')] = \
                choices[replaced[key]]._parse2subparser_funcs(
                    vars(kws[key]))
        return Namespace(**ret)
def get_subparser(self, name):
    """
    Convenience method to get a certain subparser

    Parameters
    ----------
    name: str
        The name of the subparser

    Returns
    -------
    FuncArgParser
        The subparser registered under `name`

    Raises
    ------
    ValueError
        If no subparsers have been added to this parser yet
    """
    action = self._subparsers_action
    if action is None:
        raise ValueError("%s has no subparsers defined!" % self)
    return action.choices[name]
|
Chilipp/funcargparse | funcargparse/__init__.py | FuncArgParser.update_long | python | def update_long(self, **kwargs):
for key, val in six.iteritems(kwargs):
self.update_arg(key, long=val) | Update the long optional arguments (those with two leading '-')
This method updates the long argument names for the specified function
arguments as stored in :attr:`unfinished_arguments`
Parameters
----------
``**kwargs``
Keywords must be keys in the :attr:`unfinished_arguments`
dictionary (i.e. keywords of the root functions), values the long
argument names
Examples
--------
Setting::
>>> parser.update_long(something='s', something_else='se')
is basically the same as::
>>> parser.update_arg('something', long='s')
>>> parser.update_arg('something_else', long='se')
which in turn is basically comparable to::
>>> parser.add_argument('--s', dest='something', ...)
>>> parser.add_argument('--se', dest='something_else', ...)
See Also
--------
update_short, update_longf | train | https://github.com/Chilipp/funcargparse/blob/398ce8e7fa5aa35c465215446bda151cf1ecf7ad/funcargparse/__init__.py#L792-L826 | null | class FuncArgParser(ArgumentParser):
"""Subclass of an argument parser that get's parts of the information
from a given function"""
_finalized = False
#: The unfinished arguments after the setup
unfinished_arguments = {}
#: The sections to extract from a function docstring that should be used
#: in the epilog of this parser. See also the :meth:`setup_args` method
epilog_sections = ['Notes', 'References']
#: The formatter specification for the epilog. This can either be a string
#: out of 'header', 'bold', or
#: 'rubric' or a callable (i.e. function) that takes two arguments,
#: the section title and the section text, and returns a string.
#:
#: 'heading'
#: Use section headers such as::
#:
#: Notes
#: -----
#: 'bold'
#: Just make a bold header for the section, e.g. ``**Notes**``
#: 'rubric'
#: Use a rubric rst directive, e.g. ``.. rubric:: Notes``
#:
#: .. warning::
#:
#: When building a sphinx documentation using the sphinx-argparse
#: module, this value should be set to ``'bold'`` or ``'rubric'``! Just
#: add this two lines to your conf.py:
#:
#: .. code-block:: python
#:
#: import funcargparse
#: funcargparse.FuncArgParser.epilog_formatter = 'rubric'
epilog_formatter = 'heading'
def __init__(self, *args, **kwargs):
    """
    Parameters
    ----------
    ``*args,**kwargs``
        Theses arguments are determined by the
        :class:`argparse.ArgumentParser` base class. Note that by default,
        we use a :class:`argparse.RawTextHelpFormatter` class for the
        `formatter_class` keyword, whereas the
        :class:`argparse.ArgumentParser` uses a
        :class:`argparse.HelpFormatter`

    Other Parameters
    ----------------
    epilog_sections: list of str
        The default sections to use for the epilog (see the
        :attr:`epilog_sections` attribute). They can also be specified
        each time the :meth:`setup_args` method is called
    epilog_formatter: {'header', 'bold', 'rubric'} or function
        Specify how the epilog sections should be formatted and defaults
        to the :attr:`epilog_formatter` attribute. This can either be a
        string out of 'header', 'bold', or 'rubric' or a callable (i.e.
        function) that takes two arguments, the section title and the
        section text, and returns a string.

        'heading'
            Use section headers such as::

                Notes
                -----
        'bold'
            Just make a bold header for the section, e.g. ``**Notes**``
        'rubric'
            Use a rubric rst directive, e.g. ``.. rubric:: Notes``
    """
    self._subparsers_action = None
    kwargs.setdefault('formatter_class', argparse.RawTextHelpFormatter)
    # pop the custom keywords before delegating to ArgumentParser, which
    # would reject them
    epilog_sections = kwargs.pop('epilog_sections', None)
    if epilog_sections is not None:
        self.epilog_sections = epilog_sections
    epilog_formatter = kwargs.pop('epilog_formatter', None)
    if epilog_formatter is not None:
        self.epilog_formatter = epilog_formatter
    super(FuncArgParser, self).__init__(*args, **kwargs)
    # per-instance mapping of argument name -> add_argument settings,
    # filled by setup_args and consumed by create_arguments
    self.unfinished_arguments = OrderedDict()
    # functions registered via setup_args, in registration order
    self._used_functions = []
    # state for grouparg while chaining subcommands
    self.__currentarg = None
    self._chain_subparsers = False
    # namespace attribute under which the setup function is stored
    self._setup_as = None
    self._epilog_formatters = {'heading': self.format_heading,
                               'bold': self.format_bold,
                               'rubric': self.format_rubric}
@staticmethod
def get_param_doc(doc, param):
    """Get the documentation and datatype for a parameter

    This function returns the documentation and the argument for a
    napoleon like structured docstring `doc`

    Parameters
    ----------
    doc: str
        The base docstring to use
    param: str
        The argument to use

    Returns
    -------
    str
        The documentation of the given `param`
    str
        The datatype of the given `param`"""
    # keep_params_s/keep_types_s come from the project docstrings module;
    # presumably they extract the parameter's (or type's) subsection
    arg_doc = docstrings.keep_params_s(doc, [param]) or \
        docstrings.keep_types_s(doc, [param])
    dtype = None
    if arg_doc:
        lines = arg_doc.splitlines()
        # drop the signature line, keep the (dedented) description
        arg_doc = dedents('\n' + '\n'.join(lines[1:]))
        # a 'name: type' first line yields the declared datatype
        param_desc = lines[0].split(':', 1)
        if len(param_desc) > 1:
            dtype = param_desc[1].strip()
    return arg_doc, dtype
@docstrings.get_sectionsf('FuncArgParser.setup_args',
                          sections=['Parameters', 'Returns'])
@docstrings.dedent
def setup_args(self, func=None, setup_as=None, insert_at=None,
               interprete=True, epilog_sections=None,
               overwrite=False, append_epilog=True):
    """
    Add the parameters from the given `func` to the parameter settings

    Parameters
    ----------
    func: function
        The function to use. If None, a function will be returned that can
        be used as a decorator
    setup_as: str
        The attribute that shall be assigned to the function in the
        resulting namespace. If specified, this function will be used when
        calling the :meth:`parse2func` method
    insert_at: int
        The position where the given `func` should be inserted. If None,
        it will be appended at the end and used when calling the
        :meth:`parse2func` method
    interprete: bool
        If True (default), the docstrings are interpreted and switches and
        lists are automatically inserted (see the
        [interpretation-docs]_
    epilog_sections: list of str
        The headers of the sections to extract. If None, the
        :attr:`epilog_sections` attribute is used
    overwrite: bool
        If True, overwrite the existing epilog and the existing
        description of the parser
    append_epilog: bool
        If True, append to the existing epilog

    Returns
    -------
    function
        Either the function that can be used as a decorator (if `func` is
        ``None``), or the given `func` itself.

    Examples
    --------
    Use this method as a decorator::

        >>> @parser.setup_args
        ... def do_something(a=1):
        ...     '''
        ...     Just an example
        ...
        ...     Parameters
        ...     ----------
        ...     a: int
        ...         A number to increment by one
        ...     '''
        ...     return a + 1

        >>> args = parser.parse_args('-a 2'.split())

    Or by specifying the setup_as function::

        >>> @parser.setup_args(setup_as='func')
        ... def do_something(a=1):
        ...     '''
        ...     Just an example
        ...
        ...     Parameters
        ...     ----------
        ...     a: int
        ...         A number to increment by one
        ...     '''
        ...     return a + 1

        >>> args = parser.parse_args('-a 2'.split())
        >>> args.func is do_something
        >>> parser.parse2func('-a 2'.split())
        3

    References
    ----------
    .. [interpretation-docs]
        http://funcargparse.readthedocs.io/en/latest/docstring_interpretation.html)
    """
    def setup(func):
        # insert the function
        if insert_at is None:
            self._used_functions.append(func)
        else:
            self._used_functions.insert(insert_at, func)
        args_dict = self.unfinished_arguments
        # save the function to use in parse2funcs
        if setup_as:
            args_dict[setup_as] = dict(
                long=setup_as, default=func, help=argparse.SUPPRESS)
        self._setup_as = setup_as
        # create arguments
        # NOTE(review): inspect.getargspec is deprecated and removed in
        # Python 3.11 — consider inspect.getfullargspec/signature.
        args, varargs, varkw, defaults = inspect.getargspec(func)
        full_doc = docstrings.dedents(inspect.getdoc(func))
        summary = docstrings.get_full_description(full_doc)
        if summary:
            # the function summary becomes the parser description
            if not self.description or overwrite:
                self.description = summary
            full_doc = docstrings._remove_summary(full_doc)
        self.extract_as_epilog(full_doc, epilog_sections, overwrite,
                               append_epilog)
        doc = docstrings._get_section(full_doc, 'Parameters') + '\n'
        doc += docstrings._get_section(full_doc, 'Other Parameters')
        doc = doc.rstrip()
        # arguments before this index have no default -> positional
        default_min = len(args or []) - len(defaults or [])
        for i, arg in enumerate(args):
            if arg == 'self' or arg in args_dict:
                continue
            arg_doc, dtype = self.get_param_doc(doc, arg)
            args_dict[arg] = d = {'dest': arg, 'short': arg.replace('_',
                                                                    '-'),
                                  'long': arg.replace('_', '-')}
            if arg_doc:
                d['help'] = arg_doc
            if i >= default_min:
                d['default'] = defaults[i - default_min]
            else:
                d['positional'] = True
            if interprete and dtype == 'bool' and 'default' in d:
                # booleans with a default become flags
                d['action'] = 'store_false' if d['default'] else \
                    'store_true'
            elif interprete and dtype:
                if dtype.startswith('list of'):
                    # 'list of X' in the docstring maps to nargs='+'
                    d['nargs'] = '+'
                    dtype = dtype[7:].strip()
                if dtype in ['str', 'string', 'strings']:
                    d['type'] = six.text_type
                    if dtype == 'strings':
                        dtype = 'string'
                else:
                    try:
                        # try to resolve the documented type to a builtin
                        d['type'] = getattr(builtins, dtype)
                    except AttributeError:
                        try:  # maybe the dtype has a final 's'
                            d['type'] = getattr(builtins, dtype[:-1])
                            dtype = dtype[:-1]
                        except AttributeError:
                            pass
                d['metavar'] = dtype
        return func
    if func is None:
        return setup
    else:
        return setup(func)
@docstrings.get_sectionsf('FuncArgParser.add_subparsers')
@docstrings.dedent
def add_subparsers(self, *args, **kwargs):
    """
    Add subparsers to this parser

    Parameters
    ----------
    ``*args, **kwargs``
        As specified by the original
        :meth:`argparse.ArgumentParser.add_subparsers` method
    chain: bool
        Default: False. If True, It is enabled to chain subparsers"""
    # 'chain' is our own keyword; pop it before delegating to argparse
    chain = kwargs.pop('chain', None)
    ret = super(FuncArgParser, self).add_subparsers(*args, **kwargs)
    if chain:
        self._chain_subparsers = True
    # remember the action so methods like get_subparser can reach the
    # registered subparsers
    self._subparsers_action = ret
    return ret
@docstrings.dedent
def setup_subparser(
        self, func=None, setup_as=None, insert_at=None, interprete=True,
        epilog_sections=None, overwrite=False, append_epilog=True,
        return_parser=False, name=None, **kwargs):
    """
    Create a subparser with the name of the given function

    Parameters are the same as for the :meth:`setup_args` function, other
    parameters are parsed to the :meth:`add_subparsers` method if (and
    only if) this method has not already been called.

    Parameters
    ----------
    %(FuncArgParser.setup_args.parameters)s
    return_parser: bool
        If True, the created parser is returned instead of the function
    name: str
        The name of the created parser. If None, the function name is
        used and underscores (``'_'``) are replaced by minus (``'-'``)
    ``**kwargs``
        Any other parameter that is passed to the add_parser method that
        creates the parser

    Returns
    -------
    FuncArgParser or %(FuncArgParser.setup_args.returns)s
        If return_parser is True, the created subparser is returned

    Examples
    --------
    Use this method as a decorator::

        >>> from funcargparser import FuncArgParser
        >>> parser = FuncArgParser()

        >>> @parser.setup_subparser
        ... def my_func(my_argument=None):
        ...     pass

        >>> args = parser.parse_args('my-func -my-argument 1'.split())
    """
    def setup(func):
        if self._subparsers_action is None:
            raise RuntimeError(
                "No subparsers have yet been created! Run the "
                "add_subparsers method first!")
        # replace underscore by '-'
        name2use = name
        if name2use is None:
            name2use = func.__name__.replace('_', '-')
        # the function summary becomes the subcommand help text
        kwargs.setdefault('help', docstrings.get_summary(
            docstrings.dedents(inspect.getdoc(func))))
        parser = self._subparsers_action.add_parser(name2use, **kwargs)
        parser.setup_args(
            func, setup_as=setup_as, insert_at=insert_at,
            interprete=interprete, epilog_sections=epilog_sections,
            overwrite=overwrite, append_epilog=append_epilog)
        return func, parser
    if func is None:
        # used as a decorator with arguments: return only the function
        return lambda f: setup(f)[0]
    else:
        return setup(func)[int(return_parser)]
@docstrings.get_sectionsf('FuncArgParser.update_arg')
@docstrings.dedent
def update_arg(self, arg, if_existent=None, **kwargs):
    """
    Update the `add_argument` data for the given parameter

    Parameters
    ----------
    arg: str
        The name of the function argument
    if_existent: bool or None
        If True, the argument is updated. If None (default), the argument
        is only updated, if it exists. Otherwise, if False, the given
        ``**kwargs`` are only used if the argument is not yet existing
    ``**kwargs``
        The keyword arguments any parameter for the
        :meth:`argparse.ArgumentParser.add_argument` method
    """
    # True -> always update (raises KeyError for unknown args);
    # None -> update only when already registered; False -> setdefault
    if if_existent or (if_existent is None and
                       arg in self.unfinished_arguments):
        self.unfinished_arguments[arg].update(kwargs)
    elif not if_existent and if_existent is not None:
        self.unfinished_arguments.setdefault(arg, kwargs)
@docstrings.dedent
def update_argf(self, arg, **kwargs):
"""
Update the arguments as a decorator
Parameters
---------
%(FuncArgParser.update_arg.parameters)s
Examples
--------
Use this method as a decorator::
>>> from funcargparser import FuncArgParser
>>> parser = FuncArgParser()
>>> @parser.update_argf('my_argument', type=int)
... def my_func(my_argument=None):
... pass
>>> args = parser.parse_args('my-func -my-argument 1'.split())
>>> isinstance(args.my_argument, int)
True
See Also
--------
update_arg"""
return self._as_decorator('update_arg', arg, **kwargs)
def _as_decorator(self, funcname, *args, **kwargs):
def func_decorator(func):
success = False
for parser in self._get_corresponding_parsers(func):
getattr(parser, funcname)(*args, **kwargs)
success = True
if not success:
raise ValueError(
"Could not figure out to which this %s belongs" % func)
return func
return func_decorator
def _get_corresponding_parsers(self, func):
"""Get the parser that has been set up by the given `function`"""
if func in self._used_functions:
yield self
if self._subparsers_action is not None:
for parser in self._subparsers_action.choices.values():
for sp in parser._get_corresponding_parsers(func):
yield sp
def pop_arg(self, *args, **kwargs):
"""Delete a previously defined argument from the parser
"""
return self.unfinished_arguments.pop(*args, **kwargs)
def pop_argf(self, *args, **kwargs):
"""Delete a previously defined argument from the parser via decorators
Same as :meth:`pop_arg` but it can be used as a decorator"""
return self._as_decorator('pop_arg', *args, **kwargs)
def pop_key(self, arg, key, *args, **kwargs):
"""Delete a previously defined key for the `add_argument`
"""
return self.unfinished_arguments[arg].pop(key, *args, **kwargs)
def pop_keyf(self, *args, **kwargs):
"""Delete a previously defined key for the `add_argument`
Same as :meth:`pop_key` but it can be used as a decorator"""
return self._as_decorator('pop_key', *args, **kwargs)
def create_arguments(self, subparsers=False):
"""Create and add the arguments
Parameters
----------
subparsers: bool
If True, the arguments of the subparsers are also created"""
ret = []
if not self._finalized:
for arg, d in self.unfinished_arguments.items():
try:
not_positional = int(not d.pop('positional', False))
short = d.pop('short', None)
long_name = d.pop('long', None)
if short is None and long_name is None:
raise ValueError(
"Either a short (-) or a long (--) argument must "
"be provided!")
if not not_positional:
short = arg
long_name = None
d.pop('dest', None)
if short == long_name:
long_name = None
args = []
if short:
args.append('-' * not_positional + short)
if long_name:
args.append('--' * not_positional + long_name)
group = d.pop('group', self)
if d.get('action') in ['store_true', 'store_false']:
d.pop('metavar', None)
ret.append(group.add_argument(*args, **d))
except Exception:
print('Error while creating argument %s' % arg)
raise
else:
raise ValueError('Parser has already been finalized!')
self._finalized = True
if subparsers and self._subparsers_action is not None:
for parser in self._subparsers_action.choices.values():
parser.create_arguments(True)
return ret
def append2help(self, arg, s):
"""Append the given string to the help of argument `arg`
Parameters
----------
arg: str
The function argument
s: str
The string to append to the help"""
self.unfinished_arguments[arg]['help'] += s
def append2helpf(self, arg, s):
"""Append the given string to the help of argument `arg`
Parameters
----------
arg: str
The function argument
s: str
The string to append to the help"""
return self._as_decorator('append2help', arg, s)
@staticmethod
def format_bold(section, text):
"""Make a bold formatting for the section header"""
return '**%s**\n\n%s' % (section, text)
@staticmethod
def format_rubric(section, text):
"""Make a bold formatting for the section header"""
return '.. rubric:: %s\n\n%s' % (section, text)
@staticmethod
def format_heading(section, text):
return '\n'.join([section, '-' * len(section), text])
def format_epilog_section(self, section, text):
"""Format a section for the epilog by inserting a format"""
try:
func = self._epilog_formatters[self.epilog_formatter]
except KeyError:
if not callable(self.epilog_formatter):
raise
func = self.epilog_formatter
return func(section, text)
def extract_as_epilog(self, text, sections=None, overwrite=False,
append=True):
"""Extract epilog sections from the a docstring
Parameters
----------
text
The docstring to use
sections: list of str
The headers of the sections to extract. If None, the
:attr:`epilog_sections` attribute is used
overwrite: bool
If True, overwrite the existing epilog
append: bool
If True, append to the existing epilog"""
if sections is None:
sections = self.epilog_sections
if ((not self.epilog or overwrite or append) and sections):
epilog_parts = []
for sec in sections:
text = docstrings._get_section(text, sec).strip()
if text:
epilog_parts.append(
self.format_epilog_section(sec, text))
if epilog_parts:
epilog = '\n\n'.join(epilog_parts)
if overwrite or not self.epilog:
self.epilog = epilog
else:
self.epilog += '\n\n' + epilog
def grouparg(self, arg, my_arg=None, parent_cmds=[]):
"""
Grouper function for chaining subcommands
Parameters
----------
arg: str
The current command line argument that is parsed
my_arg: str
The name of this subparser. If None, this parser is the main
parser and has no parent parser
parent_cmds: list of str
The available commands of the parent parsers
Returns
-------
str or None
The grouping key for the given `arg` or None if the key does
not correspond to this parser or this parser is the main parser
and does not have seen a subparser yet
Notes
-----
Quite complicated, there is no real need to deal with this function
"""
if self._subparsers_action is None:
return None
commands = self._subparsers_action.choices
currentarg = self.__currentarg
# the default return value is the current argument we are in or the
# name of the subparser itself
ret = currentarg or my_arg
if currentarg is not None:
# if we are already in a sub command, we use the sub parser
sp_key = commands[currentarg].grouparg(arg, currentarg, chain(
commands, parent_cmds))
if sp_key is None and arg in commands:
# if the subparser did not recognize the command, we use the
# command the corresponds to this parser or (of this parser
# is the parent parser) the current subparser
self.__currentarg = currentarg = arg
ret = my_arg or currentarg
elif sp_key not in commands and arg in parent_cmds:
# otherwise, if the subparser recognizes the commmand but it is
# not in the known command of this parser, it must be another
# command of the subparser and this parser can ignore it
ret = None
else:
# otherwise the command belongs to this subparser (if this one
# is not the subparser) or the current subparser
ret = my_arg or currentarg
elif arg in commands:
# if the argument is a valid subparser, we return this one
self.__currentarg = arg
ret = arg
elif arg in parent_cmds:
# if the argument is not a valid subparser but in one of our
# parents, we return None to signalize that we cannot categorize
# it
ret = None
return ret
    def parse_known_args(self, args=None, namespace=None):
        """Parse the known arguments, optionally chaining subcommands.

        When subparsers were added with ``chain=True``, the command line
        is split into groups via :meth:`grouparg` and each subcommand is
        parsed separately.  The returned namespace then maps each
        subcommand name (with ``'-'`` replaced by ``'_'``) to the
        namespace of that subcommand.  Otherwise this behaves like
        :meth:`argparse.ArgumentParser.parse_known_args`.

        Parameters
        ----------
        args: list of str
            The command line arguments. If None, ``sys.argv[1:]`` is used
        namespace: argparse.Namespace
            Only used when subcommand chaining is disabled

        Returns
        -------
        argparse.Namespace
            The parsed arguments
        list
            The arguments that could not be interpreted
        """
        if self._chain_subparsers:
            if args is None:
                args = sys.argv[1:]
            # mapping from subcommand name to its parsed namespace
            choices_d = OrderedDict()
            # mapping from subcommand name to its unparsed remainder
            remainders = OrderedDict()
            main_args = []
            # get the first argument to make sure that everything works
            cmd = self.__currentarg = None
            for i, (cmd, subargs) in enumerate(groupby(args, self.grouparg)):
                if cmd is None:
                    # arguments of the main parser (no subcommand seen yet)
                    main_args += list(subargs)
                else:
                    # replace '-' by underscore
                    ns_cmd = cmd.replace('-', '_')
                    # NOTE(review): the main arguments are re-parsed
                    # together with every single subcommand group here
                    choices_d[ns_cmd], remainders[ns_cmd] = super(
                        FuncArgParser, self).parse_known_args(
                            list(chain(main_args, subargs)))
            main_ns, remainders[None] = self.__parse_main(main_args)
            for key, val in vars(main_ns).items():
                choices_d[key] = val
            # reset the chaining state for the next call
            self.__currentarg = None
            # drop the entry that the Python-2 work-around in
            # __parse_main may have introduced
            if '__dummy' in choices_d:
                del choices_d['__dummy']
            return Namespace(**choices_d), list(chain(*remainders.values()))
        # otherwise, use the default behaviour
        return super(FuncArgParser, self).parse_known_args(args, namespace)
def __parse_main(self, args):
"""Parse the main arguments only. This is a work around for python 2.7
because argparse does not allow to parse arguments without subparsers
"""
if six.PY2:
self._subparsers_action.add_parser("__dummy")
return super(FuncArgParser, self).parse_known_args(
list(args) + ['__dummy'])
return super(FuncArgParser, self).parse_known_args(args)
@docstrings.get_sectionsf('FuncArgParser.update_short')
@docstrings.dedent
def update_short(self, **kwargs):
"""
Update the short optional arguments (those with one leading '-')
This method updates the short argument name for the specified function
arguments as stored in :attr:`unfinished_arguments`
Parameters
----------
``**kwargs``
Keywords must be keys in the :attr:`unfinished_arguments`
dictionary (i.e. keywords of the root functions), values the short
argument names
Examples
--------
Setting::
>>> parser.update_short(something='s', something_else='se')
is basically the same as::
>>> parser.update_arg('something', short='s')
>>> parser.update_arg('something_else', short='se')
which in turn is basically comparable to::
>>> parser.add_argument('-s', '--something', ...)
>>> parser.add_argument('-se', '--something_else', ...)
See Also
--------
update_shortf, update_long"""
for key, val in six.iteritems(kwargs):
self.update_arg(key, short=val)
@docstrings.dedent
def update_shortf(self, **kwargs):
"""
Update the short optional arguments belonging to a function
This method acts exactly like :meth:`update_short` but works as a
decorator (see :meth:`update_arg` and :meth:`update_argf`)
Parameters
----------
%(FuncArgParser.update_short.parameters)s
Returns
-------
function
The function that can be used as a decorator
Examples
--------
Use this method as a decorator::
>>> @parser.update_shortf(something='s', something_else='se')
... def do_something(something=None, something_else=None):
... ...
See also the examples in :meth:`update_short`.
See Also
--------
update_short, update_longf
"""
return self._as_decorator('update_short', **kwargs)
    # NOTE(review): this excerpt carries a duplicated ``@docstrings.dedent``
    # and a ``get_sectionsf('FuncArgParser.update_long')`` decorator on
    # ``update_longf``, which would register *this* docstring under the
    # ``update_long`` key of the docrep registry -- presumably the
    # ``update_long`` method itself was dropped from this copy (the second
    # copy of the class further down defines it separately).
    # TODO confirm against the upstream funcargparse sources.
    @docstrings.get_sectionsf('FuncArgParser.update_long')
    @docstrings.dedent
    @docstrings.dedent
    def update_longf(self, **kwargs):
        """
        Update the long optional arguments belonging to a function
        This method acts exactly like :meth:`update_long` but works as a
        decorator (see :meth:`update_arg` and :meth:`update_argf`)
        Parameters
        ----------
        %(FuncArgParser.update_long.parameters)s
        Returns
        -------
        function
            The function that can be used as a decorator
        Examples
        --------
        Use this method as a decorator::
            >>> @parser.update_shortf(something='s', something_else='se')
            ... def do_something(something=None, something_else=None):
            ... ...
        See also the examples in :meth:`update_long`.
        See Also
        --------
        update_short, update_longf
        """
        # delegate to the generic decorator factory
        return self._as_decorator('update_long', **kwargs)
def parse2func(self, args=None, func=None):
"""Parse the command line arguments to the setup function
This method parses the given command line arguments to the function
used in the :meth:`setup_args` method to setup up this parser
Parameters
----------
args: list
The list of command line arguments
func: function
An alternative function to use. If None, the last function or the
one specified through the `setup_as` parameter in the
:meth:`setup_args` is used.
Returns
-------
object
What ever is returned by the called function
Note
----
This method does not cover subparsers!"""
kws = vars(self.parse_args(args))
if func is None:
if self._setup_as:
func = kws.pop(self._setup_as)
else:
func = self._used_functions[-1]
return func(**kws)
def parse_known2func(self, args=None, func=None):
"""Parse the command line arguments to the setup function
This method parses the given command line arguments to the function
used in the :meth:`setup_args` method to setup up this parser
Parameters
----------
args: list
The list of command line arguments
func: function or str
An alternative function to use. If None, the last function or the
one specified through the `setup_as` parameter in the
:meth:`setup_args` is used.
Returns
-------
object
What ever is returned by the called function
list
The remaining command line arguments that could not be interpreted
Note
----
This method does not cover subparsers!"""
ns, remainder = self.parse_known_args(args)
kws = vars(ns)
if func is None:
if self._setup_as:
func = kws.pop(self._setup_as)
else:
func = self._used_functions[-1]
return func(**kws), remainder
def parse_chained(self, args=None):
"""
Parse the argument directly to the function used for setup
This function parses the command line arguments to the function that
has been used for the :meth:`setup_args`.
Parameters
----------
args: list
The arguments parsed to the :meth:`parse_args` function
Returns
-------
argparse.Namespace
The namespace with mapping from command name to the function
return
See also
--------
parse_known_chained
"""
kws = vars(self.parse_args(args))
return self._parse2subparser_funcs(kws)
def parse_known_chained(self, args=None):
"""
Parse the argument directly to the function used for setup
This function parses the command line arguments to the function that
has been used for the :meth:`setup_args` method.
Parameters
----------
args: list
The arguments parsed to the :meth:`parse_args` function
Returns
-------
argparse.Namespace
The namespace with mapping from command name to the function
return
list
The remaining arguments that could not be interpreted
See also
--------
parse_known
"""
ns, remainder = self.parse_known_args(args)
kws = vars(ns)
return self._parse2subparser_funcs(kws), remainder
    def _parse2subparser_funcs(self, kws):
        """
        Recursive function to parse arguments to chained parsers

        Parameters
        ----------
        kws: dict
            Mapping of parsed argument names to their values, as obtained
            from ``vars(namespace)``

        Returns
        -------
        object or argparse.Namespace or None
            If `kws` contains no subcommand entries, the return value of
            the registered setup function called with the remaining
            keywords (or None if no function was registered). Otherwise
            a namespace mapping each subcommand name (with ``'-'``
            replaced by ``'_'``) to the recursive result of the
            corresponding subparser.
        """
        choices = getattr(self._subparsers_action, 'choices', {})
        # subcommand names as they appear as namespace attributes
        replaced = {key.replace('-', '_'): key for key in choices}
        sp_commands = set(replaced).intersection(kws)
        if not sp_commands:
            if self._setup_as is not None:
                func = kws.pop(self._setup_as)
            else:
                try:
                    func = self._used_functions[-1]
                except IndexError:
                    # no function has been registered for this parser
                    return None
            # NOTE(review): the filter below uses the raw `choices` keys
            # (which may contain '-') instead of the underscored
            # `replaced` keys used for the lookup above -- presumably
            # equivalent in practice, but worth confirming
            return func(**{
                key: kws[key] for key in set(kws).difference(choices)})
        else:
            ret = {}
            for key in sp_commands:
                # recurse into the namespace of the subparser
                ret[key.replace('-', '_')] = \
                    choices[replaced[key]]._parse2subparser_funcs(
                        vars(kws[key]))
            return Namespace(**ret)
def get_subparser(self, name):
"""
Convenience method to get a certain subparser
Parameters
----------
name: str
The name of the subparser
Returns
-------
FuncArgParser
The subparsers corresponding to `name`
"""
if self._subparsers_action is None:
raise ValueError("%s has no subparsers defined!" % self)
return self._subparsers_action.choices[name]
|
Chilipp/funcargparse | funcargparse/__init__.py | FuncArgParser.parse2func | python | def parse2func(self, args=None, func=None):
kws = vars(self.parse_args(args))
if func is None:
if self._setup_as:
func = kws.pop(self._setup_as)
else:
func = self._used_functions[-1]
return func(**kws) | Parse the command line arguments to the setup function
This method parses the given command line arguments to the function
used in the :meth:`setup_args` method to setup up this parser
Parameters
----------
args: list
The list of command line arguments
func: function
An alternative function to use. If None, the last function or the
one specified through the `setup_as` parameter in the
:meth:`setup_args` is used.
Returns
-------
object
What ever is returned by the called function
Note
----
This method does not cover subparsers! | train | https://github.com/Chilipp/funcargparse/blob/398ce8e7fa5aa35c465215446bda151cf1ecf7ad/funcargparse/__init__.py#L861-L890 | null | class FuncArgParser(ArgumentParser):
"""Subclass of an argument parser that get's parts of the information
from a given function"""
_finalized = False
#: The unfinished arguments after the setup
unfinished_arguments = {}
#: The sections to extract from a function docstring that should be used
#: in the epilog of this parser. See also the :meth:`setup_args` method
epilog_sections = ['Notes', 'References']
#: The formatter specification for the epilog. This can either be a string
#: out of 'header', 'bold', or
#: 'rubric' or a callable (i.e. function) that takes two arguments,
#: the section title and the section text, and returns a string.
#:
#: 'heading'
#: Use section headers such as::
#:
#: Notes
#: -----
#: 'bold'
#: Just make a bold header for the section, e.g. ``**Notes**``
#: 'rubric'
#: Use a rubric rst directive, e.g. ``.. rubric:: Notes``
#:
#: .. warning::
#:
#: When building a sphinx documentation using the sphinx-argparse
#: module, this value should be set to ``'bold'`` or ``'rubric'``! Just
#: add this two lines to your conf.py:
#:
#: .. code-block:: python
#:
#: import funcargparse
#: funcargparse.FuncArgParser.epilog_formatter = 'rubric'
epilog_formatter = 'heading'
def __init__(self, *args, **kwargs):
"""
Parameters
----------
``*args,**kwargs``
Theses arguments are determined by the
:class:`argparse.ArgumentParser` base class. Note that by default,
we use a :class:`argparse.RawTextHelpFormatter` class for the
`formatter_class` keyword, whereas the
:class:`argparse.ArgumentParser` uses a
:class:`argparse.HelpFormatter`
Other Parameters
----------------
epilog_sections: list of str
The default sections to use for the epilog (see the
:attr:`epilog_sections` attribute). They can also be specified
each time the :meth:`setup_args` method is called
epilog_formatter: {'header', 'bold', 'rubric'} or function
Specify how the epilog sections should be formatted and defaults to
the :attr:`epilog_formatter` attribute. This can either be a string
out of 'header', 'bold', or 'rubric' or a callable (i.e. function)
that takes two arguments, the section title and the section text,
and returns a string.
'heading'
Use section headers such as::
Notes
-----
'bold'
Just make a bold header for the section, e.g. ``**Notes**``
'rubric'
Use a rubric rst directive, e.g. ``.. rubric:: Notes``
"""
self._subparsers_action = None
kwargs.setdefault('formatter_class', argparse.RawTextHelpFormatter)
epilog_sections = kwargs.pop('epilog_sections', None)
if epilog_sections is not None:
self.epilog_sections = epilog_sections
epilog_formatter = kwargs.pop('epilog_formatter', None)
if epilog_formatter is not None:
self.epilog_formatter = epilog_formatter
super(FuncArgParser, self).__init__(*args, **kwargs)
self.unfinished_arguments = OrderedDict()
self._used_functions = []
self.__currentarg = None
self._chain_subparsers = False
self._setup_as = None
self._epilog_formatters = {'heading': self.format_heading,
'bold': self.format_bold,
'rubric': self.format_rubric}
@staticmethod
def get_param_doc(doc, param):
"""Get the documentation and datatype for a parameter
This function returns the documentation and the argument for a
napoleon like structured docstring `doc`
Parameters
----------
doc: str
The base docstring to use
param: str
The argument to use
Returns
-------
str
The documentation of the given `param`
str
The datatype of the given `param`"""
arg_doc = docstrings.keep_params_s(doc, [param]) or \
docstrings.keep_types_s(doc, [param])
dtype = None
if arg_doc:
lines = arg_doc.splitlines()
arg_doc = dedents('\n' + '\n'.join(lines[1:]))
param_desc = lines[0].split(':', 1)
if len(param_desc) > 1:
dtype = param_desc[1].strip()
return arg_doc, dtype
@docstrings.get_sectionsf('FuncArgParser.setup_args',
sections=['Parameters', 'Returns'])
@docstrings.dedent
def setup_args(self, func=None, setup_as=None, insert_at=None,
interprete=True, epilog_sections=None,
overwrite=False, append_epilog=True):
"""
Add the parameters from the given `func` to the parameter settings
Parameters
----------
func: function
The function to use. If None, a function will be returned that can
be used as a decorator
setup_as: str
The attribute that shall be assigned to the function in the
resulting namespace. If specified, this function will be used when
calling the :meth:`parse2func` method
insert_at: int
The position where the given `func` should be inserted. If None,
it will be appended at the end and used when calling the
:meth:`parse2func` method
interprete: bool
If True (default), the docstrings are interpreted and switches and
lists are automatically inserted (see the
[interpretation-docs]_
epilog_sections: list of str
The headers of the sections to extract. If None, the
:attr:`epilog_sections` attribute is used
overwrite: bool
If True, overwrite the existing epilog and the existing description
of the parser
append_epilog: bool
If True, append to the existing epilog
Returns
-------
function
Either the function that can be used as a decorator (if `func` is
``None``), or the given `func` itself.
Examples
--------
Use this method as a decorator::
>>> @parser.setup_args
... def do_something(a=1):
'''
Just an example
Parameters
----------
a: int
A number to increment by one
'''
return a + 1
>>> args = parser.parse_args('-a 2'.split())
Or by specifying the setup_as function::
>>> @parser.setup_args(setup_as='func')
... def do_something(a=1):
'''
Just an example
Parameters
----------
a: int
A number to increment by one
'''
return a + 1
>>> args = parser.parse_args('-a 2'.split())
>>> args.func is do_something
>>> parser.parse2func('-a 2'.split())
3
References
----------
.. [interpretation-docs]
http://funcargparse.readthedocs.io/en/latest/docstring_interpretation.html)
"""
def setup(func):
# insert the function
if insert_at is None:
self._used_functions.append(func)
else:
self._used_functions.insert(insert_at, func)
args_dict = self.unfinished_arguments
# save the function to use in parse2funcs
if setup_as:
args_dict[setup_as] = dict(
long=setup_as, default=func, help=argparse.SUPPRESS)
self._setup_as = setup_as
# create arguments
args, varargs, varkw, defaults = inspect.getargspec(func)
full_doc = docstrings.dedents(inspect.getdoc(func))
summary = docstrings.get_full_description(full_doc)
if summary:
if not self.description or overwrite:
self.description = summary
full_doc = docstrings._remove_summary(full_doc)
self.extract_as_epilog(full_doc, epilog_sections, overwrite,
append_epilog)
doc = docstrings._get_section(full_doc, 'Parameters') + '\n'
doc += docstrings._get_section(full_doc, 'Other Parameters')
doc = doc.rstrip()
default_min = len(args or []) - len(defaults or [])
for i, arg in enumerate(args):
if arg == 'self' or arg in args_dict:
continue
arg_doc, dtype = self.get_param_doc(doc, arg)
args_dict[arg] = d = {'dest': arg, 'short': arg.replace('_',
'-'),
'long': arg.replace('_', '-')}
if arg_doc:
d['help'] = arg_doc
if i >= default_min:
d['default'] = defaults[i - default_min]
else:
d['positional'] = True
if interprete and dtype == 'bool' and 'default' in d:
d['action'] = 'store_false' if d['default'] else \
'store_true'
elif interprete and dtype:
if dtype.startswith('list of'):
d['nargs'] = '+'
dtype = dtype[7:].strip()
if dtype in ['str', 'string', 'strings']:
d['type'] = six.text_type
if dtype == 'strings':
dtype = 'string'
else:
try:
d['type'] = getattr(builtins, dtype)
except AttributeError:
try: # maybe the dtype has a final 's'
d['type'] = getattr(builtins, dtype[:-1])
dtype = dtype[:-1]
except AttributeError:
pass
d['metavar'] = dtype
return func
if func is None:
return setup
else:
return setup(func)
@docstrings.get_sectionsf('FuncArgParser.add_subparsers')
@docstrings.dedent
def add_subparsers(self, *args, **kwargs):
"""
Add subparsers to this parser
Parameters
----------
``*args, **kwargs``
As specified by the original
:meth:`argparse.ArgumentParser.add_subparsers` method
chain: bool
Default: False. If True, It is enabled to chain subparsers"""
chain = kwargs.pop('chain', None)
ret = super(FuncArgParser, self).add_subparsers(*args, **kwargs)
if chain:
self._chain_subparsers = True
self._subparsers_action = ret
return ret
@docstrings.dedent
def setup_subparser(
self, func=None, setup_as=None, insert_at=None, interprete=True,
epilog_sections=None, overwrite=False, append_epilog=True,
return_parser=False, name=None, **kwargs):
"""
Create a subparser with the name of the given function
Parameters are the same as for the :meth:`setup_args` function, other
parameters are parsed to the :meth:`add_subparsers` method if (and only
if) this method has not already been called.
Parameters
----------
%(FuncArgParser.setup_args.parameters)s
return_parser: bool
If True, the create parser is returned instead of the function
name: str
The name of the created parser. If None, the function name is used
and underscores (``'_'``) are replaced by minus (``'-'``)
``**kwargs``
Any other parameter that is passed to the add_parser method that
creates the parser
Other Parameters
----------------
Returns
-------
FuncArgParser or %(FuncArgParser.setup_args.returns)s
If return_parser is True, the created subparser is returned
Examples
--------
Use this method as a decorator::
>>> from funcargparser import FuncArgParser
>>> parser = FuncArgParser()
>>> @parser.setup_subparser
... def my_func(my_argument=None):
... pass
>>> args = parser.parse_args('my-func -my-argument 1'.split())
"""
def setup(func):
if self._subparsers_action is None:
raise RuntimeError(
"No subparsers have yet been created! Run the "
"add_subparsers method first!")
# replace underscore by '-'
name2use = name
if name2use is None:
name2use = func.__name__.replace('_', '-')
kwargs.setdefault('help', docstrings.get_summary(
docstrings.dedents(inspect.getdoc(func))))
parser = self._subparsers_action.add_parser(name2use, **kwargs)
parser.setup_args(
func, setup_as=setup_as, insert_at=insert_at,
interprete=interprete, epilog_sections=epilog_sections,
overwrite=overwrite, append_epilog=append_epilog)
return func, parser
if func is None:
return lambda f: setup(f)[0]
else:
return setup(func)[int(return_parser)]
@docstrings.get_sectionsf('FuncArgParser.update_arg')
@docstrings.dedent
def update_arg(self, arg, if_existent=None, **kwargs):
"""
Update the `add_argument` data for the given parameter
Parameters
----------
arg: str
The name of the function argument
if_existent: bool or None
If True, the argument is updated. If None (default), the argument
is only updated, if it exists. Otherwise, if False, the given
``**kwargs`` are only used if the argument is not yet existing
``**kwargs``
The keyword arguments any parameter for the
:meth:`argparse.ArgumentParser.add_argument` method
"""
if if_existent or (if_existent is None and
arg in self.unfinished_arguments):
self.unfinished_arguments[arg].update(kwargs)
elif not if_existent and if_existent is not None:
self.unfinished_arguments.setdefault(arg, kwargs)
@docstrings.dedent
def update_argf(self, arg, **kwargs):
"""
Update the arguments as a decorator
Parameters
---------
%(FuncArgParser.update_arg.parameters)s
Examples
--------
Use this method as a decorator::
>>> from funcargparser import FuncArgParser
>>> parser = FuncArgParser()
>>> @parser.update_argf('my_argument', type=int)
... def my_func(my_argument=None):
... pass
>>> args = parser.parse_args('my-func -my-argument 1'.split())
>>> isinstance(args.my_argument, int)
True
See Also
--------
update_arg"""
return self._as_decorator('update_arg', arg, **kwargs)
def _as_decorator(self, funcname, *args, **kwargs):
def func_decorator(func):
success = False
for parser in self._get_corresponding_parsers(func):
getattr(parser, funcname)(*args, **kwargs)
success = True
if not success:
raise ValueError(
"Could not figure out to which this %s belongs" % func)
return func
return func_decorator
def _get_corresponding_parsers(self, func):
"""Get the parser that has been set up by the given `function`"""
if func in self._used_functions:
yield self
if self._subparsers_action is not None:
for parser in self._subparsers_action.choices.values():
for sp in parser._get_corresponding_parsers(func):
yield sp
def pop_arg(self, *args, **kwargs):
"""Delete a previously defined argument from the parser
"""
return self.unfinished_arguments.pop(*args, **kwargs)
def pop_argf(self, *args, **kwargs):
"""Delete a previously defined argument from the parser via decorators
Same as :meth:`pop_arg` but it can be used as a decorator"""
return self._as_decorator('pop_arg', *args, **kwargs)
def pop_key(self, arg, key, *args, **kwargs):
"""Delete a previously defined key for the `add_argument`
"""
return self.unfinished_arguments[arg].pop(key, *args, **kwargs)
def pop_keyf(self, *args, **kwargs):
"""Delete a previously defined key for the `add_argument`
Same as :meth:`pop_key` but it can be used as a decorator"""
return self._as_decorator('pop_key', *args, **kwargs)
    def create_arguments(self, subparsers=False):
        """Create and add the arguments

        Registers every entry of :attr:`unfinished_arguments` via
        ``add_argument`` (or via the argument group stored under the
        ``'group'`` key). This can only be done once; a second call
        raises a :class:`ValueError`.

        Parameters
        ----------
        subparsers: bool
            If True, the arguments of the subparsers are also created

        Returns
        -------
        list
            The actions returned by the ``add_argument`` calls

        Raises
        ------
        ValueError
            If this parser has already been finalized or if an argument
            defines neither a short nor a long name
        """
        ret = []
        if not self._finalized:
            # NOTE(review): the d.pop calls below mutate the dicts stored
            # in unfinished_arguments in place, i.e. the settings are
            # consumed by this call
            for arg, d in self.unfinished_arguments.items():
                try:
                    # positional arguments get no leading '-'
                    not_positional = int(not d.pop('positional', False))
                    short = d.pop('short', None)
                    long_name = d.pop('long', None)
                    if short is None and long_name is None:
                        raise ValueError(
                            "Either a short (-) or a long (--) argument must "
                            "be provided!")
                    if not not_positional:
                        # positionals are identified by the argument name
                        # itself; 'dest' and a long name are not allowed
                        short = arg
                        long_name = None
                        d.pop('dest', None)
                    if short == long_name:
                        long_name = None
                    args = []
                    if short:
                        args.append('-' * not_positional + short)
                    if long_name:
                        args.append('--' * not_positional + long_name)
                    # arguments may be assigned to an argument group
                    group = d.pop('group', self)
                    if d.get('action') in ['store_true', 'store_false']:
                        # switches must not define a metavar
                        d.pop('metavar', None)
                    ret.append(group.add_argument(*args, **d))
                except Exception:
                    print('Error while creating argument %s' % arg)
                    raise
        else:
            raise ValueError('Parser has already been finalized!')
        self._finalized = True
        if subparsers and self._subparsers_action is not None:
            for parser in self._subparsers_action.choices.values():
                parser.create_arguments(True)
        return ret
def append2help(self, arg, s):
"""Append the given string to the help of argument `arg`
Parameters
----------
arg: str
The function argument
s: str
The string to append to the help"""
self.unfinished_arguments[arg]['help'] += s
def append2helpf(self, arg, s):
"""Append the given string to the help of argument `arg`
Parameters
----------
arg: str
The function argument
s: str
The string to append to the help"""
return self._as_decorator('append2help', arg, s)
@staticmethod
def format_bold(section, text):
"""Make a bold formatting for the section header"""
return '**%s**\n\n%s' % (section, text)
@staticmethod
def format_rubric(section, text):
"""Make a bold formatting for the section header"""
return '.. rubric:: %s\n\n%s' % (section, text)
@staticmethod
def format_heading(section, text):
return '\n'.join([section, '-' * len(section), text])
def format_epilog_section(self, section, text):
"""Format a section for the epilog by inserting a format"""
try:
func = self._epilog_formatters[self.epilog_formatter]
except KeyError:
if not callable(self.epilog_formatter):
raise
func = self.epilog_formatter
return func(section, text)
def extract_as_epilog(self, text, sections=None, overwrite=False,
append=True):
"""Extract epilog sections from the a docstring
Parameters
----------
text
The docstring to use
sections: list of str
The headers of the sections to extract. If None, the
:attr:`epilog_sections` attribute is used
overwrite: bool
If True, overwrite the existing epilog
append: bool
If True, append to the existing epilog"""
if sections is None:
sections = self.epilog_sections
if ((not self.epilog or overwrite or append) and sections):
epilog_parts = []
for sec in sections:
text = docstrings._get_section(text, sec).strip()
if text:
epilog_parts.append(
self.format_epilog_section(sec, text))
if epilog_parts:
epilog = '\n\n'.join(epilog_parts)
if overwrite or not self.epilog:
self.epilog = epilog
else:
self.epilog += '\n\n' + epilog
def grouparg(self, arg, my_arg=None, parent_cmds=[]):
"""
Grouper function for chaining subcommands
Parameters
----------
arg: str
The current command line argument that is parsed
my_arg: str
The name of this subparser. If None, this parser is the main
parser and has no parent parser
parent_cmds: list of str
The available commands of the parent parsers
Returns
-------
str or None
The grouping key for the given `arg` or None if the key does
not correspond to this parser or this parser is the main parser
and does not have seen a subparser yet
Notes
-----
Quite complicated, there is no real need to deal with this function
"""
if self._subparsers_action is None:
return None
commands = self._subparsers_action.choices
currentarg = self.__currentarg
# the default return value is the current argument we are in or the
# name of the subparser itself
ret = currentarg or my_arg
if currentarg is not None:
# if we are already in a sub command, we use the sub parser
sp_key = commands[currentarg].grouparg(arg, currentarg, chain(
commands, parent_cmds))
if sp_key is None and arg in commands:
# if the subparser did not recognize the command, we use the
# command the corresponds to this parser or (of this parser
# is the parent parser) the current subparser
self.__currentarg = currentarg = arg
ret = my_arg or currentarg
elif sp_key not in commands and arg in parent_cmds:
# otherwise, if the subparser recognizes the commmand but it is
# not in the known command of this parser, it must be another
# command of the subparser and this parser can ignore it
ret = None
else:
# otherwise the command belongs to this subparser (if this one
# is not the subparser) or the current subparser
ret = my_arg or currentarg
elif arg in commands:
# if the argument is a valid subparser, we return this one
self.__currentarg = arg
ret = arg
elif arg in parent_cmds:
# if the argument is not a valid subparser but in one of our
# parents, we return None to signalize that we cannot categorize
# it
ret = None
return ret
def parse_known_args(self, args=None, namespace=None):
if self._chain_subparsers:
if args is None:
args = sys.argv[1:]
choices_d = OrderedDict()
remainders = OrderedDict()
main_args = []
# get the first argument to make sure that everything works
cmd = self.__currentarg = None
for i, (cmd, subargs) in enumerate(groupby(args, self.grouparg)):
if cmd is None:
main_args += list(subargs)
else:
# replace '-' by underscore
ns_cmd = cmd.replace('-', '_')
choices_d[ns_cmd], remainders[ns_cmd] = super(
FuncArgParser, self).parse_known_args(
list(chain(main_args, subargs)))
main_ns, remainders[None] = self.__parse_main(main_args)
for key, val in vars(main_ns).items():
choices_d[key] = val
self.__currentarg = None
if '__dummy' in choices_d:
del choices_d['__dummy']
return Namespace(**choices_d), list(chain(*remainders.values()))
# otherwise, use the default behaviour
return super(FuncArgParser, self).parse_known_args(args, namespace)
def __parse_main(self, args):
"""Parse the main arguments only. This is a work around for python 2.7
because argparse does not allow to parse arguments without subparsers
"""
if six.PY2:
self._subparsers_action.add_parser("__dummy")
return super(FuncArgParser, self).parse_known_args(
list(args) + ['__dummy'])
return super(FuncArgParser, self).parse_known_args(args)
@docstrings.get_sectionsf('FuncArgParser.update_short')
@docstrings.dedent
def update_short(self, **kwargs):
"""
Update the short optional arguments (those with one leading '-')
This method updates the short argument name for the specified function
arguments as stored in :attr:`unfinished_arguments`
Parameters
----------
``**kwargs``
Keywords must be keys in the :attr:`unfinished_arguments`
dictionary (i.e. keywords of the root functions), values the short
argument names
Examples
--------
Setting::
>>> parser.update_short(something='s', something_else='se')
is basically the same as::
>>> parser.update_arg('something', short='s')
>>> parser.update_arg('something_else', short='se')
which in turn is basically comparable to::
>>> parser.add_argument('-s', '--something', ...)
>>> parser.add_argument('-se', '--something_else', ...)
See Also
--------
update_shortf, update_long"""
for key, val in six.iteritems(kwargs):
self.update_arg(key, short=val)
@docstrings.dedent
def update_shortf(self, **kwargs):
"""
Update the short optional arguments belonging to a function
This method acts exactly like :meth:`update_short` but works as a
decorator (see :meth:`update_arg` and :meth:`update_argf`)
Parameters
----------
%(FuncArgParser.update_short.parameters)s
Returns
-------
function
The function that can be used as a decorator
Examples
--------
Use this method as a decorator::
>>> @parser.update_shortf(something='s', something_else='se')
... def do_something(something=None, something_else=None):
... ...
See also the examples in :meth:`update_short`.
See Also
--------
update_short, update_longf
"""
return self._as_decorator('update_short', **kwargs)
@docstrings.get_sectionsf('FuncArgParser.update_long')
@docstrings.dedent
def update_long(self, **kwargs):
"""
Update the long optional arguments (those with two leading '-')
This method updates the short argument name for the specified function
arguments as stored in :attr:`unfinished_arguments`
Parameters
----------
``**kwargs``
Keywords must be keys in the :attr:`unfinished_arguments`
dictionary (i.e. keywords of the root functions), values the long
argument names
Examples
--------
Setting::
>>> parser.update_long(something='s', something_else='se')
is basically the same as::
>>> parser.update_arg('something', long='s')
>>> parser.update_arg('something_else', long='se')
which in turn is basically comparable to::
>>> parser.add_argument('--s', dest='something', ...)
>>> parser.add_argument('--se', dest='something_else', ...)
See Also
--------
update_short, update_longf"""
for key, val in six.iteritems(kwargs):
self.update_arg(key, long=val)
@docstrings.dedent
def update_longf(self, **kwargs):
"""
Update the long optional arguments belonging to a function
This method acts exactly like :meth:`update_long` but works as a
decorator (see :meth:`update_arg` and :meth:`update_argf`)
Parameters
----------
%(FuncArgParser.update_long.parameters)s
Returns
-------
function
The function that can be used as a decorator
Examples
--------
Use this method as a decorator::
>>> @parser.update_shortf(something='s', something_else='se')
... def do_something(something=None, something_else=None):
... ...
See also the examples in :meth:`update_long`.
See Also
--------
update_short, update_longf
"""
return self._as_decorator('update_long', **kwargs)
def parse_known2func(self, args=None, func=None):
"""Parse the command line arguments to the setup function
This method parses the given command line arguments to the function
used in the :meth:`setup_args` method to setup up this parser
Parameters
----------
args: list
The list of command line arguments
func: function or str
An alternative function to use. If None, the last function or the
one specified through the `setup_as` parameter in the
:meth:`setup_args` is used.
Returns
-------
object
What ever is returned by the called function
list
The remaining command line arguments that could not be interpreted
Note
----
This method does not cover subparsers!"""
ns, remainder = self.parse_known_args(args)
kws = vars(ns)
if func is None:
if self._setup_as:
func = kws.pop(self._setup_as)
else:
func = self._used_functions[-1]
return func(**kws), remainder
def parse_chained(self, args=None):
    """
    Parse command line arguments and dispatch them to the setup functions

    The arguments are parsed with :meth:`parse_args` and the result is
    handed to the function(s) registered via :meth:`setup_args` (including
    chained subparsers).

    Parameters
    ----------
    args: list
        The arguments that are given to the :meth:`parse_args` method

    Returns
    -------
    argparse.Namespace
        The namespace that maps each used command name to the return value
        of its function

    See also
    --------
    parse_known_chained
    """
    namespace = self.parse_args(args)
    return self._parse2subparser_funcs(vars(namespace))
def parse_known_chained(self, args=None):
    """
    Parse known command line arguments and dispatch to the setup functions

    Like :meth:`parse_chained`, but uses :meth:`parse_known_args` and thus
    also returns the arguments that could not be interpreted.

    Parameters
    ----------
    args: list
        The arguments that are given to the :meth:`parse_args` method

    Returns
    -------
    argparse.Namespace
        The namespace that maps each used command name to the return value
        of its function
    list
        The remaining arguments that could not be interpreted

    See also
    --------
    parse_known
    """
    namespace, rest = self.parse_known_args(args)
    result = self._parse2subparser_funcs(vars(namespace))
    return result, rest
def _parse2subparser_funcs(self, kws):
    """
    Recursive function to parse arguments to chained parsers

    Parameters
    ----------
    kws: dict
        Mapping as obtained from ``vars(namespace)``. For chained
        subparsers, values may themselves be :class:`argparse.Namespace`
        instances that are dispatched recursively to the corresponding
        subparser.

    Returns
    -------
    object, argparse.Namespace or None
        If no subcommand was used: the return value of the registered
        setup function (or None if no function was ever registered).
        Otherwise a namespace mapping each used subcommand (with ``-``
        replaced by ``_``) to the result of its subparser.
    """
    # available subcommands of this parser (empty if no subparsers exist)
    choices = getattr(self._subparsers_action, 'choices', {})
    # map the namespace-safe name (underscores) back to the real command
    # name, which may contain '-'
    replaced = {key.replace('-', '_'): key for key in choices}
    # subcommands that actually appear in the parsed namespace
    sp_commands = set(replaced).intersection(kws)
    if not sp_commands:
        # no subcommand used: call the function this parser was set up
        # with, passing everything except subcommand entries
        if self._setup_as is not None:
            func = kws.pop(self._setup_as)
        else:
            try:
                func = self._used_functions[-1]
            except IndexError:
                # nothing was registered via setup_args
                return None
        # NOTE(review): the filter removes the dash-form names from
        # ``choices``; underscore-variants in ``replaced`` would not be
        # removed here -- presumably harmless because ``sp_commands`` is
        # empty in this branch, but worth confirming
        return func(**{
            key: kws[key] for key in set(kws).difference(choices)})
    else:
        # dispatch each used subcommand to its subparser recursively
        ret = {}
        for key in sp_commands:
            ret[key.replace('-', '_')] = \
                choices[replaced[key]]._parse2subparser_funcs(
                    vars(kws[key]))
        return Namespace(**ret)
def get_subparser(self, name):
    """
    Convenience method to get a certain subparser

    Parameters
    ----------
    name: str
        The name of the subparser

    Returns
    -------
    FuncArgParser
        The subparsers corresponding to `name`
    """
    action = self._subparsers_action
    if action is None:
        raise ValueError("%s has no subparsers defined!" % self)
    return action.choices[name]
|
Chilipp/funcargparse | funcargparse/__init__.py | FuncArgParser.parse_known2func | python | def parse_known2func(self, args=None, func=None):
ns, remainder = self.parse_known_args(args)
kws = vars(ns)
if func is None:
if self._setup_as:
func = kws.pop(self._setup_as)
else:
func = self._used_functions[-1]
return func(**kws), remainder | Parse the command line arguments to the setup function
This method parses the given command line arguments to the function
used in the :meth:`setup_args` method to setup up this parser
Parameters
----------
args: list
The list of command line arguments
func: function or str
An alternative function to use. If None, the last function or the
one specified through the `setup_as` parameter in the
:meth:`setup_args` is used.
Returns
-------
object
What ever is returned by the called function
list
The remaining command line arguments that could not be interpreted
Note
----
This method does not cover subparsers! | train | https://github.com/Chilipp/funcargparse/blob/398ce8e7fa5aa35c465215446bda151cf1ecf7ad/funcargparse/__init__.py#L892-L924 | [
"def parse_known_args(self, args=None, namespace=None):\n if self._chain_subparsers:\n if args is None:\n args = sys.argv[1:]\n choices_d = OrderedDict()\n remainders = OrderedDict()\n main_args = []\n # get the first argument to make sure that everything works\n cmd = self.__currentarg = None\n for i, (cmd, subargs) in enumerate(groupby(args, self.grouparg)):\n if cmd is None:\n main_args += list(subargs)\n else:\n # replace '-' by underscore\n ns_cmd = cmd.replace('-', '_')\n choices_d[ns_cmd], remainders[ns_cmd] = super(\n FuncArgParser, self).parse_known_args(\n list(chain(main_args, subargs)))\n main_ns, remainders[None] = self.__parse_main(main_args)\n for key, val in vars(main_ns).items():\n choices_d[key] = val\n self.__currentarg = None\n if '__dummy' in choices_d:\n del choices_d['__dummy']\n return Namespace(**choices_d), list(chain(*remainders.values()))\n # otherwise, use the default behaviour\n return super(FuncArgParser, self).parse_known_args(args, namespace)\n"
] | class FuncArgParser(ArgumentParser):
"""Subclass of an argument parser that get's parts of the information
from a given function"""
_finalized = False
#: The unfinished arguments after the setup
unfinished_arguments = {}
#: The sections to extract from a function docstring that should be used
#: in the epilog of this parser. See also the :meth:`setup_args` method
epilog_sections = ['Notes', 'References']
#: The formatter specification for the epilog. This can either be a string
#: out of 'header', 'bold', or
#: 'rubric' or a callable (i.e. function) that takes two arguments,
#: the section title and the section text, and returns a string.
#:
#: 'heading'
#: Use section headers such as::
#:
#: Notes
#: -----
#: 'bold'
#: Just make a bold header for the section, e.g. ``**Notes**``
#: 'rubric'
#: Use a rubric rst directive, e.g. ``.. rubric:: Notes``
#:
#: .. warning::
#:
#: When building a sphinx documentation using the sphinx-argparse
#: module, this value should be set to ``'bold'`` or ``'rubric'``! Just
#: add this two lines to your conf.py:
#:
#: .. code-block:: python
#:
#: import funcargparse
#: funcargparse.FuncArgParser.epilog_formatter = 'rubric'
epilog_formatter = 'heading'
def __init__(self, *args, **kwargs):
"""
Parameters
----------
``*args,**kwargs``
Theses arguments are determined by the
:class:`argparse.ArgumentParser` base class. Note that by default,
we use a :class:`argparse.RawTextHelpFormatter` class for the
`formatter_class` keyword, whereas the
:class:`argparse.ArgumentParser` uses a
:class:`argparse.HelpFormatter`
Other Parameters
----------------
epilog_sections: list of str
The default sections to use for the epilog (see the
:attr:`epilog_sections` attribute). They can also be specified
each time the :meth:`setup_args` method is called
epilog_formatter: {'header', 'bold', 'rubric'} or function
Specify how the epilog sections should be formatted and defaults to
the :attr:`epilog_formatter` attribute. This can either be a string
out of 'header', 'bold', or 'rubric' or a callable (i.e. function)
that takes two arguments, the section title and the section text,
and returns a string.
'heading'
Use section headers such as::
Notes
-----
'bold'
Just make a bold header for the section, e.g. ``**Notes**``
'rubric'
Use a rubric rst directive, e.g. ``.. rubric:: Notes``
"""
self._subparsers_action = None
kwargs.setdefault('formatter_class', argparse.RawTextHelpFormatter)
epilog_sections = kwargs.pop('epilog_sections', None)
if epilog_sections is not None:
self.epilog_sections = epilog_sections
epilog_formatter = kwargs.pop('epilog_formatter', None)
if epilog_formatter is not None:
self.epilog_formatter = epilog_formatter
super(FuncArgParser, self).__init__(*args, **kwargs)
self.unfinished_arguments = OrderedDict()
self._used_functions = []
self.__currentarg = None
self._chain_subparsers = False
self._setup_as = None
self._epilog_formatters = {'heading': self.format_heading,
'bold': self.format_bold,
'rubric': self.format_rubric}
@staticmethod
def get_param_doc(doc, param):
"""Get the documentation and datatype for a parameter
This function returns the documentation and the argument for a
napoleon like structured docstring `doc`
Parameters
----------
doc: str
The base docstring to use
param: str
The argument to use
Returns
-------
str
The documentation of the given `param`
str
The datatype of the given `param`"""
arg_doc = docstrings.keep_params_s(doc, [param]) or \
docstrings.keep_types_s(doc, [param])
dtype = None
if arg_doc:
lines = arg_doc.splitlines()
arg_doc = dedents('\n' + '\n'.join(lines[1:]))
param_desc = lines[0].split(':', 1)
if len(param_desc) > 1:
dtype = param_desc[1].strip()
return arg_doc, dtype
@docstrings.get_sectionsf('FuncArgParser.setup_args',
sections=['Parameters', 'Returns'])
@docstrings.dedent
def setup_args(self, func=None, setup_as=None, insert_at=None,
interprete=True, epilog_sections=None,
overwrite=False, append_epilog=True):
"""
Add the parameters from the given `func` to the parameter settings
Parameters
----------
func: function
The function to use. If None, a function will be returned that can
be used as a decorator
setup_as: str
The attribute that shall be assigned to the function in the
resulting namespace. If specified, this function will be used when
calling the :meth:`parse2func` method
insert_at: int
The position where the given `func` should be inserted. If None,
it will be appended at the end and used when calling the
:meth:`parse2func` method
interprete: bool
If True (default), the docstrings are interpreted and switches and
lists are automatically inserted (see the
[interpretation-docs]_
epilog_sections: list of str
The headers of the sections to extract. If None, the
:attr:`epilog_sections` attribute is used
overwrite: bool
If True, overwrite the existing epilog and the existing description
of the parser
append_epilog: bool
If True, append to the existing epilog
Returns
-------
function
Either the function that can be used as a decorator (if `func` is
``None``), or the given `func` itself.
Examples
--------
Use this method as a decorator::
>>> @parser.setup_args
... def do_something(a=1):
'''
Just an example
Parameters
----------
a: int
A number to increment by one
'''
return a + 1
>>> args = parser.parse_args('-a 2'.split())
Or by specifying the setup_as function::
>>> @parser.setup_args(setup_as='func')
... def do_something(a=1):
'''
Just an example
Parameters
----------
a: int
A number to increment by one
'''
return a + 1
>>> args = parser.parse_args('-a 2'.split())
>>> args.func is do_something
>>> parser.parse2func('-a 2'.split())
3
References
----------
.. [interpretation-docs]
http://funcargparse.readthedocs.io/en/latest/docstring_interpretation.html)
"""
def setup(func):
# insert the function
if insert_at is None:
self._used_functions.append(func)
else:
self._used_functions.insert(insert_at, func)
args_dict = self.unfinished_arguments
# save the function to use in parse2funcs
if setup_as:
args_dict[setup_as] = dict(
long=setup_as, default=func, help=argparse.SUPPRESS)
self._setup_as = setup_as
# create arguments
args, varargs, varkw, defaults = inspect.getargspec(func)
full_doc = docstrings.dedents(inspect.getdoc(func))
summary = docstrings.get_full_description(full_doc)
if summary:
if not self.description or overwrite:
self.description = summary
full_doc = docstrings._remove_summary(full_doc)
self.extract_as_epilog(full_doc, epilog_sections, overwrite,
append_epilog)
doc = docstrings._get_section(full_doc, 'Parameters') + '\n'
doc += docstrings._get_section(full_doc, 'Other Parameters')
doc = doc.rstrip()
default_min = len(args or []) - len(defaults or [])
for i, arg in enumerate(args):
if arg == 'self' or arg in args_dict:
continue
arg_doc, dtype = self.get_param_doc(doc, arg)
args_dict[arg] = d = {'dest': arg, 'short': arg.replace('_',
'-'),
'long': arg.replace('_', '-')}
if arg_doc:
d['help'] = arg_doc
if i >= default_min:
d['default'] = defaults[i - default_min]
else:
d['positional'] = True
if interprete and dtype == 'bool' and 'default' in d:
d['action'] = 'store_false' if d['default'] else \
'store_true'
elif interprete and dtype:
if dtype.startswith('list of'):
d['nargs'] = '+'
dtype = dtype[7:].strip()
if dtype in ['str', 'string', 'strings']:
d['type'] = six.text_type
if dtype == 'strings':
dtype = 'string'
else:
try:
d['type'] = getattr(builtins, dtype)
except AttributeError:
try: # maybe the dtype has a final 's'
d['type'] = getattr(builtins, dtype[:-1])
dtype = dtype[:-1]
except AttributeError:
pass
d['metavar'] = dtype
return func
if func is None:
return setup
else:
return setup(func)
@docstrings.get_sectionsf('FuncArgParser.add_subparsers')
@docstrings.dedent
def add_subparsers(self, *args, **kwargs):
"""
Add subparsers to this parser
Parameters
----------
``*args, **kwargs``
As specified by the original
:meth:`argparse.ArgumentParser.add_subparsers` method
chain: bool
Default: False. If True, It is enabled to chain subparsers"""
chain = kwargs.pop('chain', None)
ret = super(FuncArgParser, self).add_subparsers(*args, **kwargs)
if chain:
self._chain_subparsers = True
self._subparsers_action = ret
return ret
@docstrings.dedent
def setup_subparser(
self, func=None, setup_as=None, insert_at=None, interprete=True,
epilog_sections=None, overwrite=False, append_epilog=True,
return_parser=False, name=None, **kwargs):
"""
Create a subparser with the name of the given function
Parameters are the same as for the :meth:`setup_args` function, other
parameters are parsed to the :meth:`add_subparsers` method if (and only
if) this method has not already been called.
Parameters
----------
%(FuncArgParser.setup_args.parameters)s
return_parser: bool
If True, the create parser is returned instead of the function
name: str
The name of the created parser. If None, the function name is used
and underscores (``'_'``) are replaced by minus (``'-'``)
``**kwargs``
Any other parameter that is passed to the add_parser method that
creates the parser
Other Parameters
----------------
Returns
-------
FuncArgParser or %(FuncArgParser.setup_args.returns)s
If return_parser is True, the created subparser is returned
Examples
--------
Use this method as a decorator::
>>> from funcargparser import FuncArgParser
>>> parser = FuncArgParser()
>>> @parser.setup_subparser
... def my_func(my_argument=None):
... pass
>>> args = parser.parse_args('my-func -my-argument 1'.split())
"""
def setup(func):
if self._subparsers_action is None:
raise RuntimeError(
"No subparsers have yet been created! Run the "
"add_subparsers method first!")
# replace underscore by '-'
name2use = name
if name2use is None:
name2use = func.__name__.replace('_', '-')
kwargs.setdefault('help', docstrings.get_summary(
docstrings.dedents(inspect.getdoc(func))))
parser = self._subparsers_action.add_parser(name2use, **kwargs)
parser.setup_args(
func, setup_as=setup_as, insert_at=insert_at,
interprete=interprete, epilog_sections=epilog_sections,
overwrite=overwrite, append_epilog=append_epilog)
return func, parser
if func is None:
return lambda f: setup(f)[0]
else:
return setup(func)[int(return_parser)]
@docstrings.get_sectionsf('FuncArgParser.update_arg')
@docstrings.dedent
def update_arg(self, arg, if_existent=None, **kwargs):
"""
Update the `add_argument` data for the given parameter
Parameters
----------
arg: str
The name of the function argument
if_existent: bool or None
If True, the argument is updated. If None (default), the argument
is only updated, if it exists. Otherwise, if False, the given
``**kwargs`` are only used if the argument is not yet existing
``**kwargs``
The keyword arguments any parameter for the
:meth:`argparse.ArgumentParser.add_argument` method
"""
if if_existent or (if_existent is None and
arg in self.unfinished_arguments):
self.unfinished_arguments[arg].update(kwargs)
elif not if_existent and if_existent is not None:
self.unfinished_arguments.setdefault(arg, kwargs)
@docstrings.dedent
def update_argf(self, arg, **kwargs):
"""
Update the arguments as a decorator
Parameters
---------
%(FuncArgParser.update_arg.parameters)s
Examples
--------
Use this method as a decorator::
>>> from funcargparser import FuncArgParser
>>> parser = FuncArgParser()
>>> @parser.update_argf('my_argument', type=int)
... def my_func(my_argument=None):
... pass
>>> args = parser.parse_args('my-func -my-argument 1'.split())
>>> isinstance(args.my_argument, int)
True
See Also
--------
update_arg"""
return self._as_decorator('update_arg', arg, **kwargs)
def _as_decorator(self, funcname, *args, **kwargs):
def func_decorator(func):
success = False
for parser in self._get_corresponding_parsers(func):
getattr(parser, funcname)(*args, **kwargs)
success = True
if not success:
raise ValueError(
"Could not figure out to which this %s belongs" % func)
return func
return func_decorator
def _get_corresponding_parsers(self, func):
"""Get the parser that has been set up by the given `function`"""
if func in self._used_functions:
yield self
if self._subparsers_action is not None:
for parser in self._subparsers_action.choices.values():
for sp in parser._get_corresponding_parsers(func):
yield sp
def pop_arg(self, *args, **kwargs):
"""Delete a previously defined argument from the parser
"""
return self.unfinished_arguments.pop(*args, **kwargs)
def pop_argf(self, *args, **kwargs):
"""Delete a previously defined argument from the parser via decorators
Same as :meth:`pop_arg` but it can be used as a decorator"""
return self._as_decorator('pop_arg', *args, **kwargs)
def pop_key(self, arg, key, *args, **kwargs):
"""Delete a previously defined key for the `add_argument`
"""
return self.unfinished_arguments[arg].pop(key, *args, **kwargs)
def pop_keyf(self, *args, **kwargs):
"""Delete a previously defined key for the `add_argument`
Same as :meth:`pop_key` but it can be used as a decorator"""
return self._as_decorator('pop_key', *args, **kwargs)
def create_arguments(self, subparsers=False):
"""Create and add the arguments
Parameters
----------
subparsers: bool
If True, the arguments of the subparsers are also created"""
ret = []
if not self._finalized:
for arg, d in self.unfinished_arguments.items():
try:
not_positional = int(not d.pop('positional', False))
short = d.pop('short', None)
long_name = d.pop('long', None)
if short is None and long_name is None:
raise ValueError(
"Either a short (-) or a long (--) argument must "
"be provided!")
if not not_positional:
short = arg
long_name = None
d.pop('dest', None)
if short == long_name:
long_name = None
args = []
if short:
args.append('-' * not_positional + short)
if long_name:
args.append('--' * not_positional + long_name)
group = d.pop('group', self)
if d.get('action') in ['store_true', 'store_false']:
d.pop('metavar', None)
ret.append(group.add_argument(*args, **d))
except Exception:
print('Error while creating argument %s' % arg)
raise
else:
raise ValueError('Parser has already been finalized!')
self._finalized = True
if subparsers and self._subparsers_action is not None:
for parser in self._subparsers_action.choices.values():
parser.create_arguments(True)
return ret
def append2help(self, arg, s):
"""Append the given string to the help of argument `arg`
Parameters
----------
arg: str
The function argument
s: str
The string to append to the help"""
self.unfinished_arguments[arg]['help'] += s
def append2helpf(self, arg, s):
"""Append the given string to the help of argument `arg`
Parameters
----------
arg: str
The function argument
s: str
The string to append to the help"""
return self._as_decorator('append2help', arg, s)
@staticmethod
def format_bold(section, text):
"""Make a bold formatting for the section header"""
return '**%s**\n\n%s' % (section, text)
@staticmethod
def format_rubric(section, text):
"""Make a bold formatting for the section header"""
return '.. rubric:: %s\n\n%s' % (section, text)
@staticmethod
def format_heading(section, text):
return '\n'.join([section, '-' * len(section), text])
def format_epilog_section(self, section, text):
"""Format a section for the epilog by inserting a format"""
try:
func = self._epilog_formatters[self.epilog_formatter]
except KeyError:
if not callable(self.epilog_formatter):
raise
func = self.epilog_formatter
return func(section, text)
def extract_as_epilog(self, text, sections=None, overwrite=False,
append=True):
"""Extract epilog sections from the a docstring
Parameters
----------
text
The docstring to use
sections: list of str
The headers of the sections to extract. If None, the
:attr:`epilog_sections` attribute is used
overwrite: bool
If True, overwrite the existing epilog
append: bool
If True, append to the existing epilog"""
if sections is None:
sections = self.epilog_sections
if ((not self.epilog or overwrite or append) and sections):
epilog_parts = []
for sec in sections:
text = docstrings._get_section(text, sec).strip()
if text:
epilog_parts.append(
self.format_epilog_section(sec, text))
if epilog_parts:
epilog = '\n\n'.join(epilog_parts)
if overwrite or not self.epilog:
self.epilog = epilog
else:
self.epilog += '\n\n' + epilog
def grouparg(self, arg, my_arg=None, parent_cmds=[]):
"""
Grouper function for chaining subcommands
Parameters
----------
arg: str
The current command line argument that is parsed
my_arg: str
The name of this subparser. If None, this parser is the main
parser and has no parent parser
parent_cmds: list of str
The available commands of the parent parsers
Returns
-------
str or None
The grouping key for the given `arg` or None if the key does
not correspond to this parser or this parser is the main parser
and does not have seen a subparser yet
Notes
-----
Quite complicated, there is no real need to deal with this function
"""
if self._subparsers_action is None:
return None
commands = self._subparsers_action.choices
currentarg = self.__currentarg
# the default return value is the current argument we are in or the
# name of the subparser itself
ret = currentarg or my_arg
if currentarg is not None:
# if we are already in a sub command, we use the sub parser
sp_key = commands[currentarg].grouparg(arg, currentarg, chain(
commands, parent_cmds))
if sp_key is None and arg in commands:
# if the subparser did not recognize the command, we use the
# command the corresponds to this parser or (of this parser
# is the parent parser) the current subparser
self.__currentarg = currentarg = arg
ret = my_arg or currentarg
elif sp_key not in commands and arg in parent_cmds:
# otherwise, if the subparser recognizes the commmand but it is
# not in the known command of this parser, it must be another
# command of the subparser and this parser can ignore it
ret = None
else:
# otherwise the command belongs to this subparser (if this one
# is not the subparser) or the current subparser
ret = my_arg or currentarg
elif arg in commands:
# if the argument is a valid subparser, we return this one
self.__currentarg = arg
ret = arg
elif arg in parent_cmds:
# if the argument is not a valid subparser but in one of our
# parents, we return None to signalize that we cannot categorize
# it
ret = None
return ret
def parse_known_args(self, args=None, namespace=None):
if self._chain_subparsers:
if args is None:
args = sys.argv[1:]
choices_d = OrderedDict()
remainders = OrderedDict()
main_args = []
# get the first argument to make sure that everything works
cmd = self.__currentarg = None
for i, (cmd, subargs) in enumerate(groupby(args, self.grouparg)):
if cmd is None:
main_args += list(subargs)
else:
# replace '-' by underscore
ns_cmd = cmd.replace('-', '_')
choices_d[ns_cmd], remainders[ns_cmd] = super(
FuncArgParser, self).parse_known_args(
list(chain(main_args, subargs)))
main_ns, remainders[None] = self.__parse_main(main_args)
for key, val in vars(main_ns).items():
choices_d[key] = val
self.__currentarg = None
if '__dummy' in choices_d:
del choices_d['__dummy']
return Namespace(**choices_d), list(chain(*remainders.values()))
# otherwise, use the default behaviour
return super(FuncArgParser, self).parse_known_args(args, namespace)
def __parse_main(self, args):
"""Parse the main arguments only. This is a work around for python 2.7
because argparse does not allow to parse arguments without subparsers
"""
if six.PY2:
self._subparsers_action.add_parser("__dummy")
return super(FuncArgParser, self).parse_known_args(
list(args) + ['__dummy'])
return super(FuncArgParser, self).parse_known_args(args)
@docstrings.get_sectionsf('FuncArgParser.update_short')
@docstrings.dedent
def update_short(self, **kwargs):
"""
Update the short optional arguments (those with one leading '-')
This method updates the short argument name for the specified function
arguments as stored in :attr:`unfinished_arguments`
Parameters
----------
``**kwargs``
Keywords must be keys in the :attr:`unfinished_arguments`
dictionary (i.e. keywords of the root functions), values the short
argument names
Examples
--------
Setting::
>>> parser.update_short(something='s', something_else='se')
is basically the same as::
>>> parser.update_arg('something', short='s')
>>> parser.update_arg('something_else', short='se')
which in turn is basically comparable to::
>>> parser.add_argument('-s', '--something', ...)
>>> parser.add_argument('-se', '--something_else', ...)
See Also
--------
update_shortf, update_long"""
for key, val in six.iteritems(kwargs):
self.update_arg(key, short=val)
@docstrings.dedent
def update_shortf(self, **kwargs):
"""
Update the short optional arguments belonging to a function
This method acts exactly like :meth:`update_short` but works as a
decorator (see :meth:`update_arg` and :meth:`update_argf`)
Parameters
----------
%(FuncArgParser.update_short.parameters)s
Returns
-------
function
The function that can be used as a decorator
Examples
--------
Use this method as a decorator::
>>> @parser.update_shortf(something='s', something_else='se')
... def do_something(something=None, something_else=None):
... ...
See also the examples in :meth:`update_short`.
See Also
--------
update_short, update_longf
"""
return self._as_decorator('update_short', **kwargs)
@docstrings.get_sectionsf('FuncArgParser.update_long')
@docstrings.dedent
def update_long(self, **kwargs):
"""
Update the long optional arguments (those with two leading '-')
This method updates the short argument name for the specified function
arguments as stored in :attr:`unfinished_arguments`
Parameters
----------
``**kwargs``
Keywords must be keys in the :attr:`unfinished_arguments`
dictionary (i.e. keywords of the root functions), values the long
argument names
Examples
--------
Setting::
>>> parser.update_long(something='s', something_else='se')
is basically the same as::
>>> parser.update_arg('something', long='s')
>>> parser.update_arg('something_else', long='se')
which in turn is basically comparable to::
>>> parser.add_argument('--s', dest='something', ...)
>>> parser.add_argument('--se', dest='something_else', ...)
See Also
--------
update_short, update_longf"""
for key, val in six.iteritems(kwargs):
self.update_arg(key, long=val)
@docstrings.dedent
def update_longf(self, **kwargs):
"""
Update the long optional arguments belonging to a function
This method acts exactly like :meth:`update_long` but works as a
decorator (see :meth:`update_arg` and :meth:`update_argf`)
Parameters
----------
%(FuncArgParser.update_long.parameters)s
Returns
-------
function
The function that can be used as a decorator
Examples
--------
Use this method as a decorator::
>>> @parser.update_shortf(something='s', something_else='se')
... def do_something(something=None, something_else=None):
... ...
See also the examples in :meth:`update_long`.
See Also
--------
update_short, update_longf
"""
return self._as_decorator('update_long', **kwargs)
def parse2func(self, args=None, func=None):
"""Parse the command line arguments to the setup function
This method parses the given command line arguments to the function
used in the :meth:`setup_args` method to setup up this parser
Parameters
----------
args: list
The list of command line arguments
func: function
An alternative function to use. If None, the last function or the
one specified through the `setup_as` parameter in the
:meth:`setup_args` is used.
Returns
-------
object
What ever is returned by the called function
Note
----
This method does not cover subparsers!"""
kws = vars(self.parse_args(args))
if func is None:
if self._setup_as:
func = kws.pop(self._setup_as)
else:
func = self._used_functions[-1]
return func(**kws)
def parse_chained(self, args=None):
"""
Parse the argument directly to the function used for setup
This function parses the command line arguments to the function that
has been used for the :meth:`setup_args`.
Parameters
----------
args: list
The arguments parsed to the :meth:`parse_args` function
Returns
-------
argparse.Namespace
The namespace with mapping from command name to the function
return
See also
--------
parse_known_chained
"""
kws = vars(self.parse_args(args))
return self._parse2subparser_funcs(kws)
def parse_known_chained(self, args=None):
"""
Parse the argument directly to the function used for setup
This function parses the command line arguments to the function that
has been used for the :meth:`setup_args` method.
Parameters
----------
args: list
The arguments parsed to the :meth:`parse_args` function
Returns
-------
argparse.Namespace
The namespace with mapping from command name to the function
return
list
The remaining arguments that could not be interpreted
See also
--------
parse_known
"""
ns, remainder = self.parse_known_args(args)
kws = vars(ns)
return self._parse2subparser_funcs(kws), remainder
def _parse2subparser_funcs(self, kws):
"""
Recursive function to parse arguments to chained parsers
"""
choices = getattr(self._subparsers_action, 'choices', {})
replaced = {key.replace('-', '_'): key for key in choices}
sp_commands = set(replaced).intersection(kws)
if not sp_commands:
if self._setup_as is not None:
func = kws.pop(self._setup_as)
else:
try:
func = self._used_functions[-1]
except IndexError:
return None
return func(**{
key: kws[key] for key in set(kws).difference(choices)})
else:
ret = {}
for key in sp_commands:
ret[key.replace('-', '_')] = \
choices[replaced[key]]._parse2subparser_funcs(
vars(kws[key]))
return Namespace(**ret)
def get_subparser(self, name):
"""
Convenience method to get a certain subparser
Parameters
----------
name: str
The name of the subparser
Returns
-------
FuncArgParser
The subparsers corresponding to `name`
"""
if self._subparsers_action is None:
raise ValueError("%s has no subparsers defined!" % self)
return self._subparsers_action.choices[name]
|
Chilipp/funcargparse | funcargparse/__init__.py | FuncArgParser.parse_chained | python | def parse_chained(self, args=None):
kws = vars(self.parse_args(args))
return self._parse2subparser_funcs(kws) | Parse the argument directly to the function used for setup
This function parses the command line arguments to the function that
has been used for the :meth:`setup_args`.
Parameters
----------
args: list
The arguments parsed to the :meth:`parse_args` function
Returns
-------
argparse.Namespace
The namespace with mapping from command name to the function
return
See also
--------
parse_known_chained | train | https://github.com/Chilipp/funcargparse/blob/398ce8e7fa5aa35c465215446bda151cf1ecf7ad/funcargparse/__init__.py#L926-L950 | [
"def _parse2subparser_funcs(self, kws):\n \"\"\"\n Recursive function to parse arguments to chained parsers\n \"\"\"\n choices = getattr(self._subparsers_action, 'choices', {})\n replaced = {key.replace('-', '_'): key for key in choices}\n sp_commands = set(replaced).intersection(kws)\n if not sp_commands:\n if self._setup_as is not None:\n func = kws.pop(self._setup_as)\n else:\n try:\n func = self._used_functions[-1]\n except IndexError:\n return None\n return func(**{\n key: kws[key] for key in set(kws).difference(choices)})\n else:\n ret = {}\n for key in sp_commands:\n ret[key.replace('-', '_')] = \\\n choices[replaced[key]]._parse2subparser_funcs(\n vars(kws[key]))\n return Namespace(**ret)\n"
] | class FuncArgParser(ArgumentParser):
"""Subclass of an argument parser that get's parts of the information
from a given function"""
_finalized = False
#: The unfinished arguments after the setup
unfinished_arguments = {}
#: The sections to extract from a function docstring that should be used
#: in the epilog of this parser. See also the :meth:`setup_args` method
epilog_sections = ['Notes', 'References']
#: The formatter specification for the epilog. This can either be a string
#: out of 'header', 'bold', or
#: 'rubric' or a callable (i.e. function) that takes two arguments,
#: the section title and the section text, and returns a string.
#:
#: 'heading'
#: Use section headers such as::
#:
#: Notes
#: -----
#: 'bold'
#: Just make a bold header for the section, e.g. ``**Notes**``
#: 'rubric'
#: Use a rubric rst directive, e.g. ``.. rubric:: Notes``
#:
#: .. warning::
#:
#: When building a sphinx documentation using the sphinx-argparse
#: module, this value should be set to ``'bold'`` or ``'rubric'``! Just
#: add this two lines to your conf.py:
#:
#: .. code-block:: python
#:
#: import funcargparse
#: funcargparse.FuncArgParser.epilog_formatter = 'rubric'
epilog_formatter = 'heading'
def __init__(self, *args, **kwargs):
"""
Parameters
----------
``*args,**kwargs``
Theses arguments are determined by the
:class:`argparse.ArgumentParser` base class. Note that by default,
we use a :class:`argparse.RawTextHelpFormatter` class for the
`formatter_class` keyword, whereas the
:class:`argparse.ArgumentParser` uses a
:class:`argparse.HelpFormatter`
Other Parameters
----------------
epilog_sections: list of str
The default sections to use for the epilog (see the
:attr:`epilog_sections` attribute). They can also be specified
each time the :meth:`setup_args` method is called
epilog_formatter: {'header', 'bold', 'rubric'} or function
Specify how the epilog sections should be formatted and defaults to
the :attr:`epilog_formatter` attribute. This can either be a string
out of 'header', 'bold', or 'rubric' or a callable (i.e. function)
that takes two arguments, the section title and the section text,
and returns a string.
'heading'
Use section headers such as::
Notes
-----
'bold'
Just make a bold header for the section, e.g. ``**Notes**``
'rubric'
Use a rubric rst directive, e.g. ``.. rubric:: Notes``
"""
self._subparsers_action = None
kwargs.setdefault('formatter_class', argparse.RawTextHelpFormatter)
epilog_sections = kwargs.pop('epilog_sections', None)
if epilog_sections is not None:
self.epilog_sections = epilog_sections
epilog_formatter = kwargs.pop('epilog_formatter', None)
if epilog_formatter is not None:
self.epilog_formatter = epilog_formatter
super(FuncArgParser, self).__init__(*args, **kwargs)
self.unfinished_arguments = OrderedDict()
self._used_functions = []
self.__currentarg = None
self._chain_subparsers = False
self._setup_as = None
self._epilog_formatters = {'heading': self.format_heading,
'bold': self.format_bold,
'rubric': self.format_rubric}
@staticmethod
def get_param_doc(doc, param):
"""Get the documentation and datatype for a parameter
This function returns the documentation and the argument for a
napoleon like structured docstring `doc`
Parameters
----------
doc: str
The base docstring to use
param: str
The argument to use
Returns
-------
str
The documentation of the given `param`
str
The datatype of the given `param`"""
arg_doc = docstrings.keep_params_s(doc, [param]) or \
docstrings.keep_types_s(doc, [param])
dtype = None
if arg_doc:
lines = arg_doc.splitlines()
arg_doc = dedents('\n' + '\n'.join(lines[1:]))
param_desc = lines[0].split(':', 1)
if len(param_desc) > 1:
dtype = param_desc[1].strip()
return arg_doc, dtype
@docstrings.get_sectionsf('FuncArgParser.setup_args',
sections=['Parameters', 'Returns'])
@docstrings.dedent
def setup_args(self, func=None, setup_as=None, insert_at=None,
interprete=True, epilog_sections=None,
overwrite=False, append_epilog=True):
"""
Add the parameters from the given `func` to the parameter settings
Parameters
----------
func: function
The function to use. If None, a function will be returned that can
be used as a decorator
setup_as: str
The attribute that shall be assigned to the function in the
resulting namespace. If specified, this function will be used when
calling the :meth:`parse2func` method
insert_at: int
The position where the given `func` should be inserted. If None,
it will be appended at the end and used when calling the
:meth:`parse2func` method
interprete: bool
If True (default), the docstrings are interpreted and switches and
lists are automatically inserted (see the
[interpretation-docs]_
epilog_sections: list of str
The headers of the sections to extract. If None, the
:attr:`epilog_sections` attribute is used
overwrite: bool
If True, overwrite the existing epilog and the existing description
of the parser
append_epilog: bool
If True, append to the existing epilog
Returns
-------
function
Either the function that can be used as a decorator (if `func` is
``None``), or the given `func` itself.
Examples
--------
Use this method as a decorator::
>>> @parser.setup_args
... def do_something(a=1):
'''
Just an example
Parameters
----------
a: int
A number to increment by one
'''
return a + 1
>>> args = parser.parse_args('-a 2'.split())
Or by specifying the setup_as function::
>>> @parser.setup_args(setup_as='func')
... def do_something(a=1):
'''
Just an example
Parameters
----------
a: int
A number to increment by one
'''
return a + 1
>>> args = parser.parse_args('-a 2'.split())
>>> args.func is do_something
>>> parser.parse2func('-a 2'.split())
3
References
----------
.. [interpretation-docs]
http://funcargparse.readthedocs.io/en/latest/docstring_interpretation.html)
"""
def setup(func):
# insert the function
if insert_at is None:
self._used_functions.append(func)
else:
self._used_functions.insert(insert_at, func)
args_dict = self.unfinished_arguments
# save the function to use in parse2funcs
if setup_as:
args_dict[setup_as] = dict(
long=setup_as, default=func, help=argparse.SUPPRESS)
self._setup_as = setup_as
# create arguments
args, varargs, varkw, defaults = inspect.getargspec(func)
full_doc = docstrings.dedents(inspect.getdoc(func))
summary = docstrings.get_full_description(full_doc)
if summary:
if not self.description or overwrite:
self.description = summary
full_doc = docstrings._remove_summary(full_doc)
self.extract_as_epilog(full_doc, epilog_sections, overwrite,
append_epilog)
doc = docstrings._get_section(full_doc, 'Parameters') + '\n'
doc += docstrings._get_section(full_doc, 'Other Parameters')
doc = doc.rstrip()
default_min = len(args or []) - len(defaults or [])
for i, arg in enumerate(args):
if arg == 'self' or arg in args_dict:
continue
arg_doc, dtype = self.get_param_doc(doc, arg)
args_dict[arg] = d = {'dest': arg, 'short': arg.replace('_',
'-'),
'long': arg.replace('_', '-')}
if arg_doc:
d['help'] = arg_doc
if i >= default_min:
d['default'] = defaults[i - default_min]
else:
d['positional'] = True
if interprete and dtype == 'bool' and 'default' in d:
d['action'] = 'store_false' if d['default'] else \
'store_true'
elif interprete and dtype:
if dtype.startswith('list of'):
d['nargs'] = '+'
dtype = dtype[7:].strip()
if dtype in ['str', 'string', 'strings']:
d['type'] = six.text_type
if dtype == 'strings':
dtype = 'string'
else:
try:
d['type'] = getattr(builtins, dtype)
except AttributeError:
try: # maybe the dtype has a final 's'
d['type'] = getattr(builtins, dtype[:-1])
dtype = dtype[:-1]
except AttributeError:
pass
d['metavar'] = dtype
return func
if func is None:
return setup
else:
return setup(func)
@docstrings.get_sectionsf('FuncArgParser.add_subparsers')
@docstrings.dedent
def add_subparsers(self, *args, **kwargs):
"""
Add subparsers to this parser
Parameters
----------
``*args, **kwargs``
As specified by the original
:meth:`argparse.ArgumentParser.add_subparsers` method
chain: bool
Default: False. If True, It is enabled to chain subparsers"""
chain = kwargs.pop('chain', None)
ret = super(FuncArgParser, self).add_subparsers(*args, **kwargs)
if chain:
self._chain_subparsers = True
self._subparsers_action = ret
return ret
@docstrings.dedent
def setup_subparser(
self, func=None, setup_as=None, insert_at=None, interprete=True,
epilog_sections=None, overwrite=False, append_epilog=True,
return_parser=False, name=None, **kwargs):
"""
Create a subparser with the name of the given function
Parameters are the same as for the :meth:`setup_args` function, other
parameters are parsed to the :meth:`add_subparsers` method if (and only
if) this method has not already been called.
Parameters
----------
%(FuncArgParser.setup_args.parameters)s
return_parser: bool
If True, the create parser is returned instead of the function
name: str
The name of the created parser. If None, the function name is used
and underscores (``'_'``) are replaced by minus (``'-'``)
``**kwargs``
Any other parameter that is passed to the add_parser method that
creates the parser
Other Parameters
----------------
Returns
-------
FuncArgParser or %(FuncArgParser.setup_args.returns)s
If return_parser is True, the created subparser is returned
Examples
--------
Use this method as a decorator::
>>> from funcargparser import FuncArgParser
>>> parser = FuncArgParser()
>>> @parser.setup_subparser
... def my_func(my_argument=None):
... pass
>>> args = parser.parse_args('my-func -my-argument 1'.split())
"""
def setup(func):
if self._subparsers_action is None:
raise RuntimeError(
"No subparsers have yet been created! Run the "
"add_subparsers method first!")
# replace underscore by '-'
name2use = name
if name2use is None:
name2use = func.__name__.replace('_', '-')
kwargs.setdefault('help', docstrings.get_summary(
docstrings.dedents(inspect.getdoc(func))))
parser = self._subparsers_action.add_parser(name2use, **kwargs)
parser.setup_args(
func, setup_as=setup_as, insert_at=insert_at,
interprete=interprete, epilog_sections=epilog_sections,
overwrite=overwrite, append_epilog=append_epilog)
return func, parser
if func is None:
return lambda f: setup(f)[0]
else:
return setup(func)[int(return_parser)]
@docstrings.get_sectionsf('FuncArgParser.update_arg')
@docstrings.dedent
def update_arg(self, arg, if_existent=None, **kwargs):
"""
Update the `add_argument` data for the given parameter
Parameters
----------
arg: str
The name of the function argument
if_existent: bool or None
If True, the argument is updated. If None (default), the argument
is only updated, if it exists. Otherwise, if False, the given
``**kwargs`` are only used if the argument is not yet existing
``**kwargs``
The keyword arguments any parameter for the
:meth:`argparse.ArgumentParser.add_argument` method
"""
if if_existent or (if_existent is None and
arg in self.unfinished_arguments):
self.unfinished_arguments[arg].update(kwargs)
elif not if_existent and if_existent is not None:
self.unfinished_arguments.setdefault(arg, kwargs)
@docstrings.dedent
def update_argf(self, arg, **kwargs):
"""
Update the arguments as a decorator
Parameters
---------
%(FuncArgParser.update_arg.parameters)s
Examples
--------
Use this method as a decorator::
>>> from funcargparser import FuncArgParser
>>> parser = FuncArgParser()
>>> @parser.update_argf('my_argument', type=int)
... def my_func(my_argument=None):
... pass
>>> args = parser.parse_args('my-func -my-argument 1'.split())
>>> isinstance(args.my_argument, int)
True
See Also
--------
update_arg"""
return self._as_decorator('update_arg', arg, **kwargs)
def _as_decorator(self, funcname, *args, **kwargs):
def func_decorator(func):
success = False
for parser in self._get_corresponding_parsers(func):
getattr(parser, funcname)(*args, **kwargs)
success = True
if not success:
raise ValueError(
"Could not figure out to which this %s belongs" % func)
return func
return func_decorator
def _get_corresponding_parsers(self, func):
"""Get the parser that has been set up by the given `function`"""
if func in self._used_functions:
yield self
if self._subparsers_action is not None:
for parser in self._subparsers_action.choices.values():
for sp in parser._get_corresponding_parsers(func):
yield sp
def pop_arg(self, *args, **kwargs):
"""Delete a previously defined argument from the parser
"""
return self.unfinished_arguments.pop(*args, **kwargs)
def pop_argf(self, *args, **kwargs):
"""Delete a previously defined argument from the parser via decorators
Same as :meth:`pop_arg` but it can be used as a decorator"""
return self._as_decorator('pop_arg', *args, **kwargs)
def pop_key(self, arg, key, *args, **kwargs):
"""Delete a previously defined key for the `add_argument`
"""
return self.unfinished_arguments[arg].pop(key, *args, **kwargs)
def pop_keyf(self, *args, **kwargs):
"""Delete a previously defined key for the `add_argument`
Same as :meth:`pop_key` but it can be used as a decorator"""
return self._as_decorator('pop_key', *args, **kwargs)
def create_arguments(self, subparsers=False):
"""Create and add the arguments
Parameters
----------
subparsers: bool
If True, the arguments of the subparsers are also created"""
ret = []
if not self._finalized:
for arg, d in self.unfinished_arguments.items():
try:
not_positional = int(not d.pop('positional', False))
short = d.pop('short', None)
long_name = d.pop('long', None)
if short is None and long_name is None:
raise ValueError(
"Either a short (-) or a long (--) argument must "
"be provided!")
if not not_positional:
short = arg
long_name = None
d.pop('dest', None)
if short == long_name:
long_name = None
args = []
if short:
args.append('-' * not_positional + short)
if long_name:
args.append('--' * not_positional + long_name)
group = d.pop('group', self)
if d.get('action') in ['store_true', 'store_false']:
d.pop('metavar', None)
ret.append(group.add_argument(*args, **d))
except Exception:
print('Error while creating argument %s' % arg)
raise
else:
raise ValueError('Parser has already been finalized!')
self._finalized = True
if subparsers and self._subparsers_action is not None:
for parser in self._subparsers_action.choices.values():
parser.create_arguments(True)
return ret
def append2help(self, arg, s):
"""Append the given string to the help of argument `arg`
Parameters
----------
arg: str
The function argument
s: str
The string to append to the help"""
self.unfinished_arguments[arg]['help'] += s
def append2helpf(self, arg, s):
"""Append the given string to the help of argument `arg`
Parameters
----------
arg: str
The function argument
s: str
The string to append to the help"""
return self._as_decorator('append2help', arg, s)
@staticmethod
def format_bold(section, text):
"""Make a bold formatting for the section header"""
return '**%s**\n\n%s' % (section, text)
@staticmethod
def format_rubric(section, text):
"""Make a bold formatting for the section header"""
return '.. rubric:: %s\n\n%s' % (section, text)
@staticmethod
def format_heading(section, text):
return '\n'.join([section, '-' * len(section), text])
def format_epilog_section(self, section, text):
"""Format a section for the epilog by inserting a format"""
try:
func = self._epilog_formatters[self.epilog_formatter]
except KeyError:
if not callable(self.epilog_formatter):
raise
func = self.epilog_formatter
return func(section, text)
def extract_as_epilog(self, text, sections=None, overwrite=False,
append=True):
"""Extract epilog sections from the a docstring
Parameters
----------
text
The docstring to use
sections: list of str
The headers of the sections to extract. If None, the
:attr:`epilog_sections` attribute is used
overwrite: bool
If True, overwrite the existing epilog
append: bool
If True, append to the existing epilog"""
if sections is None:
sections = self.epilog_sections
if ((not self.epilog or overwrite or append) and sections):
epilog_parts = []
for sec in sections:
text = docstrings._get_section(text, sec).strip()
if text:
epilog_parts.append(
self.format_epilog_section(sec, text))
if epilog_parts:
epilog = '\n\n'.join(epilog_parts)
if overwrite or not self.epilog:
self.epilog = epilog
else:
self.epilog += '\n\n' + epilog
def grouparg(self, arg, my_arg=None, parent_cmds=[]):
"""
Grouper function for chaining subcommands
Parameters
----------
arg: str
The current command line argument that is parsed
my_arg: str
The name of this subparser. If None, this parser is the main
parser and has no parent parser
parent_cmds: list of str
The available commands of the parent parsers
Returns
-------
str or None
The grouping key for the given `arg` or None if the key does
not correspond to this parser or this parser is the main parser
and does not have seen a subparser yet
Notes
-----
Quite complicated, there is no real need to deal with this function
"""
if self._subparsers_action is None:
return None
commands = self._subparsers_action.choices
currentarg = self.__currentarg
# the default return value is the current argument we are in or the
# name of the subparser itself
ret = currentarg or my_arg
if currentarg is not None:
# if we are already in a sub command, we use the sub parser
sp_key = commands[currentarg].grouparg(arg, currentarg, chain(
commands, parent_cmds))
if sp_key is None and arg in commands:
# if the subparser did not recognize the command, we use the
# command the corresponds to this parser or (of this parser
# is the parent parser) the current subparser
self.__currentarg = currentarg = arg
ret = my_arg or currentarg
elif sp_key not in commands and arg in parent_cmds:
# otherwise, if the subparser recognizes the commmand but it is
# not in the known command of this parser, it must be another
# command of the subparser and this parser can ignore it
ret = None
else:
# otherwise the command belongs to this subparser (if this one
# is not the subparser) or the current subparser
ret = my_arg or currentarg
elif arg in commands:
# if the argument is a valid subparser, we return this one
self.__currentarg = arg
ret = arg
elif arg in parent_cmds:
# if the argument is not a valid subparser but in one of our
# parents, we return None to signalize that we cannot categorize
# it
ret = None
return ret
def parse_known_args(self, args=None, namespace=None):
if self._chain_subparsers:
if args is None:
args = sys.argv[1:]
choices_d = OrderedDict()
remainders = OrderedDict()
main_args = []
# get the first argument to make sure that everything works
cmd = self.__currentarg = None
for i, (cmd, subargs) in enumerate(groupby(args, self.grouparg)):
if cmd is None:
main_args += list(subargs)
else:
# replace '-' by underscore
ns_cmd = cmd.replace('-', '_')
choices_d[ns_cmd], remainders[ns_cmd] = super(
FuncArgParser, self).parse_known_args(
list(chain(main_args, subargs)))
main_ns, remainders[None] = self.__parse_main(main_args)
for key, val in vars(main_ns).items():
choices_d[key] = val
self.__currentarg = None
if '__dummy' in choices_d:
del choices_d['__dummy']
return Namespace(**choices_d), list(chain(*remainders.values()))
# otherwise, use the default behaviour
return super(FuncArgParser, self).parse_known_args(args, namespace)
def __parse_main(self, args):
"""Parse the main arguments only. This is a work around for python 2.7
because argparse does not allow to parse arguments without subparsers
"""
if six.PY2:
self._subparsers_action.add_parser("__dummy")
return super(FuncArgParser, self).parse_known_args(
list(args) + ['__dummy'])
return super(FuncArgParser, self).parse_known_args(args)
@docstrings.get_sectionsf('FuncArgParser.update_short')
@docstrings.dedent
def update_short(self, **kwargs):
"""
Update the short optional arguments (those with one leading '-')
This method updates the short argument name for the specified function
arguments as stored in :attr:`unfinished_arguments`
Parameters
----------
``**kwargs``
Keywords must be keys in the :attr:`unfinished_arguments`
dictionary (i.e. keywords of the root functions), values the short
argument names
Examples
--------
Setting::
>>> parser.update_short(something='s', something_else='se')
is basically the same as::
>>> parser.update_arg('something', short='s')
>>> parser.update_arg('something_else', short='se')
which in turn is basically comparable to::
>>> parser.add_argument('-s', '--something', ...)
>>> parser.add_argument('-se', '--something_else', ...)
See Also
--------
update_shortf, update_long"""
for key, val in six.iteritems(kwargs):
self.update_arg(key, short=val)
@docstrings.dedent
def update_shortf(self, **kwargs):
"""
Update the short optional arguments belonging to a function
This method acts exactly like :meth:`update_short` but works as a
decorator (see :meth:`update_arg` and :meth:`update_argf`)
Parameters
----------
%(FuncArgParser.update_short.parameters)s
Returns
-------
function
The function that can be used as a decorator
Examples
--------
Use this method as a decorator::
>>> @parser.update_shortf(something='s', something_else='se')
... def do_something(something=None, something_else=None):
... ...
See also the examples in :meth:`update_short`.
See Also
--------
update_short, update_longf
"""
return self._as_decorator('update_short', **kwargs)
@docstrings.get_sectionsf('FuncArgParser.update_long')
@docstrings.dedent
def update_long(self, **kwargs):
"""
Update the long optional arguments (those with two leading '-')
This method updates the short argument name for the specified function
arguments as stored in :attr:`unfinished_arguments`
Parameters
----------
``**kwargs``
Keywords must be keys in the :attr:`unfinished_arguments`
dictionary (i.e. keywords of the root functions), values the long
argument names
Examples
--------
Setting::
>>> parser.update_long(something='s', something_else='se')
is basically the same as::
>>> parser.update_arg('something', long='s')
>>> parser.update_arg('something_else', long='se')
which in turn is basically comparable to::
>>> parser.add_argument('--s', dest='something', ...)
>>> parser.add_argument('--se', dest='something_else', ...)
See Also
--------
update_short, update_longf"""
for key, val in six.iteritems(kwargs):
self.update_arg(key, long=val)
@docstrings.dedent
def update_longf(self, **kwargs):
"""
Update the long optional arguments belonging to a function
This method acts exactly like :meth:`update_long` but works as a
decorator (see :meth:`update_arg` and :meth:`update_argf`)
Parameters
----------
%(FuncArgParser.update_long.parameters)s
Returns
-------
function
The function that can be used as a decorator
Examples
--------
Use this method as a decorator::
>>> @parser.update_shortf(something='s', something_else='se')
... def do_something(something=None, something_else=None):
... ...
See also the examples in :meth:`update_long`.
See Also
--------
update_short, update_longf
"""
return self._as_decorator('update_long', **kwargs)
def parse2func(self, args=None, func=None):
"""Parse the command line arguments to the setup function
This method parses the given command line arguments to the function
used in the :meth:`setup_args` method to setup up this parser
Parameters
----------
args: list
The list of command line arguments
func: function
An alternative function to use. If None, the last function or the
one specified through the `setup_as` parameter in the
:meth:`setup_args` is used.
Returns
-------
object
What ever is returned by the called function
Note
----
This method does not cover subparsers!"""
kws = vars(self.parse_args(args))
if func is None:
if self._setup_as:
func = kws.pop(self._setup_as)
else:
func = self._used_functions[-1]
return func(**kws)
def parse_known2func(self, args=None, func=None):
"""Parse the command line arguments to the setup function
This method parses the given command line arguments to the function
used in the :meth:`setup_args` method to setup up this parser
Parameters
----------
args: list
The list of command line arguments
func: function or str
An alternative function to use. If None, the last function or the
one specified through the `setup_as` parameter in the
:meth:`setup_args` is used.
Returns
-------
object
What ever is returned by the called function
list
The remaining command line arguments that could not be interpreted
Note
----
This method does not cover subparsers!"""
ns, remainder = self.parse_known_args(args)
kws = vars(ns)
if func is None:
if self._setup_as:
func = kws.pop(self._setup_as)
else:
func = self._used_functions[-1]
return func(**kws), remainder
def parse_known_chained(self, args=None):
"""
Parse the argument directly to the function used for setup
This function parses the command line arguments to the function that
has been used for the :meth:`setup_args` method.
Parameters
----------
args: list
The arguments parsed to the :meth:`parse_args` function
Returns
-------
argparse.Namespace
The namespace with mapping from command name to the function
return
list
The remaining arguments that could not be interpreted
See also
--------
parse_known
"""
ns, remainder = self.parse_known_args(args)
kws = vars(ns)
return self._parse2subparser_funcs(kws), remainder
def _parse2subparser_funcs(self, kws):
"""
Recursive function to parse arguments to chained parsers
"""
choices = getattr(self._subparsers_action, 'choices', {})
replaced = {key.replace('-', '_'): key for key in choices}
sp_commands = set(replaced).intersection(kws)
if not sp_commands:
if self._setup_as is not None:
func = kws.pop(self._setup_as)
else:
try:
func = self._used_functions[-1]
except IndexError:
return None
return func(**{
key: kws[key] for key in set(kws).difference(choices)})
else:
ret = {}
for key in sp_commands:
ret[key.replace('-', '_')] = \
choices[replaced[key]]._parse2subparser_funcs(
vars(kws[key]))
return Namespace(**ret)
def get_subparser(self, name):
"""
Convenience method to get a certain subparser
Parameters
----------
name: str
The name of the subparser
Returns
-------
FuncArgParser
The subparsers corresponding to `name`
"""
if self._subparsers_action is None:
raise ValueError("%s has no subparsers defined!" % self)
return self._subparsers_action.choices[name]
|
Chilipp/funcargparse | funcargparse/__init__.py | FuncArgParser.parse_known_chained | python | def parse_known_chained(self, args=None):
ns, remainder = self.parse_known_args(args)
kws = vars(ns)
return self._parse2subparser_funcs(kws), remainder | Parse the argument directly to the function used for setup
This function parses the command line arguments to the function that
has been used for the :meth:`setup_args` method.
Parameters
----------
args: list
The arguments parsed to the :meth:`parse_args` function
Returns
-------
argparse.Namespace
The namespace with mapping from command name to the function
return
list
The remaining arguments that could not be interpreted
See also
--------
parse_known | train | https://github.com/Chilipp/funcargparse/blob/398ce8e7fa5aa35c465215446bda151cf1ecf7ad/funcargparse/__init__.py#L952-L979 | [
"def parse_known_args(self, args=None, namespace=None):\n if self._chain_subparsers:\n if args is None:\n args = sys.argv[1:]\n choices_d = OrderedDict()\n remainders = OrderedDict()\n main_args = []\n # get the first argument to make sure that everything works\n cmd = self.__currentarg = None\n for i, (cmd, subargs) in enumerate(groupby(args, self.grouparg)):\n if cmd is None:\n main_args += list(subargs)\n else:\n # replace '-' by underscore\n ns_cmd = cmd.replace('-', '_')\n choices_d[ns_cmd], remainders[ns_cmd] = super(\n FuncArgParser, self).parse_known_args(\n list(chain(main_args, subargs)))\n main_ns, remainders[None] = self.__parse_main(main_args)\n for key, val in vars(main_ns).items():\n choices_d[key] = val\n self.__currentarg = None\n if '__dummy' in choices_d:\n del choices_d['__dummy']\n return Namespace(**choices_d), list(chain(*remainders.values()))\n # otherwise, use the default behaviour\n return super(FuncArgParser, self).parse_known_args(args, namespace)\n",
"def _parse2subparser_funcs(self, kws):\n \"\"\"\n Recursive function to parse arguments to chained parsers\n \"\"\"\n choices = getattr(self._subparsers_action, 'choices', {})\n replaced = {key.replace('-', '_'): key for key in choices}\n sp_commands = set(replaced).intersection(kws)\n if not sp_commands:\n if self._setup_as is not None:\n func = kws.pop(self._setup_as)\n else:\n try:\n func = self._used_functions[-1]\n except IndexError:\n return None\n return func(**{\n key: kws[key] for key in set(kws).difference(choices)})\n else:\n ret = {}\n for key in sp_commands:\n ret[key.replace('-', '_')] = \\\n choices[replaced[key]]._parse2subparser_funcs(\n vars(kws[key]))\n return Namespace(**ret)\n"
] | class FuncArgParser(ArgumentParser):
"""Subclass of an argument parser that get's parts of the information
from a given function"""
_finalized = False
#: The unfinished arguments after the setup
unfinished_arguments = {}
#: The sections to extract from a function docstring that should be used
#: in the epilog of this parser. See also the :meth:`setup_args` method
epilog_sections = ['Notes', 'References']
#: The formatter specification for the epilog. This can either be a string
#: out of 'header', 'bold', or
#: 'rubric' or a callable (i.e. function) that takes two arguments,
#: the section title and the section text, and returns a string.
#:
#: 'heading'
#: Use section headers such as::
#:
#: Notes
#: -----
#: 'bold'
#: Just make a bold header for the section, e.g. ``**Notes**``
#: 'rubric'
#: Use a rubric rst directive, e.g. ``.. rubric:: Notes``
#:
#: .. warning::
#:
#: When building a sphinx documentation using the sphinx-argparse
#: module, this value should be set to ``'bold'`` or ``'rubric'``! Just
#: add this two lines to your conf.py:
#:
#: .. code-block:: python
#:
#: import funcargparse
#: funcargparse.FuncArgParser.epilog_formatter = 'rubric'
epilog_formatter = 'heading'
def __init__(self, *args, **kwargs):
"""
Parameters
----------
``*args,**kwargs``
Theses arguments are determined by the
:class:`argparse.ArgumentParser` base class. Note that by default,
we use a :class:`argparse.RawTextHelpFormatter` class for the
`formatter_class` keyword, whereas the
:class:`argparse.ArgumentParser` uses a
:class:`argparse.HelpFormatter`
Other Parameters
----------------
epilog_sections: list of str
The default sections to use for the epilog (see the
:attr:`epilog_sections` attribute). They can also be specified
each time the :meth:`setup_args` method is called
epilog_formatter: {'header', 'bold', 'rubric'} or function
Specify how the epilog sections should be formatted and defaults to
the :attr:`epilog_formatter` attribute. This can either be a string
out of 'header', 'bold', or 'rubric' or a callable (i.e. function)
that takes two arguments, the section title and the section text,
and returns a string.
'heading'
Use section headers such as::
Notes
-----
'bold'
Just make a bold header for the section, e.g. ``**Notes**``
'rubric'
Use a rubric rst directive, e.g. ``.. rubric:: Notes``
"""
self._subparsers_action = None
kwargs.setdefault('formatter_class', argparse.RawTextHelpFormatter)
epilog_sections = kwargs.pop('epilog_sections', None)
if epilog_sections is not None:
self.epilog_sections = epilog_sections
epilog_formatter = kwargs.pop('epilog_formatter', None)
if epilog_formatter is not None:
self.epilog_formatter = epilog_formatter
super(FuncArgParser, self).__init__(*args, **kwargs)
self.unfinished_arguments = OrderedDict()
self._used_functions = []
self.__currentarg = None
self._chain_subparsers = False
self._setup_as = None
self._epilog_formatters = {'heading': self.format_heading,
'bold': self.format_bold,
'rubric': self.format_rubric}
@staticmethod
def get_param_doc(doc, param):
"""Get the documentation and datatype for a parameter
This function returns the documentation and the argument for a
napoleon like structured docstring `doc`
Parameters
----------
doc: str
The base docstring to use
param: str
The argument to use
Returns
-------
str
The documentation of the given `param`
str
The datatype of the given `param`"""
arg_doc = docstrings.keep_params_s(doc, [param]) or \
docstrings.keep_types_s(doc, [param])
dtype = None
if arg_doc:
lines = arg_doc.splitlines()
arg_doc = dedents('\n' + '\n'.join(lines[1:]))
param_desc = lines[0].split(':', 1)
if len(param_desc) > 1:
dtype = param_desc[1].strip()
return arg_doc, dtype
@docstrings.get_sectionsf('FuncArgParser.setup_args',
sections=['Parameters', 'Returns'])
@docstrings.dedent
def setup_args(self, func=None, setup_as=None, insert_at=None,
interprete=True, epilog_sections=None,
overwrite=False, append_epilog=True):
"""
Add the parameters from the given `func` to the parameter settings
Parameters
----------
func: function
The function to use. If None, a function will be returned that can
be used as a decorator
setup_as: str
The attribute that shall be assigned to the function in the
resulting namespace. If specified, this function will be used when
calling the :meth:`parse2func` method
insert_at: int
The position where the given `func` should be inserted. If None,
it will be appended at the end and used when calling the
:meth:`parse2func` method
interprete: bool
If True (default), the docstrings are interpreted and switches and
lists are automatically inserted (see the
[interpretation-docs]_
epilog_sections: list of str
The headers of the sections to extract. If None, the
:attr:`epilog_sections` attribute is used
overwrite: bool
If True, overwrite the existing epilog and the existing description
of the parser
append_epilog: bool
If True, append to the existing epilog
Returns
-------
function
Either the function that can be used as a decorator (if `func` is
``None``), or the given `func` itself.
Examples
--------
Use this method as a decorator::
>>> @parser.setup_args
... def do_something(a=1):
'''
Just an example
Parameters
----------
a: int
A number to increment by one
'''
return a + 1
>>> args = parser.parse_args('-a 2'.split())
Or by specifying the setup_as function::
>>> @parser.setup_args(setup_as='func')
... def do_something(a=1):
'''
Just an example
Parameters
----------
a: int
A number to increment by one
'''
return a + 1
>>> args = parser.parse_args('-a 2'.split())
>>> args.func is do_something
>>> parser.parse2func('-a 2'.split())
3
References
----------
.. [interpretation-docs]
http://funcargparse.readthedocs.io/en/latest/docstring_interpretation.html)
"""
def setup(func):
# insert the function
if insert_at is None:
self._used_functions.append(func)
else:
self._used_functions.insert(insert_at, func)
args_dict = self.unfinished_arguments
# save the function to use in parse2funcs
if setup_as:
args_dict[setup_as] = dict(
long=setup_as, default=func, help=argparse.SUPPRESS)
self._setup_as = setup_as
# create arguments
args, varargs, varkw, defaults = inspect.getargspec(func)
full_doc = docstrings.dedents(inspect.getdoc(func))
summary = docstrings.get_full_description(full_doc)
if summary:
if not self.description or overwrite:
self.description = summary
full_doc = docstrings._remove_summary(full_doc)
self.extract_as_epilog(full_doc, epilog_sections, overwrite,
append_epilog)
doc = docstrings._get_section(full_doc, 'Parameters') + '\n'
doc += docstrings._get_section(full_doc, 'Other Parameters')
doc = doc.rstrip()
default_min = len(args or []) - len(defaults or [])
for i, arg in enumerate(args):
if arg == 'self' or arg in args_dict:
continue
arg_doc, dtype = self.get_param_doc(doc, arg)
args_dict[arg] = d = {'dest': arg, 'short': arg.replace('_',
'-'),
'long': arg.replace('_', '-')}
if arg_doc:
d['help'] = arg_doc
if i >= default_min:
d['default'] = defaults[i - default_min]
else:
d['positional'] = True
if interprete and dtype == 'bool' and 'default' in d:
d['action'] = 'store_false' if d['default'] else \
'store_true'
elif interprete and dtype:
if dtype.startswith('list of'):
d['nargs'] = '+'
dtype = dtype[7:].strip()
if dtype in ['str', 'string', 'strings']:
d['type'] = six.text_type
if dtype == 'strings':
dtype = 'string'
else:
try:
d['type'] = getattr(builtins, dtype)
except AttributeError:
try: # maybe the dtype has a final 's'
d['type'] = getattr(builtins, dtype[:-1])
dtype = dtype[:-1]
except AttributeError:
pass
d['metavar'] = dtype
return func
if func is None:
return setup
else:
return setup(func)
@docstrings.get_sectionsf('FuncArgParser.add_subparsers')
@docstrings.dedent
def add_subparsers(self, *args, **kwargs):
"""
Add subparsers to this parser
Parameters
----------
``*args, **kwargs``
As specified by the original
:meth:`argparse.ArgumentParser.add_subparsers` method
chain: bool
Default: False. If True, It is enabled to chain subparsers"""
chain = kwargs.pop('chain', None)
ret = super(FuncArgParser, self).add_subparsers(*args, **kwargs)
if chain:
self._chain_subparsers = True
self._subparsers_action = ret
return ret
@docstrings.dedent
def setup_subparser(
self, func=None, setup_as=None, insert_at=None, interprete=True,
epilog_sections=None, overwrite=False, append_epilog=True,
return_parser=False, name=None, **kwargs):
"""
Create a subparser with the name of the given function
Parameters are the same as for the :meth:`setup_args` function, other
parameters are parsed to the :meth:`add_subparsers` method if (and only
if) this method has not already been called.
Parameters
----------
%(FuncArgParser.setup_args.parameters)s
return_parser: bool
If True, the create parser is returned instead of the function
name: str
The name of the created parser. If None, the function name is used
and underscores (``'_'``) are replaced by minus (``'-'``)
``**kwargs``
Any other parameter that is passed to the add_parser method that
creates the parser
Other Parameters
----------------
Returns
-------
FuncArgParser or %(FuncArgParser.setup_args.returns)s
If return_parser is True, the created subparser is returned
Examples
--------
Use this method as a decorator::
>>> from funcargparser import FuncArgParser
>>> parser = FuncArgParser()
>>> @parser.setup_subparser
... def my_func(my_argument=None):
... pass
>>> args = parser.parse_args('my-func -my-argument 1'.split())
"""
def setup(func):
if self._subparsers_action is None:
raise RuntimeError(
"No subparsers have yet been created! Run the "
"add_subparsers method first!")
# replace underscore by '-'
name2use = name
if name2use is None:
name2use = func.__name__.replace('_', '-')
kwargs.setdefault('help', docstrings.get_summary(
docstrings.dedents(inspect.getdoc(func))))
parser = self._subparsers_action.add_parser(name2use, **kwargs)
parser.setup_args(
func, setup_as=setup_as, insert_at=insert_at,
interprete=interprete, epilog_sections=epilog_sections,
overwrite=overwrite, append_epilog=append_epilog)
return func, parser
if func is None:
return lambda f: setup(f)[0]
else:
return setup(func)[int(return_parser)]
@docstrings.get_sectionsf('FuncArgParser.update_arg')
@docstrings.dedent
def update_arg(self, arg, if_existent=None, **kwargs):
"""
Update the `add_argument` data for the given parameter
Parameters
----------
arg: str
The name of the function argument
if_existent: bool or None
If True, the argument is updated. If None (default), the argument
is only updated, if it exists. Otherwise, if False, the given
``**kwargs`` are only used if the argument is not yet existing
``**kwargs``
The keyword arguments any parameter for the
:meth:`argparse.ArgumentParser.add_argument` method
"""
if if_existent or (if_existent is None and
arg in self.unfinished_arguments):
self.unfinished_arguments[arg].update(kwargs)
elif not if_existent and if_existent is not None:
self.unfinished_arguments.setdefault(arg, kwargs)
    @docstrings.dedent
    def update_argf(self, arg, **kwargs):
        """
        Update the arguments as a decorator

        Parameters
        ----------
        %(FuncArgParser.update_arg.parameters)s

        Returns
        -------
        function
            The decorator that applies :meth:`update_arg` and returns the
            decorated function unchanged

        Examples
        --------
        Use this method as a decorator::

            >>> from funcargparser import FuncArgParser

            >>> parser = FuncArgParser()

            >>> @parser.update_argf('my_argument', type=int)
            ... def my_func(my_argument=None):
            ...     pass

            >>> args = parser.parse_args('my-func -my-argument 1'.split())
            >>> isinstance(args.my_argument, int)
            True

        See Also
        --------
        update_arg"""
        # delegate to update_arg on every parser set up with the decorated
        # function
        return self._as_decorator('update_arg', arg, **kwargs)
def _as_decorator(self, funcname, *args, **kwargs):
def func_decorator(func):
success = False
for parser in self._get_corresponding_parsers(func):
getattr(parser, funcname)(*args, **kwargs)
success = True
if not success:
raise ValueError(
"Could not figure out to which this %s belongs" % func)
return func
return func_decorator
def _get_corresponding_parsers(self, func):
"""Get the parser that has been set up by the given `function`"""
if func in self._used_functions:
yield self
if self._subparsers_action is not None:
for parser in self._subparsers_action.choices.values():
for sp in parser._get_corresponding_parsers(func):
yield sp
    def pop_arg(self, *args, **kwargs):
        """Delete a previously defined argument from the parser

        Parameters are passed to :meth:`dict.pop` of the
        :attr:`unfinished_arguments` mapping, i.e. ``pop_arg(name)`` or
        ``pop_arg(name, default)``.
        """
        return self.unfinished_arguments.pop(*args, **kwargs)
    def pop_argf(self, *args, **kwargs):
        """Delete a previously defined argument from the parser via decorators

        Same as :meth:`pop_arg` but it can be used as a decorator: the
        argument is removed from every parser that was set up with the
        decorated function."""
        return self._as_decorator('pop_arg', *args, **kwargs)
    def pop_key(self, arg, key, *args, **kwargs):
        """Delete a previously defined key for the `add_argument`

        Removes `key` (e.g. ``'help'`` or ``'default'``) from the settings
        stored for the function argument `arg`; additional parameters are
        passed to :meth:`dict.pop` (i.e. an optional default value)."""
        return self.unfinished_arguments[arg].pop(key, *args, **kwargs)
    def pop_keyf(self, *args, **kwargs):
        """Delete a previously defined key for the `add_argument`

        Same as :meth:`pop_key` but it can be used as a decorator: the key
        is removed on every parser that was set up with the decorated
        function."""
        return self._as_decorator('pop_key', *args, **kwargs)
    def create_arguments(self, subparsers=False):
        """Create and add the arguments

        Turns every entry of :attr:`unfinished_arguments` into an
        :meth:`argparse.ArgumentParser.add_argument` call. Note that the
        settings dictionaries are consumed (keys are popped), so this can
        only be run once per parser.

        Parameters
        ----------
        subparsers: bool
            If True, the arguments of the subparsers are also created

        Returns
        -------
        list
            The :class:`argparse.Action` instances created by the
            ``add_argument`` calls

        Raises
        ------
        ValueError
            If this parser has already been finalized by a previous call"""
        ret = []
        if not self._finalized:
            for arg, d in self.unfinished_arguments.items():
                try:
                    # 1 for optionals ('-'/'--' prefix), 0 for positionals
                    not_positional = int(not d.pop('positional', False))
                    short = d.pop('short', None)
                    long_name = d.pop('long', None)
                    if short is None and long_name is None:
                        raise ValueError(
                            "Either a short (-) or a long (--) argument must "
                            "be provided!")
                    if not not_positional:
                        # positionals take the plain argument name only and
                        # argparse forbids an explicit dest for them
                        short = arg
                        long_name = None
                        d.pop('dest', None)
                    if short == long_name:
                        long_name = None
                    args = []
                    if short:
                        args.append('-' * not_positional + short)
                    if long_name:
                        args.append('--' * not_positional + long_name)
                    # arguments may go into an argument group instead of the
                    # parser itself
                    group = d.pop('group', self)
                    if d.get('action') in ['store_true', 'store_false']:
                        # switches do not accept a metavar
                        d.pop('metavar', None)
                    ret.append(group.add_argument(*args, **d))
                except Exception:
                    print('Error while creating argument %s' % arg)
                    raise
        else:
            raise ValueError('Parser has already been finalized!')
        self._finalized = True
        if subparsers and self._subparsers_action is not None:
            for parser in self._subparsers_action.choices.values():
                parser.create_arguments(True)
        return ret
def append2help(self, arg, s):
"""Append the given string to the help of argument `arg`
Parameters
----------
arg: str
The function argument
s: str
The string to append to the help"""
self.unfinished_arguments[arg]['help'] += s
    def append2helpf(self, arg, s):
        """Append the given string to the help of argument `arg`

        Same as :meth:`append2help` but it can be used as a decorator

        Parameters
        ----------
        arg: str
            The function argument
        s: str
            The string to append to the help"""
        return self._as_decorator('append2help', arg, s)
@staticmethod
def format_bold(section, text):
"""Make a bold formatting for the section header"""
return '**%s**\n\n%s' % (section, text)
@staticmethod
def format_rubric(section, text):
"""Make a bold formatting for the section header"""
return '.. rubric:: %s\n\n%s' % (section, text)
@staticmethod
def format_heading(section, text):
return '\n'.join([section, '-' * len(section), text])
    def format_epilog_section(self, section, text):
        """Format a section for the epilog by inserting a format

        Looks up :attr:`epilog_formatter` among the builtin formatters
        (``'heading'``, ``'bold'``, ``'rubric'``); any other value must be
        a callable that is used directly.

        Parameters
        ----------
        section: str
            The section title
        text: str
            The section body

        Returns
        -------
        str
            The formatted section"""
        try:
            func = self._epilog_formatters[self.epilog_formatter]
        except KeyError:
            # not one of the builtin names -- must be a user callable,
            # otherwise re-raise the original KeyError
            if not callable(self.epilog_formatter):
                raise
            func = self.epilog_formatter
        return func(section, text)
def extract_as_epilog(self, text, sections=None, overwrite=False,
append=True):
"""Extract epilog sections from the a docstring
Parameters
----------
text
The docstring to use
sections: list of str
The headers of the sections to extract. If None, the
:attr:`epilog_sections` attribute is used
overwrite: bool
If True, overwrite the existing epilog
append: bool
If True, append to the existing epilog"""
if sections is None:
sections = self.epilog_sections
if ((not self.epilog or overwrite or append) and sections):
epilog_parts = []
for sec in sections:
text = docstrings._get_section(text, sec).strip()
if text:
epilog_parts.append(
self.format_epilog_section(sec, text))
if epilog_parts:
epilog = '\n\n'.join(epilog_parts)
if overwrite or not self.epilog:
self.epilog = epilog
else:
self.epilog += '\n\n' + epilog
    def grouparg(self, arg, my_arg=None, parent_cmds=[]):
        """
        Grouper function for chaining subcommands

        Parameters
        ----------
        arg: str
            The current command line argument that is parsed
        my_arg: str
            The name of this subparser. If None, this parser is the main
            parser and has no parent parser
        parent_cmds: list of str
            The available commands of the parent parsers

        Returns
        -------
        str or None
            The grouping key for the given `arg` or None if the key does
            not correspond to this parser or this parser is the main parser
            and does not have seen a subparser yet

        Notes
        -----
        Quite complicated, there is no real need to deal with this function
        """
        # NOTE: the mutable default for `parent_cmds` is safe here because
        # the list is only read, never modified.
        # NOTE(review): in the recursive call below, `parent_cmds` is a
        # one-shot `chain` iterator; it is tested for membership at most once
        # per invocation -- confirm before adding further ``in`` checks.
        if self._subparsers_action is None:
            # no subcommands at all -> nothing to group
            return None
        commands = self._subparsers_action.choices
        currentarg = self.__currentarg
        # the default return value is the current argument we are in or the
        # name of the subparser itself
        ret = currentarg or my_arg
        if currentarg is not None:
            # if we are already in a sub command, we use the sub parser
            sp_key = commands[currentarg].grouparg(arg, currentarg, chain(
                commands, parent_cmds))
            if sp_key is None and arg in commands:
                # if the subparser did not recognize the command, we use the
                # command that corresponds to this parser or (if this parser
                # is the parent parser) the current subparser
                self.__currentarg = currentarg = arg
                ret = my_arg or currentarg
            elif sp_key not in commands and arg in parent_cmds:
                # otherwise, if the subparser recognizes the command but it is
                # not in the known commands of this parser, it must be another
                # command of the subparser and this parser can ignore it
                ret = None
            else:
                # otherwise the command belongs to this subparser (if this one
                # is not the subparser) or the current subparser
                ret = my_arg or currentarg
        elif arg in commands:
            # if the argument is a valid subparser, we return this one
            self.__currentarg = arg
            ret = arg
        elif arg in parent_cmds:
            # if the argument is not a valid subparser but in one of our
            # parents, we return None to signalize that we cannot categorize
            # it
            ret = None
        return ret
    def parse_known_args(self, args=None, namespace=None):
        """Parse known arguments, optionally handling chained subcommands

        If subparser chaining has been enabled (see :meth:`add_subparsers`),
        the command line is split into per-subcommand groups via
        :meth:`grouparg`, each group is parsed separately and the result is
        a namespace mapping each subcommand name to its sub-namespace.
        Otherwise this behaves like the inherited
        :meth:`argparse.ArgumentParser.parse_known_args`."""
        if self._chain_subparsers:
            if args is None:
                args = sys.argv[1:]
            choices_d = OrderedDict()
            remainders = OrderedDict()
            main_args = []
            # get the first argument to make sure that everything works
            cmd = self.__currentarg = None
            for i, (cmd, subargs) in enumerate(groupby(args, self.grouparg)):
                if cmd is None:
                    # ungrouped tokens belong to the main parser
                    main_args += list(subargs)
                else:
                    # replace '-' by underscore
                    ns_cmd = cmd.replace('-', '_')
                    choices_d[ns_cmd], remainders[ns_cmd] = super(
                        FuncArgParser, self).parse_known_args(
                            list(chain(main_args, subargs)))
            # parse the main arguments last; their values are merged into
            # the top-level namespace
            main_ns, remainders[None] = self.__parse_main(main_args)
            for key, val in vars(main_ns).items():
                choices_d[key] = val
            self.__currentarg = None
            # drop the dummy command inserted by __parse_main on Python 2
            if '__dummy' in choices_d:
                del choices_d['__dummy']
            return Namespace(**choices_d), list(chain(*remainders.values()))
        # otherwise, use the default behaviour
        return super(FuncArgParser, self).parse_known_args(args, namespace)
    def __parse_main(self, args):
        """Parse the main arguments only. This is a work around for python 2.7
        because argparse does not allow to parse arguments without subparsers
        """
        if six.PY2:
            # register a throw-away subcommand and append it so that the
            # py2 argparse does not abort on a missing subcommand; the
            # '__dummy' entry is removed again in parse_known_args
            self._subparsers_action.add_parser("__dummy")
            return super(FuncArgParser, self).parse_known_args(
                list(args) + ['__dummy'])
        return super(FuncArgParser, self).parse_known_args(args)
@docstrings.get_sectionsf('FuncArgParser.update_short')
@docstrings.dedent
def update_short(self, **kwargs):
"""
Update the short optional arguments (those with one leading '-')
This method updates the short argument name for the specified function
arguments as stored in :attr:`unfinished_arguments`
Parameters
----------
``**kwargs``
Keywords must be keys in the :attr:`unfinished_arguments`
dictionary (i.e. keywords of the root functions), values the short
argument names
Examples
--------
Setting::
>>> parser.update_short(something='s', something_else='se')
is basically the same as::
>>> parser.update_arg('something', short='s')
>>> parser.update_arg('something_else', short='se')
which in turn is basically comparable to::
>>> parser.add_argument('-s', '--something', ...)
>>> parser.add_argument('-se', '--something_else', ...)
See Also
--------
update_shortf, update_long"""
for key, val in six.iteritems(kwargs):
self.update_arg(key, short=val)
    @docstrings.dedent
    def update_shortf(self, **kwargs):
        """
        Update the short optional arguments belonging to a function

        This method acts exactly like :meth:`update_short` but works as a
        decorator (see :meth:`update_arg` and :meth:`update_argf`)

        Parameters
        ----------
        %(FuncArgParser.update_short.parameters)s

        Returns
        -------
        function
            The function that can be used as a decorator

        Examples
        --------
        Use this method as a decorator::

            >>> @parser.update_shortf(something='s', something_else='se')
            ... def do_something(something=None, something_else=None):
            ...     ...

        See also the examples in :meth:`update_short`.

        See Also
        --------
        update_short, update_longf
        """
        # delegate to update_short on every parser that was set up with the
        # decorated function
        return self._as_decorator('update_short', **kwargs)
@docstrings.get_sectionsf('FuncArgParser.update_long')
@docstrings.dedent
def update_long(self, **kwargs):
"""
Update the long optional arguments (those with two leading '-')
This method updates the short argument name for the specified function
arguments as stored in :attr:`unfinished_arguments`
Parameters
----------
``**kwargs``
Keywords must be keys in the :attr:`unfinished_arguments`
dictionary (i.e. keywords of the root functions), values the long
argument names
Examples
--------
Setting::
>>> parser.update_long(something='s', something_else='se')
is basically the same as::
>>> parser.update_arg('something', long='s')
>>> parser.update_arg('something_else', long='se')
which in turn is basically comparable to::
>>> parser.add_argument('--s', dest='something', ...)
>>> parser.add_argument('--se', dest='something_else', ...)
See Also
--------
update_short, update_longf"""
for key, val in six.iteritems(kwargs):
self.update_arg(key, long=val)
    @docstrings.dedent
    def update_longf(self, **kwargs):
        """
        Update the long optional arguments belonging to a function

        This method acts exactly like :meth:`update_long` but works as a
        decorator (see :meth:`update_arg` and :meth:`update_argf`)

        Parameters
        ----------
        %(FuncArgParser.update_long.parameters)s

        Returns
        -------
        function
            The function that can be used as a decorator

        Examples
        --------
        Use this method as a decorator::

            >>> @parser.update_longf(something='s', something_else='se')
            ... def do_something(something=None, something_else=None):
            ...     ...

        See also the examples in :meth:`update_long`.

        See Also
        --------
        update_long, update_shortf
        """
        # delegate to update_long on every parser that was set up with the
        # decorated function
        return self._as_decorator('update_long', **kwargs)
def parse2func(self, args=None, func=None):
"""Parse the command line arguments to the setup function
This method parses the given command line arguments to the function
used in the :meth:`setup_args` method to setup up this parser
Parameters
----------
args: list
The list of command line arguments
func: function
An alternative function to use. If None, the last function or the
one specified through the `setup_as` parameter in the
:meth:`setup_args` is used.
Returns
-------
object
What ever is returned by the called function
Note
----
This method does not cover subparsers!"""
kws = vars(self.parse_args(args))
if func is None:
if self._setup_as:
func = kws.pop(self._setup_as)
else:
func = self._used_functions[-1]
return func(**kws)
def parse_known2func(self, args=None, func=None):
"""Parse the command line arguments to the setup function
This method parses the given command line arguments to the function
used in the :meth:`setup_args` method to setup up this parser
Parameters
----------
args: list
The list of command line arguments
func: function or str
An alternative function to use. If None, the last function or the
one specified through the `setup_as` parameter in the
:meth:`setup_args` is used.
Returns
-------
object
What ever is returned by the called function
list
The remaining command line arguments that could not be interpreted
Note
----
This method does not cover subparsers!"""
ns, remainder = self.parse_known_args(args)
kws = vars(ns)
if func is None:
if self._setup_as:
func = kws.pop(self._setup_as)
else:
func = self._used_functions[-1]
return func(**kws), remainder
def parse_chained(self, args=None):
"""
Parse the argument directly to the function used for setup
This function parses the command line arguments to the function that
has been used for the :meth:`setup_args`.
Parameters
----------
args: list
The arguments parsed to the :meth:`parse_args` function
Returns
-------
argparse.Namespace
The namespace with mapping from command name to the function
return
See also
--------
parse_known_chained
"""
kws = vars(self.parse_args(args))
return self._parse2subparser_funcs(kws)
    def _parse2subparser_funcs(self, kws):
        """
        Recursive function to parse arguments to chained parsers

        Parameters
        ----------
        kws: dict
            The ``vars()`` of the namespace parsed for this (sub)parser

        Returns
        -------
        argparse.Namespace or object or None
            A namespace mapping subcommand names to their results if
            subcommands were given, otherwise the return value of the setup
            function of this parser (None if no function was registered)
        """
        choices = getattr(self._subparsers_action, 'choices', {})
        # map namespace-safe names (with '_') back to command names ('-')
        replaced = {key.replace('-', '_'): key for key in choices}
        sp_commands = set(replaced).intersection(kws)
        if not sp_commands:
            # leaf parser: call the function this parser was set up with
            if self._setup_as is not None:
                func = kws.pop(self._setup_as)
            else:
                try:
                    func = self._used_functions[-1]
                except IndexError:
                    # this parser was not set up from a function
                    return None
            # pass everything except subcommand entries to the function
            return func(**{
                key: kws[key] for key in set(kws).difference(choices)})
        else:
            ret = {}
            for key in sp_commands:
                # recurse into the subparser that handled this command
                ret[key.replace('-', '_')] = \
                    choices[replaced[key]]._parse2subparser_funcs(
                        vars(kws[key]))
            return Namespace(**ret)
def get_subparser(self, name):
"""
Convenience method to get a certain subparser
Parameters
----------
name: str
The name of the subparser
Returns
-------
FuncArgParser
The subparsers corresponding to `name`
"""
if self._subparsers_action is None:
raise ValueError("%s has no subparsers defined!" % self)
return self._subparsers_action.choices[name]
|
Chilipp/funcargparse | funcargparse/__init__.py | FuncArgParser._parse2subparser_funcs | python | def _parse2subparser_funcs(self, kws):
choices = getattr(self._subparsers_action, 'choices', {})
replaced = {key.replace('-', '_'): key for key in choices}
sp_commands = set(replaced).intersection(kws)
if not sp_commands:
if self._setup_as is not None:
func = kws.pop(self._setup_as)
else:
try:
func = self._used_functions[-1]
except IndexError:
return None
return func(**{
key: kws[key] for key in set(kws).difference(choices)})
else:
ret = {}
for key in sp_commands:
ret[key.replace('-', '_')] = \
choices[replaced[key]]._parse2subparser_funcs(
vars(kws[key]))
return Namespace(**ret) | Recursive function to parse arguments to chained parsers | train | https://github.com/Chilipp/funcargparse/blob/398ce8e7fa5aa35c465215446bda151cf1ecf7ad/funcargparse/__init__.py#L981-L1004 | null | class FuncArgParser(ArgumentParser):
"""Subclass of an argument parser that get's parts of the information
from a given function"""
_finalized = False
#: The unfinished arguments after the setup
unfinished_arguments = {}
#: The sections to extract from a function docstring that should be used
#: in the epilog of this parser. See also the :meth:`setup_args` method
epilog_sections = ['Notes', 'References']
#: The formatter specification for the epilog. This can either be a string
#: out of 'header', 'bold', or
#: 'rubric' or a callable (i.e. function) that takes two arguments,
#: the section title and the section text, and returns a string.
#:
#: 'heading'
#: Use section headers such as::
#:
#: Notes
#: -----
#: 'bold'
#: Just make a bold header for the section, e.g. ``**Notes**``
#: 'rubric'
#: Use a rubric rst directive, e.g. ``.. rubric:: Notes``
#:
#: .. warning::
#:
#: When building a sphinx documentation using the sphinx-argparse
#: module, this value should be set to ``'bold'`` or ``'rubric'``! Just
#: add this two lines to your conf.py:
#:
#: .. code-block:: python
#:
#: import funcargparse
#: funcargparse.FuncArgParser.epilog_formatter = 'rubric'
epilog_formatter = 'heading'
    def __init__(self, *args, **kwargs):
        """
        Parameters
        ----------
        ``*args,**kwargs``
            These arguments are determined by the
            :class:`argparse.ArgumentParser` base class. Note that by default,
            we use a :class:`argparse.RawTextHelpFormatter` class for the
            `formatter_class` keyword, whereas the
            :class:`argparse.ArgumentParser` uses a
            :class:`argparse.HelpFormatter`

        Other Parameters
        ----------------
        epilog_sections: list of str
            The default sections to use for the epilog (see the
            :attr:`epilog_sections` attribute). They can also be specified
            each time the :meth:`setup_args` method is called
        epilog_formatter: {'header', 'bold', 'rubric'} or function
            Specify how the epilog sections should be formatted and defaults to
            the :attr:`epilog_formatter` attribute. This can either be a string
            out of 'header', 'bold', or 'rubric' or a callable (i.e. function)
            that takes two arguments, the section title and the section text,
            and returns a string.

            'heading'
                Use section headers such as::

                    Notes
                    -----
            'bold'
                Just make a bold header for the section, e.g. ``**Notes**``
            'rubric'
                Use a rubric rst directive, e.g. ``.. rubric:: Notes``
        """
        self._subparsers_action = None
        # keep the docstring-derived help text as it is
        kwargs.setdefault('formatter_class', argparse.RawTextHelpFormatter)
        # pop our own keywords before argparse sees them
        epilog_sections = kwargs.pop('epilog_sections', None)
        if epilog_sections is not None:
            self.epilog_sections = epilog_sections
        epilog_formatter = kwargs.pop('epilog_formatter', None)
        if epilog_formatter is not None:
            self.epilog_formatter = epilog_formatter
        super(FuncArgParser, self).__init__(*args, **kwargs)
        # mapping from function argument name to add_argument settings
        self.unfinished_arguments = OrderedDict()
        # the functions that have been used to set up this parser
        self._used_functions = []
        self.__currentarg = None
        self._chain_subparsers = False
        self._setup_as = None
        # builtin epilog formatters (see format_epilog_section)
        self._epilog_formatters = {'heading': self.format_heading,
                                   'bold': self.format_bold,
                                   'rubric': self.format_rubric}
@staticmethod
def get_param_doc(doc, param):
"""Get the documentation and datatype for a parameter
This function returns the documentation and the argument for a
napoleon like structured docstring `doc`
Parameters
----------
doc: str
The base docstring to use
param: str
The argument to use
Returns
-------
str
The documentation of the given `param`
str
The datatype of the given `param`"""
arg_doc = docstrings.keep_params_s(doc, [param]) or \
docstrings.keep_types_s(doc, [param])
dtype = None
if arg_doc:
lines = arg_doc.splitlines()
arg_doc = dedents('\n' + '\n'.join(lines[1:]))
param_desc = lines[0].split(':', 1)
if len(param_desc) > 1:
dtype = param_desc[1].strip()
return arg_doc, dtype
@docstrings.get_sectionsf('FuncArgParser.setup_args',
sections=['Parameters', 'Returns'])
@docstrings.dedent
def setup_args(self, func=None, setup_as=None, insert_at=None,
interprete=True, epilog_sections=None,
overwrite=False, append_epilog=True):
"""
Add the parameters from the given `func` to the parameter settings
Parameters
----------
func: function
The function to use. If None, a function will be returned that can
be used as a decorator
setup_as: str
The attribute that shall be assigned to the function in the
resulting namespace. If specified, this function will be used when
calling the :meth:`parse2func` method
insert_at: int
The position where the given `func` should be inserted. If None,
it will be appended at the end and used when calling the
:meth:`parse2func` method
interprete: bool
If True (default), the docstrings are interpreted and switches and
lists are automatically inserted (see the
[interpretation-docs]_
epilog_sections: list of str
The headers of the sections to extract. If None, the
:attr:`epilog_sections` attribute is used
overwrite: bool
If True, overwrite the existing epilog and the existing description
of the parser
append_epilog: bool
If True, append to the existing epilog
Returns
-------
function
Either the function that can be used as a decorator (if `func` is
``None``), or the given `func` itself.
Examples
--------
Use this method as a decorator::
>>> @parser.setup_args
... def do_something(a=1):
'''
Just an example
Parameters
----------
a: int
A number to increment by one
'''
return a + 1
>>> args = parser.parse_args('-a 2'.split())
Or by specifying the setup_as function::
>>> @parser.setup_args(setup_as='func')
... def do_something(a=1):
'''
Just an example
Parameters
----------
a: int
A number to increment by one
'''
return a + 1
>>> args = parser.parse_args('-a 2'.split())
>>> args.func is do_something
>>> parser.parse2func('-a 2'.split())
3
References
----------
.. [interpretation-docs]
http://funcargparse.readthedocs.io/en/latest/docstring_interpretation.html)
"""
def setup(func):
# insert the function
if insert_at is None:
self._used_functions.append(func)
else:
self._used_functions.insert(insert_at, func)
args_dict = self.unfinished_arguments
# save the function to use in parse2funcs
if setup_as:
args_dict[setup_as] = dict(
long=setup_as, default=func, help=argparse.SUPPRESS)
self._setup_as = setup_as
# create arguments
args, varargs, varkw, defaults = inspect.getargspec(func)
full_doc = docstrings.dedents(inspect.getdoc(func))
summary = docstrings.get_full_description(full_doc)
if summary:
if not self.description or overwrite:
self.description = summary
full_doc = docstrings._remove_summary(full_doc)
self.extract_as_epilog(full_doc, epilog_sections, overwrite,
append_epilog)
doc = docstrings._get_section(full_doc, 'Parameters') + '\n'
doc += docstrings._get_section(full_doc, 'Other Parameters')
doc = doc.rstrip()
default_min = len(args or []) - len(defaults or [])
for i, arg in enumerate(args):
if arg == 'self' or arg in args_dict:
continue
arg_doc, dtype = self.get_param_doc(doc, arg)
args_dict[arg] = d = {'dest': arg, 'short': arg.replace('_',
'-'),
'long': arg.replace('_', '-')}
if arg_doc:
d['help'] = arg_doc
if i >= default_min:
d['default'] = defaults[i - default_min]
else:
d['positional'] = True
if interprete and dtype == 'bool' and 'default' in d:
d['action'] = 'store_false' if d['default'] else \
'store_true'
elif interprete and dtype:
if dtype.startswith('list of'):
d['nargs'] = '+'
dtype = dtype[7:].strip()
if dtype in ['str', 'string', 'strings']:
d['type'] = six.text_type
if dtype == 'strings':
dtype = 'string'
else:
try:
d['type'] = getattr(builtins, dtype)
except AttributeError:
try: # maybe the dtype has a final 's'
d['type'] = getattr(builtins, dtype[:-1])
dtype = dtype[:-1]
except AttributeError:
pass
d['metavar'] = dtype
return func
if func is None:
return setup
else:
return setup(func)
@docstrings.get_sectionsf('FuncArgParser.add_subparsers')
@docstrings.dedent
def add_subparsers(self, *args, **kwargs):
"""
Add subparsers to this parser
Parameters
----------
``*args, **kwargs``
As specified by the original
:meth:`argparse.ArgumentParser.add_subparsers` method
chain: bool
Default: False. If True, It is enabled to chain subparsers"""
chain = kwargs.pop('chain', None)
ret = super(FuncArgParser, self).add_subparsers(*args, **kwargs)
if chain:
self._chain_subparsers = True
self._subparsers_action = ret
return ret
@docstrings.dedent
def setup_subparser(
        self, func=None, setup_as=None, insert_at=None, interprete=True,
        epilog_sections=None, overwrite=False, append_epilog=True,
        return_parser=False, name=None, **kwargs):
    """
    Create a subparser with the name of the given function

    Parameters are the same as for the :meth:`setup_args` function, other
    parameters are parsed to the :meth:`add_subparsers` method if (and only
    if) this method has not already been called.

    Parameters
    ----------
    %(FuncArgParser.setup_args.parameters)s
    return_parser: bool
        If True, the create parser is returned instead of the function
    name: str
        The name of the created parser. If None, the function name is used
        and underscores (``'_'``) are replaced by minus (``'-'``)
    ``**kwargs``
        Any other parameter that is passed to the add_parser method that
        creates the parser

    Returns
    -------
    FuncArgParser or %(FuncArgParser.setup_args.returns)s
        If return_parser is True, the created subparser is returned

    Examples
    --------
    Use this method as a decorator::

        >>> from funcargparser import FuncArgParser

        >>> parser = FuncArgParser()

        >>> @parser.setup_subparser
        ... def my_func(my_argument=None):
        ...     pass

        >>> args = parser.parse_args('my-func -my-argument 1'.split())
    """
    def setup(func):
        # a subparsers action must exist before a parser can be added
        if self._subparsers_action is None:
            raise RuntimeError(
                "No subparsers have yet been created! Run the "
                "add_subparsers method first!")
        # replace underscore by '-'
        name2use = name
        if name2use is None:
            name2use = func.__name__.replace('_', '-')
        # default the one-line help text to the docstring summary
        kwargs.setdefault('help', docstrings.get_summary(
            docstrings.dedents(inspect.getdoc(func))))
        parser = self._subparsers_action.add_parser(name2use, **kwargs)
        # fill the new subparser from the function signature/docstring
        parser.setup_args(
            func, setup_as=setup_as, insert_at=insert_at,
            interprete=interprete, epilog_sections=epilog_sections,
            overwrite=overwrite, append_epilog=append_epilog)
        return func, parser
    if func is None:
        # used as ``@parser.setup_subparser(...)``: return a decorator
        # that discards the created parser
        return lambda f: setup(f)[0]
    else:
        # index 0 returns the function, index 1 the created parser
        return setup(func)[int(return_parser)]
@docstrings.get_sectionsf('FuncArgParser.update_arg')
@docstrings.dedent
def update_arg(self, arg, if_existent=None, **kwargs):
    """
    Update the `add_argument` data for the given parameter

    Parameters
    ----------
    arg: str
        The name of the function argument
    if_existent: bool or None
        If True, the argument is updated. If None (default), the argument
        is only updated, if it exists. Otherwise, if False, the given
        ``**kwargs`` are only used if the argument is not yet existing
    ``**kwargs``
        The keyword arguments any parameter for the
        :meth:`argparse.ArgumentParser.add_argument` method
    """
    args_d = self.unfinished_arguments
    if if_existent is None:
        # only touch the argument if it is already registered
        if arg in args_d:
            args_d[arg].update(kwargs)
    elif if_existent:
        # unconditional update (raises KeyError for unknown arguments)
        args_d[arg].update(kwargs)
    else:
        # keep an existing entry untouched, otherwise register kwargs
        args_d.setdefault(arg, kwargs)
@docstrings.dedent
def update_argf(self, arg, **kwargs):
    """
    Update the arguments as a decorator

    Parameters
    ----------
    %(FuncArgParser.update_arg.parameters)s

    Examples
    --------
    Use this method as a decorator::

        >>> from funcargparser import FuncArgParser

        >>> parser = FuncArgParser()

        >>> @parser.update_argf('my_argument', type=int)
        ... def my_func(my_argument=None):
        ...     pass

        >>> args = parser.parse_args('my-func -my-argument 1'.split())

        >>> isinstance(args.my_argument, int)
        True

    See Also
    --------
    update_arg"""
    # delegate to :meth:`update_arg` on every parser that has been set up
    # with the decorated function
    return self._as_decorator('update_arg', arg, **kwargs)
def _as_decorator(self, funcname, *args, **kwargs):
def func_decorator(func):
success = False
for parser in self._get_corresponding_parsers(func):
getattr(parser, funcname)(*args, **kwargs)
success = True
if not success:
raise ValueError(
"Could not figure out to which this %s belongs" % func)
return func
return func_decorator
def _get_corresponding_parsers(self, func):
"""Get the parser that has been set up by the given `function`"""
if func in self._used_functions:
yield self
if self._subparsers_action is not None:
for parser in self._subparsers_action.choices.values():
for sp in parser._get_corresponding_parsers(func):
yield sp
def pop_arg(self, *args, **kwargs):
    """Delete a previously defined argument from the parser

    All arguments are passed on to :meth:`dict.pop` of the
    :attr:`unfinished_arguments` mapping."""
    args_d = self.unfinished_arguments
    return args_d.pop(*args, **kwargs)
def pop_argf(self, *args, **kwargs):
    """Decorator version of :meth:`pop_arg`

    Deletes a previously defined argument from every parser that has been
    set up with the decorated function."""
    return self._as_decorator('pop_arg', *args, **kwargs)
def pop_key(self, arg, key, *args, **kwargs):
    """Delete a previously defined `add_argument` key of argument `arg`

    Additional arguments are passed on to :meth:`dict.pop`."""
    entry = self.unfinished_arguments[arg]
    return entry.pop(key, *args, **kwargs)
def pop_keyf(self, *args, **kwargs):
    """Decorator version of :meth:`pop_key`

    Deletes a previously defined `add_argument` key on every parser that
    has been set up with the decorated function."""
    return self._as_decorator('pop_key', *args, **kwargs)
def create_arguments(self, subparsers=False):
    """Create and add the arguments

    Parameters
    ----------
    subparsers: bool
        If True, the arguments of the subparsers are also created

    Returns
    -------
    list
        The actions created by
        :meth:`argparse.ArgumentParser.add_argument`

    Raises
    ------
    ValueError
        If the arguments have already been created before"""
    ret = []
    if not self._finalized:
        for arg, d in self.unfinished_arguments.items():
            try:
                # NOTE: ``d`` is consumed in place by the pop calls
                # below, which is why this method may only run once
                not_positional = int(not d.pop('positional', False))
                short = d.pop('short', None)
                long_name = d.pop('long', None)
                if short is None and long_name is None:
                    raise ValueError(
                        "Either a short (-) or a long (--) argument must "
                        "be provided!")
                if not not_positional:
                    # positional arguments use the plain function
                    # argument name and may not define flags or a dest
                    short = arg
                    long_name = None
                    d.pop('dest', None)
                if short == long_name:
                    long_name = None
                args = []
                # ``not_positional`` is 0 or 1, so the multiplication
                # yields '', '-' or '--' prefixes as appropriate
                if short:
                    args.append('-' * not_positional + short)
                if long_name:
                    args.append('--' * not_positional + long_name)
                # the argument may be created on an argument group
                # instead of the parser itself
                group = d.pop('group', self)
                if d.get('action') in ['store_true', 'store_false']:
                    # switches do not accept a metavar
                    d.pop('metavar', None)
                ret.append(group.add_argument(*args, **d))
            except Exception:
                print('Error while creating argument %s' % arg)
                raise
    else:
        raise ValueError('Parser has already been finalized!')
    self._finalized = True
    if subparsers and self._subparsers_action is not None:
        for parser in self._subparsers_action.choices.values():
            parser.create_arguments(True)
    return ret
def append2help(self, arg, s):
    """Append the string `s` to the help text of argument `arg`

    Parameters
    ----------
    arg: str
        The function argument
    s: str
        The string to append to the help"""
    entry = self.unfinished_arguments[arg]
    entry['help'] += s
def append2helpf(self, arg, s):
    """Decorator version of :meth:`append2help`

    Parameters
    ----------
    arg: str
        The function argument
    s: str
        The string to append to the help"""
    return self._as_decorator('append2help', arg, s)
@staticmethod
def format_bold(section, text):
    """Return `text` below a bold markup of the `section` title"""
    return '**{0}**\n\n{1}'.format(section, text)
@staticmethod
def format_rubric(section, text):
    """Return `text` below a rst ``.. rubric::`` directive for `section`"""
    return '.. rubric:: {0}\n\n{1}'.format(section, text)
@staticmethod
def format_heading(section, text):
    """Return `text` below an underlined rst section header for `section`"""
    underline = '-' * len(section)
    return '\n'.join([section, underline, text])
def format_epilog_section(self, section, text):
    """Format one epilog section using the :attr:`epilog_formatter`

    The formatter may either be one of the registered names in
    :attr:`_epilog_formatters` or a callable taking ``(section, text)``."""
    formatter = self.epilog_formatter
    try:
        formatter = self._epilog_formatters[formatter]
    except KeyError:
        # not a registered name -- then it must be a callable itself
        if not callable(formatter):
            raise
    return formatter(section, text)
def extract_as_epilog(self, text, sections=None, overwrite=False,
                      append=True):
    """Extract epilog sections from a docstring

    Parameters
    ----------
    text: str
        The docstring to extract the sections from
    sections: list of str
        The headers of the sections to extract. If None, the
        :attr:`epilog_sections` attribute is used
    overwrite: bool
        If True, overwrite the existing epilog
    append: bool
        If True, append to the existing epilog"""
    if sections is None:
        sections = self.epilog_sections
    if ((not self.epilog or overwrite or append) and sections):
        epilog_parts = []
        for sec in sections:
            # BUGFIX: use a separate variable for the extracted section.
            # The previous code reassigned ``text`` here, clobbering the
            # original docstring, so every section after the first one
            # was searched inside the previously extracted section
            # instead of the full docstring.
            sec_text = docstrings._get_section(text, sec).strip()
            if sec_text:
                epilog_parts.append(
                    self.format_epilog_section(sec, sec_text))
        if epilog_parts:
            epilog = '\n\n'.join(epilog_parts)
            if overwrite or not self.epilog:
                self.epilog = epilog
            else:
                self.epilog += '\n\n' + epilog
def grouparg(self, arg, my_arg=None, parent_cmds=()):
    """
    Grouper function for chaining subcommands

    Parameters
    ----------
    arg: str
        The current command line argument that is parsed
    my_arg: str
        The name of this subparser. If None, this parser is the main
        parser and has no parent parser
    parent_cmds: iterable of str
        The available commands of the parent parsers

    Returns
    -------
    str or None
        The grouping key for the given `arg` or None if the key does
        not correspond to this parser or this parser is the main parser
        and does not have seen a subparser yet

    Notes
    -----
    Quite complicated, there is no real need to deal with this function
    """
    # IDIOM FIX: the default for ``parent_cmds`` used to be a mutable
    # list (``[]``). It is only iterated/membership-tested, never
    # mutated, so an immutable tuple is the safe default.
    if self._subparsers_action is None:
        return None
    commands = self._subparsers_action.choices
    currentarg = self.__currentarg
    # the default return value is the current argument we are in or the
    # name of the subparser itself
    ret = currentarg or my_arg
    if currentarg is not None:
        # if we are already in a sub command, we use the sub parser
        sp_key = commands[currentarg].grouparg(arg, currentarg, chain(
            commands, parent_cmds))
        if sp_key is None and arg in commands:
            # if the subparser did not recognize the command, we use the
            # command the corresponds to this parser or (of this parser
            # is the parent parser) the current subparser
            self.__currentarg = currentarg = arg
            ret = my_arg or currentarg
        elif sp_key not in commands and arg in parent_cmds:
            # otherwise, if the subparser recognizes the commmand but it is
            # not in the known command of this parser, it must be another
            # command of the subparser and this parser can ignore it
            ret = None
        else:
            # otherwise the command belongs to this subparser (if this one
            # is not the subparser) or the current subparser
            ret = my_arg or currentarg
    elif arg in commands:
        # if the argument is a valid subparser, we return this one
        self.__currentarg = arg
        ret = arg
    elif arg in parent_cmds:
        # if the argument is not a valid subparser but in one of our
        # parents, we return None to signalize that we cannot categorize
        # it
        ret = None
    return ret
def parse_known_args(self, args=None, namespace=None):
    """Parse the known command line arguments

    Reimplemented to support chained subparsers (see the `chain`
    parameter of the :meth:`add_subparsers` method). Without chaining
    this falls back to
    :meth:`argparse.ArgumentParser.parse_known_args`."""
    if self._chain_subparsers:
        if args is None:
            args = sys.argv[1:]
        choices_d = OrderedDict()
        remainders = OrderedDict()
        main_args = []
        # get the first argument to make sure that everything works
        cmd = self.__currentarg = None
        # group the arguments by the subcommand they belong to (see
        # :meth:`grouparg`) and parse every group separately, always
        # prepending the arguments of the main parser
        for i, (cmd, subargs) in enumerate(groupby(args, self.grouparg)):
            if cmd is None:
                main_args += list(subargs)
            else:
                # replace '-' by underscore
                ns_cmd = cmd.replace('-', '_')
                choices_d[ns_cmd], remainders[ns_cmd] = super(
                    FuncArgParser, self).parse_known_args(
                        list(chain(main_args, subargs)))
        main_ns, remainders[None] = self.__parse_main(main_args)
        # merge the main namespace into the result namespace
        for key, val in vars(main_ns).items():
            choices_d[key] = val
        self.__currentarg = None
        if '__dummy' in choices_d:
            # drop the placeholder subcommand registered by
            # ``__parse_main`` under python 2.7
            del choices_d['__dummy']
        return Namespace(**choices_d), list(chain(*remainders.values()))
    # otherwise, use the default behaviour
    return super(FuncArgParser, self).parse_known_args(args, namespace)
def __parse_main(self, args):
    """Parse the main arguments only. This is a work around for python 2.7
    because argparse does not allow to parse arguments without subparsers
    """
    if six.PY2:
        # register a dummy subcommand and append it to `args` so that
        # python 2.7's mandatory subparser requirement is satisfied
        self._subparsers_action.add_parser("__dummy")
        return super(FuncArgParser, self).parse_known_args(
            list(args) + ['__dummy'])
    return super(FuncArgParser, self).parse_known_args(args)
@docstrings.get_sectionsf('FuncArgParser.update_short')
@docstrings.dedent
def update_short(self, **kwargs):
    """
    Update the short optional arguments (those with one leading '-')

    This method updates the short argument name for the specified function
    arguments as stored in :attr:`unfinished_arguments`

    Parameters
    ----------
    ``**kwargs``
        Keywords must be keys in the :attr:`unfinished_arguments`
        dictionary (i.e. keywords of the root functions), values the short
        argument names

    Examples
    --------
    Setting::

        >>> parser.update_short(something='s', something_else='se')

    is basically the same as::

        >>> parser.update_arg('something', short='s')
        >>> parser.update_arg('something_else', short='se')

    which in turn is basically comparable to::

        >>> parser.add_argument('-s', '--something', ...)
        >>> parser.add_argument('-se', '--something_else', ...)

    See Also
    --------
    update_shortf, update_long"""
    # every keyword maps a function argument to its short flag name
    for key, val in six.iteritems(kwargs):
        self.update_arg(key, short=val)
@docstrings.dedent
def update_shortf(self, **kwargs):
    """
    Update the short optional arguments belonging to a function

    This method acts exactly like :meth:`update_short` but works as a
    decorator (see :meth:`update_arg` and :meth:`update_argf`)

    Parameters
    ----------
    %(FuncArgParser.update_short.parameters)s

    Returns
    -------
    function
        The function that can be used as a decorator

    Examples
    --------
    Use this method as a decorator::

        >>> @parser.update_shortf(something='s', something_else='se')
        ... def do_something(something=None, something_else=None):
        ...     ...

    See also the examples in :meth:`update_short`.

    See Also
    --------
    update_short, update_longf
    """
    # apply ``update_short`` on every parser that has been set up with
    # the decorated function
    return self._as_decorator('update_short', **kwargs)
@docstrings.get_sectionsf('FuncArgParser.update_long')
@docstrings.dedent
def update_long(self, **kwargs):
    """
    Update the long optional arguments (those with two leading '-')

    This method updates the long argument name for the specified function
    arguments as stored in :attr:`unfinished_arguments`

    Parameters
    ----------
    ``**kwargs``
        Keywords must be keys in the :attr:`unfinished_arguments`
        dictionary (i.e. keywords of the root functions), values the long
        argument names

    Examples
    --------
    Setting::

        >>> parser.update_long(something='s', something_else='se')

    is basically the same as::

        >>> parser.update_arg('something', long='s')
        >>> parser.update_arg('something_else', long='se')

    which in turn is basically comparable to::

        >>> parser.add_argument('--s', dest='something', ...)
        >>> parser.add_argument('--se', dest='something_else', ...)

    See Also
    --------
    update_short, update_longf"""
    # every keyword maps a function argument to its long flag name
    for key, val in six.iteritems(kwargs):
        self.update_arg(key, long=val)
@docstrings.dedent
def update_longf(self, **kwargs):
    """
    Update the long optional arguments belonging to a function

    This method acts exactly like :meth:`update_long` but works as a
    decorator (see :meth:`update_arg` and :meth:`update_argf`)

    Parameters
    ----------
    %(FuncArgParser.update_long.parameters)s

    Returns
    -------
    function
        The function that can be used as a decorator

    Examples
    --------
    Use this method as a decorator::

        >>> @parser.update_longf(something='s', something_else='se')
        ... def do_something(something=None, something_else=None):
        ...     ...

    See also the examples in :meth:`update_long`.

    See Also
    --------
    update_long, update_shortf
    """
    # apply ``update_long`` on every parser that has been set up with
    # the decorated function
    return self._as_decorator('update_long', **kwargs)
def parse2func(self, args=None, func=None):
    """Parse the command line arguments to the setup function

    Parses the given command line arguments and calls the function that
    was used in the :meth:`setup_args` method with the resulting
    namespace as keyword arguments.

    Parameters
    ----------
    args: list
        The list of command line arguments
    func: function
        An alternative function to use. If None, the last function or the
        one specified through the `setup_as` parameter in the
        :meth:`setup_args` is used.

    Returns
    -------
    object
        What ever is returned by the called function

    Note
    ----
    This method does not cover subparsers!"""
    kwargs = vars(self.parse_args(args))
    if func is None:
        if self._setup_as:
            # the function was stored inside the namespace itself
            func = kwargs.pop(self._setup_as)
        else:
            func = self._used_functions[-1]
    return func(**kwargs)
def parse_known2func(self, args=None, func=None):
    """Parse the known command line arguments to the setup function

    Like :meth:`parse2func` but based on :meth:`parse_known_args`, i.e.
    unrecognized arguments are returned instead of raising an error.

    Parameters
    ----------
    args: list
        The list of command line arguments
    func: function or str
        An alternative function to use. If None, the last function or the
        one specified through the `setup_as` parameter in the
        :meth:`setup_args` is used.

    Returns
    -------
    object
        What ever is returned by the called function
    list
        The remaining command line arguments that could not be interpreted

    Note
    ----
    This method does not cover subparsers!"""
    namespace, remainder = self.parse_known_args(args)
    kwargs = vars(namespace)
    if func is None:
        if self._setup_as:
            # the function was stored inside the namespace itself
            func = kwargs.pop(self._setup_as)
        else:
            func = self._used_functions[-1]
    return func(**kwargs), remainder
def parse_chained(self, args=None):
    """
    Parse chained subcommands directly to their setup functions

    Parameters
    ----------
    args: list
        The arguments parsed to the :meth:`parse_args` function

    Returns
    -------
    argparse.Namespace
        The namespace with mapping from command name to the function
        return

    See also
    --------
    parse_known_chained
    """
    namespace = self.parse_args(args)
    return self._parse2subparser_funcs(vars(namespace))
def parse_known_chained(self, args=None):
    """
    Parse known chained subcommands directly to their setup functions

    Parameters
    ----------
    args: list
        The arguments parsed to the :meth:`parse_args` function

    Returns
    -------
    argparse.Namespace
        The namespace with mapping from command name to the function
        return
    list
        The remaining arguments that could not be interpreted

    See also
    --------
    parse_chained
    """
    namespace, remainder = self.parse_known_args(args)
    return self._parse2subparser_funcs(vars(namespace)), remainder
def get_subparser(self, name):
    """
    Convenience method to get a certain subparser

    Parameters
    ----------
    name: str
        The name of the subparser

    Returns
    -------
    FuncArgParser
        The subparser registered under `name`

    Raises
    ------
    ValueError
        If no subparsers have been created for this parser
    """
    action = self._subparsers_action
    if action is None:
        raise ValueError("%s has no subparsers defined!" % self)
    return action.choices[name]
|
Chilipp/funcargparse | funcargparse/__init__.py | FuncArgParser.get_subparser | python | def get_subparser(self, name):
if self._subparsers_action is None:
raise ValueError("%s has no subparsers defined!" % self)
return self._subparsers_action.choices[name] | Convenience method to get a certain subparser
Parameters
----------
name: str
The name of the subparser
Returns
-------
FuncArgParser
The subparsers corresponding to `name` | train | https://github.com/Chilipp/funcargparse/blob/398ce8e7fa5aa35c465215446bda151cf1ecf7ad/funcargparse/__init__.py#L1006-L1022 | null | class FuncArgParser(ArgumentParser):
"""Subclass of an argument parser that get's parts of the information
from a given function"""
_finalized = False
#: The unfinished arguments after the setup
unfinished_arguments = {}
#: The sections to extract from a function docstring that should be used
#: in the epilog of this parser. See also the :meth:`setup_args` method
epilog_sections = ['Notes', 'References']
#: The formatter specification for the epilog. This can either be a string
#: out of 'header', 'bold', or
#: 'rubric' or a callable (i.e. function) that takes two arguments,
#: the section title and the section text, and returns a string.
#:
#: 'heading'
#: Use section headers such as::
#:
#: Notes
#: -----
#: 'bold'
#: Just make a bold header for the section, e.g. ``**Notes**``
#: 'rubric'
#: Use a rubric rst directive, e.g. ``.. rubric:: Notes``
#:
#: .. warning::
#:
#: When building a sphinx documentation using the sphinx-argparse
#: module, this value should be set to ``'bold'`` or ``'rubric'``! Just
#: add this two lines to your conf.py:
#:
#: .. code-block:: python
#:
#: import funcargparse
#: funcargparse.FuncArgParser.epilog_formatter = 'rubric'
epilog_formatter = 'heading'
def __init__(self, *args, **kwargs):
"""
Parameters
----------
``*args,**kwargs``
Theses arguments are determined by the
:class:`argparse.ArgumentParser` base class. Note that by default,
we use a :class:`argparse.RawTextHelpFormatter` class for the
`formatter_class` keyword, whereas the
:class:`argparse.ArgumentParser` uses a
:class:`argparse.HelpFormatter`
Other Parameters
----------------
epilog_sections: list of str
The default sections to use for the epilog (see the
:attr:`epilog_sections` attribute). They can also be specified
each time the :meth:`setup_args` method is called
epilog_formatter: {'header', 'bold', 'rubric'} or function
Specify how the epilog sections should be formatted and defaults to
the :attr:`epilog_formatter` attribute. This can either be a string
out of 'header', 'bold', or 'rubric' or a callable (i.e. function)
that takes two arguments, the section title and the section text,
and returns a string.
'heading'
Use section headers such as::
Notes
-----
'bold'
Just make a bold header for the section, e.g. ``**Notes**``
'rubric'
Use a rubric rst directive, e.g. ``.. rubric:: Notes``
"""
self._subparsers_action = None
kwargs.setdefault('formatter_class', argparse.RawTextHelpFormatter)
epilog_sections = kwargs.pop('epilog_sections', None)
if epilog_sections is not None:
self.epilog_sections = epilog_sections
epilog_formatter = kwargs.pop('epilog_formatter', None)
if epilog_formatter is not None:
self.epilog_formatter = epilog_formatter
super(FuncArgParser, self).__init__(*args, **kwargs)
self.unfinished_arguments = OrderedDict()
self._used_functions = []
self.__currentarg = None
self._chain_subparsers = False
self._setup_as = None
self._epilog_formatters = {'heading': self.format_heading,
'bold': self.format_bold,
'rubric': self.format_rubric}
@staticmethod
def get_param_doc(doc, param):
"""Get the documentation and datatype for a parameter
This function returns the documentation and the argument for a
napoleon like structured docstring `doc`
Parameters
----------
doc: str
The base docstring to use
param: str
The argument to use
Returns
-------
str
The documentation of the given `param`
str
The datatype of the given `param`"""
arg_doc = docstrings.keep_params_s(doc, [param]) or \
docstrings.keep_types_s(doc, [param])
dtype = None
if arg_doc:
lines = arg_doc.splitlines()
arg_doc = dedents('\n' + '\n'.join(lines[1:]))
param_desc = lines[0].split(':', 1)
if len(param_desc) > 1:
dtype = param_desc[1].strip()
return arg_doc, dtype
@docstrings.get_sectionsf('FuncArgParser.setup_args',
sections=['Parameters', 'Returns'])
@docstrings.dedent
def setup_args(self, func=None, setup_as=None, insert_at=None,
interprete=True, epilog_sections=None,
overwrite=False, append_epilog=True):
"""
Add the parameters from the given `func` to the parameter settings
Parameters
----------
func: function
The function to use. If None, a function will be returned that can
be used as a decorator
setup_as: str
The attribute that shall be assigned to the function in the
resulting namespace. If specified, this function will be used when
calling the :meth:`parse2func` method
insert_at: int
The position where the given `func` should be inserted. If None,
it will be appended at the end and used when calling the
:meth:`parse2func` method
interprete: bool
If True (default), the docstrings are interpreted and switches and
lists are automatically inserted (see the
[interpretation-docs]_
epilog_sections: list of str
The headers of the sections to extract. If None, the
:attr:`epilog_sections` attribute is used
overwrite: bool
If True, overwrite the existing epilog and the existing description
of the parser
append_epilog: bool
If True, append to the existing epilog
Returns
-------
function
Either the function that can be used as a decorator (if `func` is
``None``), or the given `func` itself.
Examples
--------
Use this method as a decorator::
>>> @parser.setup_args
... def do_something(a=1):
'''
Just an example
Parameters
----------
a: int
A number to increment by one
'''
return a + 1
>>> args = parser.parse_args('-a 2'.split())
Or by specifying the setup_as function::
>>> @parser.setup_args(setup_as='func')
... def do_something(a=1):
'''
Just an example
Parameters
----------
a: int
A number to increment by one
'''
return a + 1
>>> args = parser.parse_args('-a 2'.split())
>>> args.func is do_something
>>> parser.parse2func('-a 2'.split())
3
References
----------
.. [interpretation-docs]
http://funcargparse.readthedocs.io/en/latest/docstring_interpretation.html)
"""
def setup(func):
# insert the function
if insert_at is None:
self._used_functions.append(func)
else:
self._used_functions.insert(insert_at, func)
args_dict = self.unfinished_arguments
# save the function to use in parse2funcs
if setup_as:
args_dict[setup_as] = dict(
long=setup_as, default=func, help=argparse.SUPPRESS)
self._setup_as = setup_as
# create arguments
args, varargs, varkw, defaults = inspect.getargspec(func)
full_doc = docstrings.dedents(inspect.getdoc(func))
summary = docstrings.get_full_description(full_doc)
if summary:
if not self.description or overwrite:
self.description = summary
full_doc = docstrings._remove_summary(full_doc)
self.extract_as_epilog(full_doc, epilog_sections, overwrite,
append_epilog)
doc = docstrings._get_section(full_doc, 'Parameters') + '\n'
doc += docstrings._get_section(full_doc, 'Other Parameters')
doc = doc.rstrip()
default_min = len(args or []) - len(defaults or [])
for i, arg in enumerate(args):
if arg == 'self' or arg in args_dict:
continue
arg_doc, dtype = self.get_param_doc(doc, arg)
args_dict[arg] = d = {'dest': arg, 'short': arg.replace('_',
'-'),
'long': arg.replace('_', '-')}
if arg_doc:
d['help'] = arg_doc
if i >= default_min:
d['default'] = defaults[i - default_min]
else:
d['positional'] = True
if interprete and dtype == 'bool' and 'default' in d:
d['action'] = 'store_false' if d['default'] else \
'store_true'
elif interprete and dtype:
if dtype.startswith('list of'):
d['nargs'] = '+'
dtype = dtype[7:].strip()
if dtype in ['str', 'string', 'strings']:
d['type'] = six.text_type
if dtype == 'strings':
dtype = 'string'
else:
try:
d['type'] = getattr(builtins, dtype)
except AttributeError:
try: # maybe the dtype has a final 's'
d['type'] = getattr(builtins, dtype[:-1])
dtype = dtype[:-1]
except AttributeError:
pass
d['metavar'] = dtype
return func
if func is None:
return setup
else:
return setup(func)
@docstrings.get_sectionsf('FuncArgParser.add_subparsers')
@docstrings.dedent
def add_subparsers(self, *args, **kwargs):
"""
Add subparsers to this parser
Parameters
----------
``*args, **kwargs``
As specified by the original
:meth:`argparse.ArgumentParser.add_subparsers` method
chain: bool
Default: False. If True, It is enabled to chain subparsers"""
chain = kwargs.pop('chain', None)
ret = super(FuncArgParser, self).add_subparsers(*args, **kwargs)
if chain:
self._chain_subparsers = True
self._subparsers_action = ret
return ret
@docstrings.dedent
def setup_subparser(
self, func=None, setup_as=None, insert_at=None, interprete=True,
epilog_sections=None, overwrite=False, append_epilog=True,
return_parser=False, name=None, **kwargs):
"""
Create a subparser with the name of the given function
Parameters are the same as for the :meth:`setup_args` function, other
parameters are parsed to the :meth:`add_subparsers` method if (and only
if) this method has not already been called.
Parameters
----------
%(FuncArgParser.setup_args.parameters)s
return_parser: bool
If True, the create parser is returned instead of the function
name: str
The name of the created parser. If None, the function name is used
and underscores (``'_'``) are replaced by minus (``'-'``)
``**kwargs``
Any other parameter that is passed to the add_parser method that
creates the parser
Other Parameters
----------------
Returns
-------
FuncArgParser or %(FuncArgParser.setup_args.returns)s
If return_parser is True, the created subparser is returned
Examples
--------
Use this method as a decorator::
>>> from funcargparser import FuncArgParser
>>> parser = FuncArgParser()
>>> @parser.setup_subparser
... def my_func(my_argument=None):
... pass
>>> args = parser.parse_args('my-func -my-argument 1'.split())
"""
def setup(func):
if self._subparsers_action is None:
raise RuntimeError(
"No subparsers have yet been created! Run the "
"add_subparsers method first!")
# replace underscore by '-'
name2use = name
if name2use is None:
name2use = func.__name__.replace('_', '-')
kwargs.setdefault('help', docstrings.get_summary(
docstrings.dedents(inspect.getdoc(func))))
parser = self._subparsers_action.add_parser(name2use, **kwargs)
parser.setup_args(
func, setup_as=setup_as, insert_at=insert_at,
interprete=interprete, epilog_sections=epilog_sections,
overwrite=overwrite, append_epilog=append_epilog)
return func, parser
if func is None:
return lambda f: setup(f)[0]
else:
return setup(func)[int(return_parser)]
@docstrings.get_sectionsf('FuncArgParser.update_arg')
@docstrings.dedent
def update_arg(self, arg, if_existent=None, **kwargs):
"""
Update the `add_argument` data for the given parameter
Parameters
----------
arg: str
The name of the function argument
if_existent: bool or None
If True, the argument is updated. If None (default), the argument
is only updated, if it exists. Otherwise, if False, the given
``**kwargs`` are only used if the argument is not yet existing
``**kwargs``
The keyword arguments any parameter for the
:meth:`argparse.ArgumentParser.add_argument` method
"""
if if_existent or (if_existent is None and
arg in self.unfinished_arguments):
self.unfinished_arguments[arg].update(kwargs)
elif not if_existent and if_existent is not None:
self.unfinished_arguments.setdefault(arg, kwargs)
@docstrings.dedent
def update_argf(self, arg, **kwargs):
"""
Update the arguments as a decorator
Parameters
---------
%(FuncArgParser.update_arg.parameters)s
Examples
--------
Use this method as a decorator::
>>> from funcargparser import FuncArgParser
>>> parser = FuncArgParser()
>>> @parser.update_argf('my_argument', type=int)
... def my_func(my_argument=None):
... pass
>>> args = parser.parse_args('my-func -my-argument 1'.split())
>>> isinstance(args.my_argument, int)
True
See Also
--------
update_arg"""
return self._as_decorator('update_arg', arg, **kwargs)
def _as_decorator(self, funcname, *args, **kwargs):
def func_decorator(func):
success = False
for parser in self._get_corresponding_parsers(func):
getattr(parser, funcname)(*args, **kwargs)
success = True
if not success:
raise ValueError(
"Could not figure out to which this %s belongs" % func)
return func
return func_decorator
def _get_corresponding_parsers(self, func):
"""Get the parser that has been set up by the given `function`"""
if func in self._used_functions:
yield self
if self._subparsers_action is not None:
for parser in self._subparsers_action.choices.values():
for sp in parser._get_corresponding_parsers(func):
yield sp
def pop_arg(self, *args, **kwargs):
"""Delete a previously defined argument from the parser
"""
return self.unfinished_arguments.pop(*args, **kwargs)
def pop_argf(self, *args, **kwargs):
"""Delete a previously defined argument from the parser via decorators
Same as :meth:`pop_arg` but it can be used as a decorator"""
return self._as_decorator('pop_arg', *args, **kwargs)
def pop_key(self, arg, key, *args, **kwargs):
"""Delete a previously defined key for the `add_argument`
"""
return self.unfinished_arguments[arg].pop(key, *args, **kwargs)
def pop_keyf(self, *args, **kwargs):
"""Delete a previously defined key for the `add_argument`
Same as :meth:`pop_key` but it can be used as a decorator"""
return self._as_decorator('pop_key', *args, **kwargs)
def create_arguments(self, subparsers=False):
"""Create and add the arguments
Parameters
----------
subparsers: bool
If True, the arguments of the subparsers are also created"""
ret = []
if not self._finalized:
for arg, d in self.unfinished_arguments.items():
try:
not_positional = int(not d.pop('positional', False))
short = d.pop('short', None)
long_name = d.pop('long', None)
if short is None and long_name is None:
raise ValueError(
"Either a short (-) or a long (--) argument must "
"be provided!")
if not not_positional:
short = arg
long_name = None
d.pop('dest', None)
if short == long_name:
long_name = None
args = []
if short:
args.append('-' * not_positional + short)
if long_name:
args.append('--' * not_positional + long_name)
group = d.pop('group', self)
if d.get('action') in ['store_true', 'store_false']:
d.pop('metavar', None)
ret.append(group.add_argument(*args, **d))
except Exception:
print('Error while creating argument %s' % arg)
raise
else:
raise ValueError('Parser has already been finalized!')
self._finalized = True
if subparsers and self._subparsers_action is not None:
for parser in self._subparsers_action.choices.values():
parser.create_arguments(True)
return ret
def append2help(self, arg, s):
"""Append the given string to the help of argument `arg`
Parameters
----------
arg: str
The function argument
s: str
The string to append to the help"""
self.unfinished_arguments[arg]['help'] += s
def append2helpf(self, arg, s):
"""Append the given string to the help of argument `arg`
Parameters
----------
arg: str
The function argument
s: str
The string to append to the help"""
return self._as_decorator('append2help', arg, s)
@staticmethod
def format_bold(section, text):
"""Make a bold formatting for the section header"""
return '**%s**\n\n%s' % (section, text)
@staticmethod
def format_rubric(section, text):
"""Make a bold formatting for the section header"""
return '.. rubric:: %s\n\n%s' % (section, text)
@staticmethod
def format_heading(section, text):
return '\n'.join([section, '-' * len(section), text])
def format_epilog_section(self, section, text):
"""Format a section for the epilog by inserting a format"""
try:
func = self._epilog_formatters[self.epilog_formatter]
except KeyError:
if not callable(self.epilog_formatter):
raise
func = self.epilog_formatter
return func(section, text)
def extract_as_epilog(self, text, sections=None, overwrite=False,
append=True):
"""Extract epilog sections from the a docstring
Parameters
----------
text
The docstring to use
sections: list of str
The headers of the sections to extract. If None, the
:attr:`epilog_sections` attribute is used
overwrite: bool
If True, overwrite the existing epilog
append: bool
If True, append to the existing epilog"""
if sections is None:
sections = self.epilog_sections
if ((not self.epilog or overwrite or append) and sections):
epilog_parts = []
for sec in sections:
text = docstrings._get_section(text, sec).strip()
if text:
epilog_parts.append(
self.format_epilog_section(sec, text))
if epilog_parts:
epilog = '\n\n'.join(epilog_parts)
if overwrite or not self.epilog:
self.epilog = epilog
else:
self.epilog += '\n\n' + epilog
def grouparg(self, arg, my_arg=None, parent_cmds=[]):
"""
Grouper function for chaining subcommands
Parameters
----------
arg: str
The current command line argument that is parsed
my_arg: str
The name of this subparser. If None, this parser is the main
parser and has no parent parser
parent_cmds: list of str
The available commands of the parent parsers
Returns
-------
str or None
The grouping key for the given `arg` or None if the key does
not correspond to this parser or this parser is the main parser
and does not have seen a subparser yet
Notes
-----
Quite complicated, there is no real need to deal with this function
"""
if self._subparsers_action is None:
return None
commands = self._subparsers_action.choices
currentarg = self.__currentarg
# the default return value is the current argument we are in or the
# name of the subparser itself
ret = currentarg or my_arg
if currentarg is not None:
# if we are already in a sub command, we use the sub parser
sp_key = commands[currentarg].grouparg(arg, currentarg, chain(
commands, parent_cmds))
if sp_key is None and arg in commands:
# if the subparser did not recognize the command, we use the
# command the corresponds to this parser or (of this parser
# is the parent parser) the current subparser
self.__currentarg = currentarg = arg
ret = my_arg or currentarg
elif sp_key not in commands and arg in parent_cmds:
# otherwise, if the subparser recognizes the commmand but it is
# not in the known command of this parser, it must be another
# command of the subparser and this parser can ignore it
ret = None
else:
# otherwise the command belongs to this subparser (if this one
# is not the subparser) or the current subparser
ret = my_arg or currentarg
elif arg in commands:
# if the argument is a valid subparser, we return this one
self.__currentarg = arg
ret = arg
elif arg in parent_cmds:
# if the argument is not a valid subparser but in one of our
# parents, we return None to signalize that we cannot categorize
# it
ret = None
return ret
def parse_known_args(self, args=None, namespace=None):
if self._chain_subparsers:
if args is None:
args = sys.argv[1:]
choices_d = OrderedDict()
remainders = OrderedDict()
main_args = []
# get the first argument to make sure that everything works
cmd = self.__currentarg = None
for i, (cmd, subargs) in enumerate(groupby(args, self.grouparg)):
if cmd is None:
main_args += list(subargs)
else:
# replace '-' by underscore
ns_cmd = cmd.replace('-', '_')
choices_d[ns_cmd], remainders[ns_cmd] = super(
FuncArgParser, self).parse_known_args(
list(chain(main_args, subargs)))
main_ns, remainders[None] = self.__parse_main(main_args)
for key, val in vars(main_ns).items():
choices_d[key] = val
self.__currentarg = None
if '__dummy' in choices_d:
del choices_d['__dummy']
return Namespace(**choices_d), list(chain(*remainders.values()))
# otherwise, use the default behaviour
return super(FuncArgParser, self).parse_known_args(args, namespace)
def __parse_main(self, args):
"""Parse the main arguments only. This is a work around for python 2.7
because argparse does not allow to parse arguments without subparsers
"""
if six.PY2:
self._subparsers_action.add_parser("__dummy")
return super(FuncArgParser, self).parse_known_args(
list(args) + ['__dummy'])
return super(FuncArgParser, self).parse_known_args(args)
@docstrings.get_sectionsf('FuncArgParser.update_short')
@docstrings.dedent
def update_short(self, **kwargs):
"""
Update the short optional arguments (those with one leading '-')
This method updates the short argument name for the specified function
arguments as stored in :attr:`unfinished_arguments`
Parameters
----------
``**kwargs``
Keywords must be keys in the :attr:`unfinished_arguments`
dictionary (i.e. keywords of the root functions), values the short
argument names
Examples
--------
Setting::
>>> parser.update_short(something='s', something_else='se')
is basically the same as::
>>> parser.update_arg('something', short='s')
>>> parser.update_arg('something_else', short='se')
which in turn is basically comparable to::
>>> parser.add_argument('-s', '--something', ...)
>>> parser.add_argument('-se', '--something_else', ...)
See Also
--------
update_shortf, update_long"""
for key, val in six.iteritems(kwargs):
self.update_arg(key, short=val)
@docstrings.dedent
def update_shortf(self, **kwargs):
"""
Update the short optional arguments belonging to a function
This method acts exactly like :meth:`update_short` but works as a
decorator (see :meth:`update_arg` and :meth:`update_argf`)
Parameters
----------
%(FuncArgParser.update_short.parameters)s
Returns
-------
function
The function that can be used as a decorator
Examples
--------
Use this method as a decorator::
>>> @parser.update_shortf(something='s', something_else='se')
... def do_something(something=None, something_else=None):
... ...
See also the examples in :meth:`update_short`.
See Also
--------
update_short, update_longf
"""
return self._as_decorator('update_short', **kwargs)
@docstrings.get_sectionsf('FuncArgParser.update_long')
@docstrings.dedent
def update_long(self, **kwargs):
"""
Update the long optional arguments (those with two leading '-')
This method updates the short argument name for the specified function
arguments as stored in :attr:`unfinished_arguments`
Parameters
----------
``**kwargs``
Keywords must be keys in the :attr:`unfinished_arguments`
dictionary (i.e. keywords of the root functions), values the long
argument names
Examples
--------
Setting::
>>> parser.update_long(something='s', something_else='se')
is basically the same as::
>>> parser.update_arg('something', long='s')
>>> parser.update_arg('something_else', long='se')
which in turn is basically comparable to::
>>> parser.add_argument('--s', dest='something', ...)
>>> parser.add_argument('--se', dest='something_else', ...)
See Also
--------
update_short, update_longf"""
for key, val in six.iteritems(kwargs):
self.update_arg(key, long=val)
@docstrings.dedent
def update_longf(self, **kwargs):
"""
Update the long optional arguments belonging to a function
This method acts exactly like :meth:`update_long` but works as a
decorator (see :meth:`update_arg` and :meth:`update_argf`)
Parameters
----------
%(FuncArgParser.update_long.parameters)s
Returns
-------
function
The function that can be used as a decorator
Examples
--------
Use this method as a decorator::
>>> @parser.update_shortf(something='s', something_else='se')
... def do_something(something=None, something_else=None):
... ...
See also the examples in :meth:`update_long`.
See Also
--------
update_short, update_longf
"""
return self._as_decorator('update_long', **kwargs)
def parse2func(self, args=None, func=None):
"""Parse the command line arguments to the setup function
This method parses the given command line arguments to the function
used in the :meth:`setup_args` method to setup up this parser
Parameters
----------
args: list
The list of command line arguments
func: function
An alternative function to use. If None, the last function or the
one specified through the `setup_as` parameter in the
:meth:`setup_args` is used.
Returns
-------
object
What ever is returned by the called function
Note
----
This method does not cover subparsers!"""
kws = vars(self.parse_args(args))
if func is None:
if self._setup_as:
func = kws.pop(self._setup_as)
else:
func = self._used_functions[-1]
return func(**kws)
def parse_known2func(self, args=None, func=None):
"""Parse the command line arguments to the setup function
This method parses the given command line arguments to the function
used in the :meth:`setup_args` method to setup up this parser
Parameters
----------
args: list
The list of command line arguments
func: function or str
An alternative function to use. If None, the last function or the
one specified through the `setup_as` parameter in the
:meth:`setup_args` is used.
Returns
-------
object
What ever is returned by the called function
list
The remaining command line arguments that could not be interpreted
Note
----
This method does not cover subparsers!"""
ns, remainder = self.parse_known_args(args)
kws = vars(ns)
if func is None:
if self._setup_as:
func = kws.pop(self._setup_as)
else:
func = self._used_functions[-1]
return func(**kws), remainder
def parse_chained(self, args=None):
"""
Parse the argument directly to the function used for setup
This function parses the command line arguments to the function that
has been used for the :meth:`setup_args`.
Parameters
----------
args: list
The arguments parsed to the :meth:`parse_args` function
Returns
-------
argparse.Namespace
The namespace with mapping from command name to the function
return
See also
--------
parse_known_chained
"""
kws = vars(self.parse_args(args))
return self._parse2subparser_funcs(kws)
def parse_known_chained(self, args=None):
"""
Parse the argument directly to the function used for setup
This function parses the command line arguments to the function that
has been used for the :meth:`setup_args` method.
Parameters
----------
args: list
The arguments parsed to the :meth:`parse_args` function
Returns
-------
argparse.Namespace
The namespace with mapping from command name to the function
return
list
The remaining arguments that could not be interpreted
See also
--------
parse_known
"""
ns, remainder = self.parse_known_args(args)
kws = vars(ns)
return self._parse2subparser_funcs(kws), remainder
def _parse2subparser_funcs(self, kws):
"""
Recursive function to parse arguments to chained parsers
"""
choices = getattr(self._subparsers_action, 'choices', {})
replaced = {key.replace('-', '_'): key for key in choices}
sp_commands = set(replaced).intersection(kws)
if not sp_commands:
if self._setup_as is not None:
func = kws.pop(self._setup_as)
else:
try:
func = self._used_functions[-1]
except IndexError:
return None
return func(**{
key: kws[key] for key in set(kws).difference(choices)})
else:
ret = {}
for key in sp_commands:
ret[key.replace('-', '_')] = \
choices[replaced[key]]._parse2subparser_funcs(
vars(kws[key]))
return Namespace(**ret)
|
vmlaker/coils | coils/RateTicker.py | RateTicker.tick | python | def tick(self):
now = datetime.datetime.now()
self.add(now)
# Create a list of timestamps, one for each period and
# representing the beginning of that period (i.e. since now.)
tstamps = [now - datetime.timedelta(seconds=xx)
for xx in self._periods]
# Trim off any tick values older than the earliest timestamp.
self.removeLT(min(tstamps))
# Create a list of counts, one for each period and
# representing the number of ticks in that period.
counts = [self.getCountGT(xx) for xx in tstamps]
# Compute a list of rates for the periods.
rates = [float(xx)/yy for xx, yy in zip(counts, self._periods)]
# Return the rates as a tuple.
return tuple(rates) | Tick the ticker.
Return a tuple of values corresponding to periods given in initializer,
each value representing the rate of ticks (number of ticks per second)
during that period. | train | https://github.com/vmlaker/coils/blob/a3a613b3d661dec010e5879c86e62cbff2519dd0/coils/RateTicker.py#L22-L46 | [
"def add(self, item):\n \"\"\"Add item to the list while maintaining sorted order.\"\"\"\n bisect.insort_left(self._list, item)\n",
"def removeLT(self, item):\n \"\"\"Trim off any elements less than *item*.\n Return number of elements trimmed.\"\"\"\n count = self.getCountLT(item)\n self._list = self._list[count:]\n return count\n"
] | class RateTicker(SortedList):
"""Computes rates of ticking."""
def __init__(self, periods):
"""Initialize the object with a tuple of time periods in seconds.
For example, use (60, 300, 900) to track rates at 1, 5 and 15 minute
periods (like when reporting system load.)"""
super(RateTicker, self).__init__()
self._periods = periods
|
vmlaker/coils | coils/UserInput.py | user_input | python | def user_input(
field, default='', choices=None, password=False,
empty_ok=False, accept=False):
result = ''
while not result:
prompt = field
if default:
prompt += ' [{0}]'.format(default)
prompt += ': '
if accept and not (not default and not empty_ok):
print(prompt)
result = '{0}'.format(default)
else:
if password:
result = getpass.getpass(prompt)
else:
result = input(prompt)
result = result.strip()
if not result:
result = default
if choices and result not in choices:
print('Must be one of {0}'.format(choices))
result = ''
if empty_ok:
break
return result | Prompt user for input until a value is retrieved or default
is accepted. Return the input.
Arguments:
*field* - Description of the input being prompted for.
*default* - Default value for the input accepted with a Return-key.
*password* - Whether the user input should not be echoed to screen.
*empty_ok* - Whether it's okay to accept an empty input.
*accept* - Whether to skip getting actual user input and just accept
the default value, unless prevented by the combination of
arguments *empty_ok* and *default*. That is, unless *default*
is an empty string and *empty_ok* is False. | train | https://github.com/vmlaker/coils/blob/a3a613b3d661dec010e5879c86e62cbff2519dd0/coils/UserInput.py#L12-L55 | null | """Like the input built-in, but with bells and whistles."""
import getpass
# Use raw_input for Python 2.x
try:
input = raw_input
except NameError:
pass
|
vmlaker/coils | coils/SortedList.py | SortedList.getCountLT | python | def getCountLT(self, item):
index = bisect.bisect_left(self._list, item)
return index | Return number of elements less than *item*. | train | https://github.com/vmlaker/coils/blob/a3a613b3d661dec010e5879c86e62cbff2519dd0/coils/SortedList.py#L18-L21 | null | class SortedList(object):
"""Maintains a list of sorted items, with fast trimming
using less-than/greater-than comparison."""
def __init__(self, donor=list()):
"""Initialize the object with a copy of the donor list, sorted."""
self._list = sorted(donor[:])
def add(self, item):
"""Add item to the list while maintaining sorted order."""
bisect.insort_left(self._list, item)
def getCountGT(self, item):
"""Return number of elements greater than *item*."""
index = bisect.bisect_right(self._list, item)
return len(self._list) - index
def removeLT(self, item):
"""Trim off any elements less than *item*.
Return number of elements trimmed."""
count = self.getCountLT(item)
self._list = self._list[count:]
return count
|
vmlaker/coils | coils/SortedList.py | SortedList.getCountGT | python | def getCountGT(self, item):
index = bisect.bisect_right(self._list, item)
return len(self._list) - index | Return number of elements greater than *item*. | train | https://github.com/vmlaker/coils/blob/a3a613b3d661dec010e5879c86e62cbff2519dd0/coils/SortedList.py#L23-L26 | null | class SortedList(object):
"""Maintains a list of sorted items, with fast trimming
using less-than/greater-than comparison."""
def __init__(self, donor=list()):
"""Initialize the object with a copy of the donor list, sorted."""
self._list = sorted(donor[:])
def add(self, item):
"""Add item to the list while maintaining sorted order."""
bisect.insort_left(self._list, item)
def getCountLT(self, item):
"""Return number of elements less than *item*."""
index = bisect.bisect_left(self._list, item)
return index
def removeLT(self, item):
"""Trim off any elements less than *item*.
Return number of elements trimmed."""
count = self.getCountLT(item)
self._list = self._list[count:]
return count
|
vmlaker/coils | coils/SortedList.py | SortedList.removeLT | python | def removeLT(self, item):
count = self.getCountLT(item)
self._list = self._list[count:]
return count | Trim off any elements less than *item*.
Return number of elements trimmed. | train | https://github.com/vmlaker/coils/blob/a3a613b3d661dec010e5879c86e62cbff2519dd0/coils/SortedList.py#L28-L33 | [
"def getCountLT(self, item):\n \"\"\"Return number of elements less than *item*.\"\"\"\n index = bisect.bisect_left(self._list, item)\n return index\n"
] | class SortedList(object):
"""Maintains a list of sorted items, with fast trimming
using less-than/greater-than comparison."""
def __init__(self, donor=list()):
"""Initialize the object with a copy of the donor list, sorted."""
self._list = sorted(donor[:])
def add(self, item):
"""Add item to the list while maintaining sorted order."""
bisect.insort_left(self._list, item)
def getCountLT(self, item):
"""Return number of elements less than *item*."""
index = bisect.bisect_left(self._list, item)
return index
def getCountGT(self, item):
"""Return number of elements greater than *item*."""
index = bisect.bisect_right(self._list, item)
return len(self._list) - index
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.